Merge tag 'v3.8.13.13' of git://kernel.ubuntu.com/ubuntu/linux into odroid-3.8.y

v3.8.13.13
Mauro Ribeiro
2013-11-22 12:06:21 +09:00
108 changed files with 926 additions and 403 deletions

View File

@@ -510,6 +510,15 @@ tcp_syn_retries - INTEGER
tcp_timestamps - BOOLEAN
Enable timestamps as defined in RFC1323.
tcp_min_tso_segs - INTEGER
Minimum number of segments per TSO frame.
Since linux-3.12, TCP automatically sizes TSO frames based on
flow rate, instead of filling 64-Kbyte packets.
For specific usages, it is possible to force TCP to build big
TSO frames. Note that the TCP stack might split overly large
TSO packets if the available window is too small.
Default: 2
tcp_tso_win_divisor - INTEGER
This allows control over what percentage of the congestion window
can be consumed by a single TSO frame.
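The new tcp_min_tso_segs knob is exposed through the usual procfs path for net.ipv4 sysctls. A minimal userspace sketch, assuming that path and root privileges for the write:

#include <stdio.h>

int main(void)
{
        const char *path = "/proc/sys/net/ipv4/tcp_min_tso_segs";
        FILE *f = fopen(path, "r");
        int val;

        if (!f) {
                perror(path);
                return 1;
        }
        if (fscanf(f, "%d", &val) == 1)
                printf("tcp_min_tso_segs = %d\n", val);
        fclose(f);

        /* Raising the floor forces bigger TSO frames (root only). */
        f = fopen(path, "w");
        if (f) {
                fprintf(f, "%d\n", 8);
                fclose(f);
        }
        return 0;
}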

View File

@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 8
SUBLEVEL = 13
EXTRAVERSION = .12
EXTRAVERSION = .13
NAME = Remoralised Urchins Update
# *DOCUMENTATION*

View File

@@ -195,6 +195,8 @@ common_stext:
ldw MEM_PDC_HI(%r0),%r6
depd %r6, 31, 32, %r3 /* move to upper word */
mfctl %cr30,%r6 /* PCX-W2 firmware bug */
ldo PDC_PSW(%r0),%arg0 /* 21 */
ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
@@ -203,6 +205,8 @@ common_stext:
copy %r0,%arg3
stext_pdc_ret:
mtctl %r6,%cr30 /* restore task thread info */
/* restore rfi target address*/
ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
tophys_r1 %r10

View File

@@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
char *end, buf[sizeof("nnnnn\0")];
size_t size;
int tmp;
if (copy_from_user(buf, buffer, count))
size = min(count, sizeof(buf));
if (copy_from_user(buf, buffer, size))
return -EFAULT;
tmp = simple_strtol(buf, &end, 0);
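The fix clamps the user-supplied count to the kernel buffer before copying. A hedged sketch of the same bounded-copy pattern in a hypothetical proc write handler (my_proc_write is illustrative, not from this patch):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

/* 'count' comes straight from userspace and must never be trusted
 * as a copy length. */
static ssize_t my_proc_write(struct file *file, const char __user *buffer,
                             size_t count, loff_t *pos)
{
        char buf[16];
        size_t size = min(count, sizeof(buf) - 1);

        if (copy_from_user(buf, buffer, size))
                return -EFAULT;
        buf[size] = '\0';       /* terminate before any string parsing */

        return count;           /* claim the whole write was consumed */
}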

View File

@@ -341,7 +341,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sp = regs->areg[1];
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
sp = current->sas_ss_sp + current->sas_ss_size;
}

View File

@@ -969,10 +969,17 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
*/
return_desc =
*(operand[0]->reference.where);
if (return_desc) {
acpi_ut_add_reference
(return_desc);
if (!return_desc) {
/*
* Element is NULL, do not allow the dereference.
* This provides compatibility with other ACPI
* implementations.
*/
return_ACPI_STATUS
(AE_AML_UNINITIALIZED_ELEMENT);
}
acpi_ut_add_reference(return_desc);
break;
default:
@@ -997,11 +1004,40 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
acpi_namespace_node
*)
return_desc);
if (!return_desc) {
break;
}
/*
* June 2013:
* buffer_fields/field_units require additional resolution
*/
switch (return_desc->common.type) {
case ACPI_TYPE_BUFFER_FIELD:
case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_LOCAL_BANK_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:
status =
acpi_ex_read_data_from_field
(walk_state, return_desc,
&temp_desc);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
return_desc = temp_desc;
break;
default:
/* Add another reference to the object */
acpi_ut_add_reference
(return_desc);
break;
}
}
/* Add another reference to the object! */
acpi_ut_add_reference(return_desc);
break;
default:

View File

@@ -57,6 +57,11 @@ acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
union acpi_operand_object *dest_desc,
struct acpi_walk_state *walk_state);
static acpi_status
acpi_ex_store_direct_to_node(union acpi_operand_object *source_desc,
struct acpi_namespace_node *node,
struct acpi_walk_state *walk_state);
/*******************************************************************************
*
* FUNCTION: acpi_ex_store
@@ -376,7 +381,11 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
* When storing into an object the data is converted to the
* target object type then stored in the object. This means
* that the target object type (for an initialized target) will
* not be changed by a store operation.
* not be changed by a store operation. A copy_object can change
* the target type, however.
*
* The implicit_conversion flag is set to NO/FALSE only when
* storing to an arg_x -- as per the rules of the ACPI spec.
*
* Assumes parameters are already validated.
*
@@ -400,7 +409,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
target_type = acpi_ns_get_type(node);
target_desc = acpi_ns_get_attached_object(node);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p(%s) into node %p(%s)\n",
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p (%s) to node %p (%s)\n",
source_desc,
acpi_ut_get_object_type_name(source_desc), node,
acpi_ut_get_type_name(target_type)));
@@ -414,46 +423,31 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
return_ACPI_STATUS(status);
}
/* If no implicit conversion, drop into the default case below */
if ((!implicit_conversion) ||
((walk_state->opcode == AML_COPY_OP) &&
(target_type != ACPI_TYPE_LOCAL_REGION_FIELD) &&
(target_type != ACPI_TYPE_LOCAL_BANK_FIELD) &&
(target_type != ACPI_TYPE_LOCAL_INDEX_FIELD))) {
/*
* Force execution of default (no implicit conversion). Note:
* copy_object does not perform an implicit conversion, as per the ACPI
* spec -- except in case of region/bank/index fields -- because these
* objects must retain their original type permanently.
*/
target_type = ACPI_TYPE_ANY;
}
/* Do the actual store operation */
switch (target_type) {
case ACPI_TYPE_BUFFER_FIELD:
case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_LOCAL_BANK_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:
/* For fields, copy the source data to the target field. */
status = acpi_ex_write_data_to_field(source_desc, target_desc,
&walk_state->result_obj);
break;
case ACPI_TYPE_INTEGER:
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
/*
* These target types are all of type Integer/String/Buffer, and
* therefore support implicit conversion before the store.
*
* Copy and/or convert the source object to a new target object
* The simple data types all support implicit source operand
* conversion before the store.
*/
if ((walk_state->opcode == AML_COPY_OP) || !implicit_conversion) {
/*
* However, copy_object and Stores to arg_x do not perform
* an implicit conversion, as per the ACPI specification.
* A direct store is performed instead.
*/
status = acpi_ex_store_direct_to_node(source_desc, node,
walk_state);
break;
}
/* Store with implicit source operand conversion support */
status =
acpi_ex_store_object_to_object(source_desc, target_desc,
&new_desc, walk_state);
@@ -467,13 +461,12 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
* the Name's type to that of the value being stored in it.
* source_desc reference count is incremented by attach_object.
*
* Note: This may change the type of the node if an explicit store
* has been performed such that the node/object type has been
* changed.
* Note: This may change the type of the node if an explicit
* store has been performed such that the node/object type
* has been changed.
*/
status =
acpi_ns_attach_object(node, new_desc,
new_desc->common.type);
status = acpi_ns_attach_object(node, new_desc,
new_desc->common.type);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Store %s into %s via Convert/Attach\n",
@@ -484,19 +477,83 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
}
break;
case ACPI_TYPE_BUFFER_FIELD:
case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_LOCAL_BANK_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:
/*
* For all fields, always write the source data to the target
* field. Any required implicit source operand conversion is
* performed in the function below as necessary. Note, field
* objects must retain their original type permanently.
*/
status = acpi_ex_write_data_to_field(source_desc, target_desc,
&walk_state->result_obj);
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Storing %s (%p) directly into node (%p) with no implicit conversion\n",
acpi_ut_get_object_type_name(source_desc),
source_desc, node));
/* No conversions for all other types. Just attach the source object */
status = acpi_ns_attach_object(node, source_desc,
source_desc->common.type);
/*
* No conversions for all other types. Directly store a copy of
* the source object. This is the ACPI spec-defined behavior for
* the copy_object operator.
*
* NOTE: For the Store operator, this is a departure from the
* ACPI spec, which states "If conversion is impossible, abort
* the running control method". Instead, this code implements
* "If conversion is impossible, treat the Store operation as
* a CopyObject".
*/
status = acpi_ex_store_direct_to_node(source_desc, node,
walk_state);
break;
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_store_direct_to_node
*
* PARAMETERS: source_desc - Value to be stored
* node - Named object to receive the value
* walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: "Store" an object directly to a node. This involves a copy
* and an attach.
*
******************************************************************************/
static acpi_status
acpi_ex_store_direct_to_node(union acpi_operand_object *source_desc,
struct acpi_namespace_node *node,
struct acpi_walk_state *walk_state)
{
acpi_status status;
union acpi_operand_object *new_desc;
ACPI_FUNCTION_TRACE(ex_store_direct_to_node);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Storing [%s] (%p) directly into node [%s] (%p)"
" with no implicit conversion\n",
acpi_ut_get_object_type_name(source_desc),
source_desc, acpi_ut_get_type_name(node->type),
node));
/* Copy the source object to a new object */
status =
acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc, walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Attach the new object to the node */
status = acpi_ns_attach_object(node, new_desc, new_desc->common.type);
acpi_ut_remove_reference(new_desc);
return_ACPI_STATUS(status);
}

View File

@@ -1322,14 +1322,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
* should be retried. To be used from EH.
*
* SCSI midlayer limits the number of retries to scmd->allowed.
* scmd->retries is decremented for commands which get retried
* scmd->allowed is incremented for commands which get retried
* due to unrelated failures (qc->err_mask is zero).
*/
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
if (!qc->err_mask && scmd->retries)
scmd->retries--;
if (!qc->err_mask)
scmd->allowed++;
__ata_eh_qc_complete(qc);
}

View File

@@ -107,7 +107,7 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
vco = icst_hz_to_vco(icst->params, rate);
icst->rate = icst_hz(icst->params, vco);
vco_set(icst->vcoreg, icst->lockreg, vco);
vco_set(icst->lockreg, icst->vcoreg, vco);
return 0;
}

View File

@@ -65,6 +65,7 @@ void proc_fork_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -80,6 +81,7 @@ void proc_fork_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
/* If cn_netlink_send() failed, the data is not sent */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -96,6 +98,7 @@ void proc_exec_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -106,6 +109,7 @@ void proc_exec_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -122,6 +126,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
ev->what = which_id;
ev->event_data.id.process_pid = task->pid;
ev->event_data.id.process_tgid = task->tgid;
@@ -145,6 +150,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -160,6 +166,7 @@ void proc_sid_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -170,6 +177,7 @@ void proc_sid_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -185,6 +193,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -203,6 +212,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -218,6 +228,7 @@ void proc_comm_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -229,6 +240,7 @@ void proc_comm_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -244,6 +256,7 @@ void proc_exit_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -256,6 +269,8 @@ void proc_exit_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -279,6 +294,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
msg->seq = rcvd_seq;
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -288,6 +304,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = rcvd_ack + 1;
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
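Each handler now memsets the event payload and zeroes msg->flags, so no uninitialized stack bytes reach userspace over netlink. A condensed, hedged sketch of the pattern (fill_cn_msg is a hypothetical helper; field names are from <linux/connector.h>):

#include <linux/connector.h>
#include <linux/string.h>

/* 'msg' must point at sizeof(*msg) + len bytes of storage. */
static void fill_cn_msg(struct cn_msg *msg, const void *payload, __u16 len)
{
        memset(msg, 0, sizeof(*msg) + len);     /* no stale stack bytes */
        msg->id.idx = CN_IDX_PROC;
        msg->id.val = CN_VAL_PROC;
        msg->len = len;
        /* seq, ack and flags stay 0 unless they carry real meaning */
        memcpy(msg->data, payload, len);
}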

View File

@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
static void cn_rx_skb(struct sk_buff *__skb)
{
struct nlmsghdr *nlh;
int err;
struct sk_buff *skb;
int len, err;
skb = skb_get(__skb);
if (skb->len >= NLMSG_SPACE(0)) {
nlh = nlmsg_hdr(skb);
len = nlmsg_len(nlh);
if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
if (len < (int)sizeof(struct cn_msg) ||
skb->len < nlh->nlmsg_len ||
nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
len > CONNECTOR_MAX_MSG_SIZE) {
kfree_skb(skb);
return;
}

View File

@@ -534,9 +534,15 @@ static bool dmi_matches(const struct dmi_system_id *dmi)
int s = dmi->matches[i].slot;
if (s == DMI_NONE)
break;
if (dmi_ident[s]
&& strstr(dmi_ident[s], dmi->matches[i].substr))
continue;
if (dmi_ident[s]) {
if (!dmi->matches[i].exact_match &&
strstr(dmi_ident[s], dmi->matches[i].substr))
continue;
else if (dmi->matches[i].exact_match &&
!strcmp(dmi_ident[s], dmi->matches[i].substr))
continue;
}
/* No match */
return false;
}

View File

@@ -411,9 +411,16 @@ long drm_ioctl(struct file *filp,
asize = drv_size;
}
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
u32 drv_size;
ioctl = &drm_ioctls[nr];
cmd = ioctl->cmd;
drv_size = _IOC_SIZE(ioctl->cmd);
usize = asize = _IOC_SIZE(cmd);
if (drv_size > asize)
asize = drv_size;
cmd = ioctl->cmd;
} else
goto err_i1;
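Both the user-visible and driver-declared sizes are now derived from _IOC_SIZE(cmd), the size bitfield packed into every ioctl number. A small userspace sketch of that decoding (DEMO_GET is a hypothetical command):

#include <stdio.h>
#include <sys/ioctl.h>

struct demo_args { int a; int b; };
#define DEMO_GET _IOR('d', 1, struct demo_args) /* hypothetical command */

int main(void)
{
        printf("dir=%u type=0x%02x nr=%u size=%u\n",
               _IOC_DIR(DEMO_GET), _IOC_TYPE(DEMO_GET),
               _IOC_NR(DEMO_GET), _IOC_SIZE(DEMO_GET));
        /* size prints sizeof(struct demo_args) */
        return 0;
}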

View File

@@ -797,6 +797,38 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Intel D410PT",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
DMI_MATCH(DMI_BOARD_NAME, "D410PT"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Intel D425KT",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Intel D510MO",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D510MO"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Intel D525MW",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
},
},
{ } /* terminating entry */
};

View File

@@ -1641,7 +1641,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
* does the same thing and more.
*/
if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
(rdev->family != CHIP_RS880))
(rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {

View File

@@ -740,9 +740,17 @@ static void vmw_postclose(struct drm_device *dev,
struct vmw_fpriv *vmw_fp;
vmw_fp = vmw_fpriv(file_priv);
ttm_object_file_release(&vmw_fp->tfile);
if (vmw_fp->locked_master)
if (vmw_fp->locked_master) {
struct vmw_master *vmaster =
vmw_master(vmw_fp->locked_master);
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
ttm_vt_unlock(&vmaster->lock);
drm_master_put(&vmw_fp->locked_master);
}
ttm_object_file_release(&vmw_fp->tfile);
kfree(vmw_fp);
}
@@ -942,14 +950,13 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
vmw_execbuf_release_pinned_bo(dev_priv);
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
drm_master_put(&vmw_fp->locked_master);
}
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
vmw_execbuf_release_pinned_bo(dev_priv);
if (!dev_priv->enable_fb) {
ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);

View File

@@ -971,7 +971,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (new_backup)
res->backup_offset = new_backup_offset;
if (!res->func->may_evict)
if (!res->func->may_evict || res->id == -1)
return;
write_lock(&dev_priv->resource_lock);

View File

@@ -8044,6 +8044,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
u64 *p;
int lo, hi;
int rv = 1;
unsigned long flags;
if (bb->shift < 0)
/* badblocks are disabled */
@@ -8058,7 +8059,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
sectors = next - s;
}
write_seqlock_irq(&bb->lock);
write_seqlock_irqsave(&bb->lock, flags);
p = bb->page;
lo = 0;
@@ -8174,7 +8175,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
bb->changed = 1;
if (!acknowledged)
bb->unacked_exist = 1;
write_sequnlock_irq(&bb->lock);
write_sequnlock_irqrestore(&bb->lock, flags);
return rv;
}
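md_set_badblocks can be reached with interrupts already disabled, so the plain _irq lock variants, which unconditionally re-enable IRQs on unlock, are replaced by the irqsave/irqrestore pair. The pattern in isolation, as a hedged sketch:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(demo_lock);
static int demo_value;

/* Safe whether or not the caller already disabled interrupts. */
static void demo_update(int v)
{
        unsigned long flags;

        write_seqlock_irqsave(&demo_lock, flags);
        demo_value = v;
        write_sequnlock_irqrestore(&demo_lock, flags);
}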

View File

@@ -1493,6 +1493,7 @@ static int raid1_spare_active(struct mddev *mddev)
}
}
if (rdev
&& rdev->recovery_offset == MaxSector
&& !test_bit(Faulty, &rdev->flags)
&& !test_and_set_bit(In_sync, &rdev->flags)) {
count++;

View File

@@ -1718,6 +1718,7 @@ static int raid10_spare_active(struct mddev *mddev)
}
sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
} else if (tmp->rdev
&& tmp->rdev->recovery_offset == MaxSector
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
count++;

View File

@@ -672,6 +672,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
bi->bi_next = NULL;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
*/
if (rw & REQ_DISCARD)
bi->bi_vcnt = 0;
if (rrdev)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
@@ -706,6 +712,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
rbi->bi_io_vec[0].bv_offset = 0;
rbi->bi_size = STRIPE_SIZE;
rbi->bi_next = NULL;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
*/
if (rw & REQ_DISCARD)
rbi->bi_vcnt = 0;
if (conf->mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
rbi, disk_devt(conf->mddev->gendisk),
@@ -2829,6 +2841,14 @@ static void handle_stripe_clean_event(struct r5conf *conf,
}
/* now that discard is done we can proceed with any sync */
clear_bit(STRIPE_DISCARD, &sh->state);
/*
* SCSI discard will change some bio fields and the stripe has
* no updated data, so remove it from hash list and the stripe
* will be reinitialized
*/
spin_lock_irq(&conf->device_lock);
remove_hash(sh);
spin_unlock_irq(&conf->device_lock);
if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
set_bit(STRIPE_HANDLE, &sh->state);

View File

@@ -698,14 +698,14 @@ static size_t can_get_size(const struct net_device *dev)
size_t size;
size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */
size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */
size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
size += sizeof(struct can_berr_counter);
size += nla_total_size(sizeof(struct can_berr_counter));
if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
size += sizeof(struct can_bittiming_const);
size += nla_total_size(sizeof(struct can_bittiming_const));
return size;
}
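Every netlink attribute occupies its payload plus a padded header, so a get_size callback must sum nla_total_size() over each payload rather than raw sizeof(). A hedged sketch with a hypothetical attribute set:

#include <net/netlink.h>

struct demo_cfg {       /* hypothetical attribute payload */
        u32 a;
        u8 b;
};

static size_t demo_get_size(void)
{
        /* nla_total_size() adds NLA_HDRLEN and pads to NLA_ALIGNTO,
         * which a bare sizeof() would miss. */
        return nla_total_size(sizeof(u32)) +            /* DEMO_ATTR_X */
               nla_total_size(sizeof(struct demo_cfg)); /* DEMO_ATTR_CFG */
}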

View File

@@ -968,9 +968,9 @@ static void unregister_flexcandev(struct net_device *dev)
}
static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);

View File

@@ -1273,15 +1273,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
spin_unlock_bh(&mp->mib_counters_lock);
mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
static void mib_counters_timer_wrapper(unsigned long _mp)
{
struct mv643xx_eth_private *mp = (void *)_mp;
mib_counters_update(mp);
mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
@@ -2368,6 +2366,7 @@ static int mv643xx_eth_open(struct net_device *dev)
mp->int_mask |= INT_TX_END_0 << i;
}
add_timer(&mp->mib_counters_timer);
port_start(mp);
wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
@@ -2915,7 +2914,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mp->mib_counters_timer.data = (unsigned long)mp;
mp->mib_counters_timer.function = mib_counters_timer_wrapper;
mp->mib_counters_timer.expires = jiffies + 30 * HZ;
add_timer(&mp->mib_counters_timer);
spin_lock_init(&mp->mib_counters_lock);

View File

@@ -878,8 +878,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
}
if (!netdev_mc_empty(ndev)) {
} else if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);

View File

@@ -1813,8 +1813,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
if (ppp->pass_filter || ppp->active_filter) {
if (skb_cloned(skb) &&
pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
if (skb_unclone(skb, GFP_ATOMIC))
goto err;
*skb_push(skb, 2) = 0;

View File

@@ -1076,6 +1076,11 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
{
struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
mutex_lock(&vi->config_lock);
if (!vi->config_enable)
goto done;
switch(action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
@@ -1088,6 +1093,9 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
default:
break;
}
done:
mutex_unlock(&vi->config_lock);
return NOTIFY_OK;
}

View File

@@ -977,7 +977,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
/* See iptunnel_xmit() */
if (skb->ip_summed != CHECKSUM_PARTIAL)
skb->ip_summed = CHECKSUM_NONE;
ip_select_ident(iph, &rt->dst, NULL);
ip_select_ident(skb, &rt->dst, NULL);
err = ip_local_out(skb);
if (likely(net_xmit_eval(err) == 0)) {

View File

@@ -1972,6 +1972,7 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
}
i = port->index;
memset(&sync, 0, sizeof(sync));
sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
/* Lucky card and linux use same encoding here */
sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==

View File

@@ -355,6 +355,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
ifr->ifr_settings.size = size; /* data size wanted */
return -ENOBUFS;
}
memset(&line, 0, sizeof(line));
line.clock_type = get_status(port)->clocking;
line.clock_rate = 0;
line.loopback = 0;

View File

@@ -209,6 +209,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
unsigned long flags;
int i;
if (ath_startrecv(sc) != 0) {
ath_err(common, "Unable to restart recv logic\n");
@@ -236,6 +237,15 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
}
work:
ath_restart_work(sc);
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (!ATH_TXQ_SETUP(sc, i))
continue;
spin_lock_bh(&sc->tx.txq[i].axq_lock);
ath_txq_schedule(sc, &sc->tx.txq[i]);
spin_unlock_bh(&sc->tx.txq[i].axq_lock);
}
}
if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3)
@@ -546,20 +556,8 @@ static int ath_reset(struct ath_softc *sc, bool retry_tx)
int r;
ath9k_ps_wakeup(sc);
r = ath_reset_internal(sc, NULL, retry_tx);
if (retry_tx) {
int i;
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i)) {
spin_lock_bh(&sc->tx.txq[i].axq_lock);
ath_txq_schedule(sc, &sc->tx.txq[i]);
spin_unlock_bh(&sc->tx.txq[i].axq_lock);
}
}
}
ath9k_ps_restore(sc);
return r;

View File

@@ -270,10 +270,12 @@ process_start:
}
} while (true);
if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter))
goto process_start;
spin_lock_irqsave(&adapter->main_proc_lock, flags);
if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) {
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
goto process_start;
}
adapter->mwifiex_processing = false;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
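The fix re-tests the "more work arrived?" condition under main_proc_lock before clearing the processing flag, closing the window in which an interrupt landing between the check and the clear was lost. The shape of that pattern, as a hedged sketch with hypothetical names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(proc_lock);
static bool processing;
static bool work_pending;       /* set from interrupt context */

static void demo_main_loop(void)
{
        unsigned long flags;

process_start:
        while (work_pending) {
                /* ... drain the pending work ... */
        }

        spin_lock_irqsave(&proc_lock, flags);
        if (work_pending) {     /* an IRQ raced in: go around again */
                spin_unlock_irqrestore(&proc_lock, flags);
                goto process_start;
        }
        processing = false;     /* only cleared while holding the lock */
        spin_unlock_irqrestore(&proc_lock, flags);
}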

View File

@@ -343,7 +343,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
(bool)GET_RX_DESC_PAGGR(pdesc));
rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
stats->rx_bufshift);
rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
p_drvinfo);
}

View File

@@ -115,6 +115,7 @@ struct xenvif *xenvif_alloc(struct device *parent,
int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int evtchn);
void xenvif_disconnect(struct xenvif *vif);
void xenvif_free(struct xenvif *vif);
void xenvif_get(struct xenvif *vif);
void xenvif_put(struct xenvif *vif);

View File

@@ -302,6 +302,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
}
netdev_dbg(dev, "Successfully created xenvif\n");
__module_get(THIS_MODULE);
return vif;
}
@@ -367,9 +370,14 @@ void xenvif_disconnect(struct xenvif *vif)
if (vif->irq)
unbind_from_irqhandler(vif->irq, vif);
xen_netbk_unmap_frontend_rings(vif);
}
void xenvif_free(struct xenvif *vif)
{
unregister_netdev(vif->dev);
xen_netbk_unmap_frontend_rings(vif);
free_netdev(vif->dev);
module_put(THIS_MODULE);
}

View File

@@ -24,6 +24,12 @@
struct backend_info {
struct xenbus_device *dev;
struct xenvif *vif;
/* This is the state that will be reflected in xenstore when any
* active hotplug script completes.
*/
enum xenbus_state state;
enum xenbus_state frontend_state;
struct xenbus_watch hotplug_status_watch;
u8 have_hotplug_status_watch:1;
@@ -33,16 +39,20 @@ static int connect_rings(struct backend_info *);
static void connect(struct backend_info *);
static void backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
static void set_backend_state(struct backend_info *be,
enum xenbus_state state);
static int netback_remove(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
set_backend_state(be, XenbusStateClosed);
unregister_hotplug_status_watch(be);
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
xenvif_disconnect(be->vif);
xenvif_free(be->vif);
be->vif = NULL;
}
kfree(be);
@@ -126,6 +136,8 @@ static int netback_probe(struct xenbus_device *dev,
if (err)
goto fail;
be->state = XenbusStateInitWait;
/* This kicks hotplug scripts, so do it immediately. */
backend_create_xenvif(be);
@@ -198,15 +210,113 @@ static void backend_create_xenvif(struct backend_info *be)
kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
}
static void disconnect_backend(struct xenbus_device *dev)
static void backend_disconnect(struct backend_info *be)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
if (be->vif) {
xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
if (be->vif)
xenvif_disconnect(be->vif);
be->vif = NULL;
}
static void backend_connect(struct backend_info *be)
{
if (be->vif)
connect(be);
}
static inline void backend_switch_state(struct backend_info *be,
enum xenbus_state state)
{
struct xenbus_device *dev = be->dev;
pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
be->state = state;
/* If we are waiting for a hotplug script then defer the
* actual xenbus state change.
*/
if (!be->have_hotplug_status_watch)
xenbus_switch_state(dev, state);
}
/* Handle backend state transitions:
*
* The backend state starts in InitWait and the following transitions are
* allowed.
*
* InitWait -> Connected
*
*    ^    \         |
*    |     \        |
*    |      \       |
*    |       \      |
*    |        \     |
*    |         \    |
*    |          V   V
*
* Closed <-> Closing
*
* The state argument specifies the eventual state of the backend and the
* function transitions to that state via the shortest path.
*/
static void set_backend_state(struct backend_info *be,
enum xenbus_state state)
{
while (be->state != state) {
switch (be->state) {
case XenbusStateClosed:
switch (state) {
case XenbusStateInitWait:
case XenbusStateConnected:
pr_info("%s: prepare for reconnect\n",
be->dev->nodename);
backend_switch_state(be, XenbusStateInitWait);
break;
case XenbusStateClosing:
backend_switch_state(be, XenbusStateClosing);
break;
default:
BUG();
}
break;
case XenbusStateInitWait:
switch (state) {
case XenbusStateConnected:
backend_connect(be);
backend_switch_state(be, XenbusStateConnected);
break;
case XenbusStateClosing:
case XenbusStateClosed:
backend_switch_state(be, XenbusStateClosing);
break;
default:
BUG();
}
break;
case XenbusStateConnected:
switch (state) {
case XenbusStateInitWait:
case XenbusStateClosing:
case XenbusStateClosed:
backend_disconnect(be);
backend_switch_state(be, XenbusStateClosing);
break;
default:
BUG();
}
break;
case XenbusStateClosing:
switch (state) {
case XenbusStateInitWait:
case XenbusStateConnected:
case XenbusStateClosed:
backend_switch_state(be, XenbusStateClosed);
break;
default:
BUG();
}
break;
default:
BUG();
}
}
}
@@ -218,43 +328,33 @@ static void frontend_changed(struct xenbus_device *dev,
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
pr_debug("frontend state %s", xenbus_strstate(frontend_state));
pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));
be->frontend_state = frontend_state;
switch (frontend_state) {
case XenbusStateInitialising:
if (dev->state == XenbusStateClosed) {
printk(KERN_INFO "%s: %s: prepare for reconnect\n",
__func__, dev->nodename);
xenbus_switch_state(dev, XenbusStateInitWait);
}
set_backend_state(be, XenbusStateInitWait);
break;
case XenbusStateInitialised:
break;
case XenbusStateConnected:
if (dev->state == XenbusStateConnected)
break;
backend_create_xenvif(be);
if (be->vif)
connect(be);
set_backend_state(be, XenbusStateConnected);
break;
case XenbusStateClosing:
if (be->vif)
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
disconnect_backend(dev);
xenbus_switch_state(dev, XenbusStateClosing);
set_backend_state(be, XenbusStateClosing);
break;
case XenbusStateClosed:
xenbus_switch_state(dev, XenbusStateClosed);
set_backend_state(be, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
/* fall through if not online */
case XenbusStateUnknown:
set_backend_state(be, XenbusStateClosed);
device_unregister(&dev->dev);
break;
@@ -347,7 +447,9 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
if (IS_ERR(str))
return;
if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
xenbus_switch_state(be->dev, XenbusStateConnected);
/* Complete any pending state change */
xenbus_switch_state(be->dev, be->state);
/* Not interested in this watch anymore. */
unregister_hotplug_status_watch(be);
}
@@ -377,12 +479,8 @@ static void connect(struct backend_info *be)
err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
hotplug_status_changed,
"%s/%s", dev->nodename, "hotplug-status");
if (err) {
/* Switch now, since we can't do a watch. */
xenbus_switch_state(dev, XenbusStateConnected);
} else {
if (!err)
be->have_hotplug_status_watch = 1;
}
netif_wake_queue(be->vif->dev);
}

View File

@@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
}

View File

@@ -1960,6 +1960,7 @@ cntrlEnd:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
memset(&DevInfo, 0, sizeof(DevInfo));
DevInfo.MaxRDMBufferSize = BUFFER_4K;
DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
DevInfo.u32RxAlignmentCorrection = 0;

View File

@@ -152,6 +152,9 @@ ssize_t oz_cdev_write(struct file *filp, const char __user *buf, size_t count,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
return -EINVAL;
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd)

View File

@@ -1063,7 +1063,7 @@ static int mp_wait_modem_status(struct sb_uart_state *state, unsigned long arg)
static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt)
{
struct serial_icounter_struct icount;
struct serial_icounter_struct icount = {};
struct sb_uart_icount cnow;
struct sb_uart_port *port = state->port;
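The empty-brace initializer zeroes every member, so fields the driver never fills cannot leak kernel stack contents when the struct is copied out. A hedged sketch of the same pattern (demo_get_count is hypothetical):

#include <linux/serial.h>
#include <linux/uaccess.h>

static int demo_get_count(struct serial_icounter_struct __user *up)
{
        struct serial_icounter_struct icount = {};      /* every field 0 */

        icount.rx = 1;          /* fill only what the driver tracks */
        icount.tx = 2;

        return copy_to_user(up, &icount, sizeof(icount)) ? -EFAULT : 0;
}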

View File

@@ -570,6 +570,7 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
ltv_t *pLtv;
bool_t ltvAllocated = FALSE;
ENCSTRCT sEncryption;
size_t len;
#ifdef USE_WDS
hcf_16 hcfPort = HCF_PORT_0;
@@ -686,7 +687,8 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
break;
case CFG_CNF_OWN_NAME:
memset( lp->StationName, 0, sizeof( lp->StationName ));
memcpy( (void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
strlcpy(lp->StationName, &pLtv->u.u8[2], len);
pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
break;
case CFG_CNF_LOAD_BALANCING:
@@ -1800,6 +1802,7 @@ int wvlan_set_station_nickname(struct net_device *dev,
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
size_t len;
int ret = 0;
/*------------------------------------------------------------------------*/
@@ -1810,8 +1813,8 @@ int wvlan_set_station_nickname(struct net_device *dev,
wl_lock(lp, &flags);
memset( lp->StationName, 0, sizeof( lp->StationName ));
memcpy( lp->StationName, extra, wrqu->data.length);
len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName));
strlcpy(lp->StationName, extra, len);
/* Commit the adapter parameters */
wl_apply( lp );

View File

@@ -134,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
* pSCSI Host ID and enable for phba mode
*/
sh = scsi_host_lookup(phv->phv_host_id);
if (IS_ERR(sh)) {
if (!sh) {
pr_err("pSCSI: Unable to locate SCSI Host for"
" phv_host_id: %d\n", phv->phv_host_id);
return PTR_ERR(sh);
return -EINVAL;
}
phv->phv_lld_host = sh;
@@ -515,10 +515,10 @@ static int pscsi_configure_device(struct se_device *dev)
sh = phv->phv_lld_host;
} else {
sh = scsi_host_lookup(pdv->pdv_host_id);
if (IS_ERR(sh)) {
if (!sh) {
pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
return PTR_ERR(sh);
return -EINVAL;
}
}
} else {

View File

@@ -650,14 +650,28 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
{
struct uio_device *idev = vma->vm_private_data;
int mi = uio_find_mem_index(vma);
struct uio_mem *mem;
if (mi < 0)
return -EINVAL;
mem = idev->info->mem + mi;
if (vma->vm_end - vma->vm_start > mem->size)
return -EINVAL;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
/*
* We cannot use the vm_iomap_memory() helper here,
* because vma->vm_pgoff is the map index we looked
* up above in uio_find_mem_index(), rather than an
* actual page offset into the mmap.
*
* So we just do the physical mmap without a page
* offset.
*/
return remap_pfn_range(vma,
vma->vm_start,
idev->info->mem[mi].addr >> PAGE_SHIFT,
mem->addr >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}

View File

@@ -696,7 +696,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
if (data_direction != DMA_NONE) {
ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
&vq->iov[data_first], data_num,
data_direction == DMA_TO_DEVICE);
data_direction == DMA_FROM_DEVICE);
if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to sgl\n");
break; /* TODO */

View File

@@ -375,39 +375,15 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
{
struct au1100fb_device *fbdev;
unsigned int len;
unsigned long start=0, off;
fbdev = to_au1100fb_device(fbi);
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
return -EINVAL;
}
start = fbdev->fb_phys & PAGE_MASK;
len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
off = vma->vm_pgoff << PAGE_SHIFT;
if ((vma->vm_end - vma->vm_start + off) > len) {
return -EINVAL;
}
off += start;
vma->vm_pgoff = off >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
vma->vm_flags |= VM_IO;
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot)) {
return -EAGAIN;
}
return 0;
return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
}
static struct fb_ops au1100fb_ops =

View File

@@ -1233,38 +1233,15 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
* method mainly to allow the use of the TLB streaming flag (CCA=6)
*/
static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned int len;
unsigned long start=0, off;
struct au1200fb_device *fbdev = info->par;
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
return -EINVAL;
}
start = fbdev->fb_phys & PAGE_MASK;
len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
off = vma->vm_pgoff << PAGE_SHIFT;
if ((vma->vm_end - vma->vm_start + off) > len) {
return -EINVAL;
}
off += start;
vma->vm_pgoff = off >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
vma->vm_flags |= VM_IO;
return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
return 0;
return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
}
static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)

View File

@@ -1149,8 +1149,8 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
struct ecryptfs_msg_ctx *msg_ctx;
struct ecryptfs_message *msg = NULL;
char *auth_tok_sig;
char *payload;
size_t payload_len;
char *payload = NULL;
size_t payload_len = 0;
int rc;
rc = ecryptfs_get_auth_tok_sig(&auth_tok_sig, auth_tok);
@@ -1202,8 +1202,8 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
crypt_stat->key_size);
}
out:
if (msg)
kfree(msg);
kfree(msg);
kfree(payload);
return rc;
}

View File

@@ -95,7 +95,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
if (insert_inode_locked(inode) < 0) {
rc = -EINVAL;
goto fail_unlock;
goto fail_put;
}
inode_init_owner(inode, parent, mode);
@@ -156,7 +156,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
fail_drop:
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
fail_unlock:
clear_nlink(inode);
unlock_new_inode(inode);
fail_put:

View File

@@ -480,7 +480,8 @@ enum dmi_field {
};
struct dmi_strmatch {
unsigned char slot;
unsigned char slot:7;
unsigned char exact_match:1;
char substr[79];
};
@@ -508,7 +509,8 @@ struct dmi_system_id {
#define dmi_device_id dmi_system_id
#endif
#define DMI_MATCH(a, b) { a, b }
#define DMI_MATCH(a, b) { .slot = a, .substr = b }
#define DMI_EXACT_MATCH(a, b) { .slot = a, .substr = b, .exact_match = 1 }
#define PLATFORM_NAME_SIZE 20
#define PLATFORM_MODULE_PREFIX "platform:"

View File

@@ -797,6 +797,16 @@ static inline int skb_cloned(const struct sk_buff *skb)
(atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}
static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
might_sleep_if(pri & __GFP_WAIT);
if (skb_cloned(skb))
return pskb_expand_head(skb, 0, 0, pri);
return 0;
}
/**
* skb_header_cloned - is the header a clone
* @skb: buffer to check
@@ -1242,6 +1252,11 @@ static inline int skb_pagelen(const struct sk_buff *skb)
return len + skb_headlen(skb);
}
static inline bool skb_has_frags(const struct sk_buff *skb)
{
return skb_shinfo(skb)->nr_frags;
}
/**
* __skb_fill_page_desc - initialise a paged fragment in an skb
* @skb: buffer containing fragment to be initialised
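skb_unclone(), added above, wraps the "expand only if cloned" sequence that callers such as the ppp_generic hunk earlier in this series previously open-coded. A hedged sketch of a typical call site (demo_mangle is hypothetical):

#include <linux/skbuff.h>

static int demo_mangle(struct sk_buff *skb)
{
        /* Get a private, writable copy of the data before touching
         * headers in place; a no-op when the skb is not cloned. */
        if (skb_unclone(skb, GFP_ATOMIC))
                return -ENOMEM;

        /* ... safe to modify skb->data here ... */
        return 0;
}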

View File

@@ -290,6 +290,7 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
unsigned char err_offset = 0;
u8 opt_len = opt[1];
u8 opt_iter;
u8 tag_len;
if (opt_len < 8) {
err_offset = 1;
@@ -302,11 +303,12 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
}
for (opt_iter = 6; opt_iter < opt_len;) {
if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
tag_len = opt[opt_iter + 1];
if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
err_offset = opt_iter + 1;
goto out;
}
opt_iter += opt[opt_iter + 1];
opt_iter += tag_len;
}
out:

View File

@@ -474,10 +474,22 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
{
return dst_orig;
}
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
return NULL;
}
#else
extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, struct sock *sk,
int flags);
/* skb attached with this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
return dst->xfrm;
}
#endif
#endif /* _NET_DST_H */

View File

@@ -228,6 +228,7 @@ struct cg_proto;
* @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward
* @sk_allocation: allocation mode
* @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
* @sk_sndbuf: size of send buffer in bytes
* @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
* %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -352,6 +353,7 @@ struct sock {
kmemcheck_bitfield_end(flags);
int sk_wmem_queued;
gfp_t sk_allocation;
u32 sk_pacing_rate; /* bytes per second */
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
int sk_gso_type;

View File

@@ -292,6 +292,7 @@ extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
extern int sysctl_tcp_min_tso_segs;
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;

View File

@@ -223,6 +223,8 @@ struct drm_mode_get_connector {
__u32 connection;
__u32 mm_width, mm_height; /**< HxW in millimeters */
__u32 subpixel;
__u32 pad;
};
#define DRM_MODE_PROP_PENDING (1<<0)

View File

@@ -495,7 +495,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
if (miter->addr) {
miter->__offset += miter->consumed;
if (miter->__flags & SG_MITER_TO_SG)
if ((miter->__flags & SG_MITER_TO_SG) &&
!PageSlab(miter->page))
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {

View File

@@ -1297,74 +1297,91 @@ out:
int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
struct anon_vma *anon_vma = NULL;
struct page *page;
unsigned long haddr = addr & HPAGE_PMD_MASK;
int page_nid = -1, this_nid = numa_node_id();
int target_nid;
int current_nid = -1;
bool migrated;
bool page_locked = false;
bool page_locked;
bool migrated = false;
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(pmd, *pmdp)))
goto out_unlock;
page = pmd_page(pmd);
get_page(page);
current_nid = page_to_nid(page);
page_nid = page_to_nid(page);
count_vm_numa_event(NUMA_HINT_FAULTS);
if (current_nid == numa_node_id())
if (page_nid == this_nid)
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
/*
* Acquire the page lock to serialise THP migrations but avoid dropping
* page_table_lock if at all possible
*/
page_locked = trylock_page(page);
target_nid = mpol_misplaced(page, vma, haddr);
if (target_nid == -1) {
put_page(page);
goto clear_pmdnuma;
/* If the page was locked, there are no parallel migrations */
if (page_locked)
goto clear_pmdnuma;
/*
* Otherwise wait for potential migrations and retry. We do
* relock and check_same as the page may no longer be mapped.
* As the fault is being retried, do not account for it.
*/
spin_unlock(&mm->page_table_lock);
wait_on_page_locked(page);
page_nid = -1;
goto out;
}
/* Acquire the page lock to serialise THP migrations */
/* Page is misplaced, serialise migrations and parallel THP splits */
get_page(page);
spin_unlock(&mm->page_table_lock);
lock_page(page);
page_locked = true;
if (!page_locked)
lock_page(page);
anon_vma = page_lock_anon_vma_read(page);
/* Confirm the PMD did not change while page_table_lock was dropped */
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(pmd, *pmdp))) {
unlock_page(page);
put_page(page);
page_nid = -1;
goto out_unlock;
}
/*
* Migrate the THP to the requested node, returns with page unlocked
* and pmd_numa cleared.
*/
spin_unlock(&mm->page_table_lock);
/* Migrate the THP to the requested node */
migrated = migrate_misplaced_transhuge_page(mm, vma,
pmdp, pmd, addr,
page, target_nid);
pmdp, pmd, addr, page, target_nid);
if (migrated)
current_nid = target_nid;
else {
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(pmd, *pmdp))) {
unlock_page(page);
goto out_unlock;
}
goto clear_pmdnuma;
}
page_nid = target_nid;
task_numa_fault(current_nid, HPAGE_PMD_NR, migrated);
return 0;
goto out;
clear_pmdnuma:
BUG_ON(!PageLocked(page));
pmd = pmd_mknonnuma(pmd);
set_pmd_at(mm, haddr, pmdp, pmd);
VM_BUG_ON(pmd_numa(*pmdp));
update_mmu_cache_pmd(vma, addr, pmdp);
if (page_locked)
unlock_page(page);
unlock_page(page);
out_unlock:
spin_unlock(&mm->page_table_lock);
if (current_nid != -1)
task_numa_fault(current_nid, HPAGE_PMD_NR, migrated);
out:
if (anon_vma)
page_unlock_anon_vma_read(anon_vma);
if (page_nid != -1)
task_numa_fault(page_nid, HPAGE_PMD_NR, migrated);
return 0;
}

View File

@@ -3490,12 +3490,12 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
}
int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
unsigned long addr, int current_nid)
unsigned long addr, int page_nid)
{
get_page(page);
count_vm_numa_event(NUMA_HINT_FAULTS);
if (current_nid == numa_node_id())
if (page_nid == numa_node_id())
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
return mpol_misplaced(page, vma, addr);
@@ -3506,7 +3506,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
{
struct page *page = NULL;
spinlock_t *ptl;
int current_nid = -1;
int page_nid = -1;
int target_nid;
bool migrated = false;
@@ -3536,15 +3536,10 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
return 0;
}
current_nid = page_to_nid(page);
target_nid = numa_migrate_prep(page, vma, addr, current_nid);
page_nid = page_to_nid(page);
target_nid = numa_migrate_prep(page, vma, addr, page_nid);
pte_unmap_unlock(ptep, ptl);
if (target_nid == -1) {
/*
* Account for the fault against the current node if it not
* being replaced regardless of where the page is located.
*/
current_nid = numa_node_id();
put_page(page);
goto out;
}
@@ -3552,11 +3547,11 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* Migrate to the requested node */
migrated = migrate_misplaced_page(page, target_nid);
if (migrated)
current_nid = target_nid;
page_nid = target_nid;
out:
if (current_nid != -1)
task_numa_fault(current_nid, 1, migrated);
if (page_nid != -1)
task_numa_fault(page_nid, 1, migrated);
return 0;
}
@@ -3571,7 +3566,6 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long offset;
spinlock_t *ptl;
bool numa = false;
int local_nid = numa_node_id();
spin_lock(&mm->page_table_lock);
pmd = *pmdp;
@@ -3594,9 +3588,10 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
pte_t pteval = *pte;
struct page *page;
int curr_nid = local_nid;
int page_nid = -1;
int target_nid;
bool migrated;
bool migrated = false;
if (!pte_present(pteval))
continue;
if (!pte_numa(pteval))
@@ -3618,25 +3613,19 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(page_mapcount(page) != 1))
continue;
/*
* Note that the NUMA fault is later accounted to either
* the node that is currently running or where the page is
* migrated to.
*/
curr_nid = local_nid;
target_nid = numa_migrate_prep(page, vma, addr,
page_to_nid(page));
if (target_nid == -1) {
page_nid = page_to_nid(page);
target_nid = numa_migrate_prep(page, vma, addr, page_nid);
pte_unmap_unlock(pte, ptl);
if (target_nid != -1) {
migrated = migrate_misplaced_page(page, target_nid);
if (migrated)
page_nid = target_nid;
} else {
put_page(page);
continue;
}
/* Migrate to the requested node */
pte_unmap_unlock(pte, ptl);
migrated = migrate_misplaced_page(page, target_nid);
if (migrated)
curr_nid = target_nid;
task_numa_fault(curr_nid, 1, migrated);
if (page_nid != -1)
task_numa_fault(page_nid, 1, migrated);
pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
}

View File

@@ -1570,39 +1570,38 @@ bool numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages)
int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
int ret = 0;
int page_lru;
/* Avoid migrating to a node that is nearly full */
if (migrate_balanced_pgdat(pgdat, 1)) {
int page_lru;
if (!migrate_balanced_pgdat(pgdat, 1))
return 0;
if (isolate_lru_page(page)) {
put_page(page);
return 0;
}
/* Page is isolated */
ret = 1;
page_lru = page_is_file_cache(page);
if (!PageTransHuge(page))
inc_zone_page_state(page, NR_ISOLATED_ANON + page_lru);
else
mod_zone_page_state(page_zone(page),
NR_ISOLATED_ANON + page_lru,
HPAGE_PMD_NR);
}
if (isolate_lru_page(page))
return 0;
/*
* Page is either isolated or there is not enough space on the target
* node. If isolated, then it has taken a reference count and the
* callers reference can be safely dropped without the page
* disappearing underneath us during migration. Otherwise the page is
* not to be migrated but the callers reference should still be
* dropped so it does not leak.
* migrate_misplaced_transhuge_page() skips page migration's usual
* check on page_count(), so we must do it here, now that the page
* has been isolated: a GUP pin, or any other pin, prevents migration.
* The expected page count is 3: 1 for page's mapcount and 1 for the
* caller's pin and 1 for the reference taken by isolate_lru_page().
*/
if (PageTransHuge(page) && page_count(page) != 3) {
putback_lru_page(page);
return 0;
}
page_lru = page_is_file_cache(page);
mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru,
hpage_nr_pages(page));
/*
* Isolating the page has taken another reference, so the
* caller's reference can be safely dropped without the page
* disappearing underneath us during migration.
*/
put_page(page);
return ret;
return 1;
}
/*
@@ -1613,7 +1612,7 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
int migrate_misplaced_page(struct page *page, int node)
{
pg_data_t *pgdat = NODE_DATA(node);
int isolated = 0;
int isolated;
int nr_remaining;
LIST_HEAD(migratepages);
@@ -1621,20 +1620,16 @@ int migrate_misplaced_page(struct page *page, int node)
* Don't migrate pages that are mapped in multiple processes.
* TODO: Handle false sharing detection instead of this hammer
*/
if (page_mapcount(page) != 1) {
put_page(page);
if (page_mapcount(page) != 1)
goto out;
}
/*
* Rate-limit the amount of data that is being migrated to a node.
* Optimal placement is no good if the memory bus is saturated and
* all the time is being spent migrating!
*/
if (numamigrate_update_ratelimit(pgdat, 1)) {
put_page(page);
if (numamigrate_update_ratelimit(pgdat, 1))
goto out;
}
isolated = numamigrate_isolate_page(pgdat, page);
if (!isolated)
@@ -1651,12 +1646,19 @@ int migrate_misplaced_page(struct page *page, int node)
} else
count_vm_numa_event(NUMA_PAGE_MIGRATE);
BUG_ON(!list_empty(&migratepages));
out:
return isolated;
out:
put_page(page);
return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
* Migrates a THP to a given target node. page must be locked and is unlocked
* before returning.
*/
int migrate_misplaced_transhuge_page(struct mm_struct *mm,
struct vm_area_struct *vma,
pmd_t *pmd, pmd_t entry,
@@ -1687,29 +1689,15 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
new_page = alloc_pages_node(node,
(GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER);
if (!new_page) {
count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
goto out_dropref;
}
if (!new_page)
goto out_fail;
page_xchg_last_nid(new_page, page_last_nid(page));
isolated = numamigrate_isolate_page(pgdat, page);
-/*
- * Failing to isolate or a GUP pin prevents migration. The expected
- * page count is 2. 1 for anonymous pages without a mapping and 1
- * for the callers pin. If the page was isolated, the page will
- * need to be put back on the LRU.
- */
-if (!isolated || page_count(page) != 2) {
-count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
+if (!isolated) {
put_page(new_page);
-if (isolated) {
-putback_lru_page(page);
-isolated = 0;
-goto out;
-}
-goto out_keep_locked;
+goto out_fail;
}
/* Prepare a page as a migration target */
@@ -1737,11 +1725,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
unlock_page(new_page);
put_page(new_page); /* Free it */
-unlock_page(page);
/* Retake the callers reference and putback on LRU */
get_page(page);
putback_lru_page(page);
-count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
-goto out;
+mod_zone_page_state(page_zone(page),
+NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
+goto out_fail;
}
/*
@@ -1758,9 +1747,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
entry = pmd_mkhuge(entry);
-page_add_new_anon_rmap(new_page, vma, haddr);
+pmdp_clear_flush(vma, haddr, pmd);
set_pmd_at(mm, haddr, pmd, entry);
+page_add_new_anon_rmap(new_page, vma, haddr);
update_mmu_cache_pmd(vma, address, &entry);
page_remove_rmap(page);
/*
@@ -1779,15 +1768,20 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
out:
mod_zone_page_state(page_zone(page),
NR_ISOLATED_ANON + page_lru,
-HPAGE_PMD_NR);
return isolated;
+out_fail:
+count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
+out_dropref:
+entry = pmd_mknonnuma(entry);
+set_pmd_at(mm, haddr, pmd, entry);
+update_mmu_cache_pmd(vma, address, &entry);
+unlock_page(page);
+put_page(page);
-out_keep_locked:
return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

View File

@@ -145,7 +145,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
split_huge_page_pmd(vma, addr, pmd);
else if (change_huge_pmd(vma, pmd, addr, newprot,
prot_numa)) {
-pages += HPAGE_PMD_NR;
+pages++;
continue;
}
/* fall through */

View File

@@ -152,7 +152,7 @@ static size_t vlan_get_size(const struct net_device *dev)
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
return nla_total_size(2) + /* IFLA_VLAN_ID */
-sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
+nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
vlan_qos_map_size(vlan->nr_ingress_mappings) +
vlan_qos_map_size(vlan->nr_egress_mappings);
}

View File

@@ -134,7 +134,7 @@ static void br_stp_start(struct net_bridge *br)
if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
__br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
-else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY)
+else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
__br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
if (r == 0) {
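The one-character fix above matters because with the old '<' comparison the upper clamp was unreachable: a forward_delay above BR_MAX_FORWARD_DELAY failed the test and was left out of range. A minimal userspace sketch of the intended clamping, with demo bounds invented for illustration:

#include <stdio.h>

/* Mirrors the corrected bounds check: pull d into [lo, hi]. */
static unsigned long clamp_delay(unsigned long d, unsigned long lo,
				 unsigned long hi)
{
	if (d < lo)
		return lo;
	if (d > hi)
		return hi;
	return d;
}

int main(void)
{
	/* e.g. bounds of 2..30 seconds (values invented for the demo) */
	printf("%lu\n", clamp_delay(40, 2, 30));	/* prints 30 */
	return 0;
}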

View File

@@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
__get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
__get_user(kmsg->msg_flags, &umsg->msg_flags))
return -EFAULT;
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
return -EINVAL;
kmsg->msg_name = compat_ptr(tmp1);
kmsg->msg_iov = compat_ptr(tmp2);
kmsg->msg_control = compat_ptr(tmp3);

View File

@@ -10,6 +10,7 @@
#include <net/secure_seq.h>
#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET)
static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
static int __init net_secret_init(void)
@@ -18,6 +19,7 @@ static int __init net_secret_init(void)
return 0;
}
late_initcall(net_secret_init);
#endif
#ifdef CONFIG_INET
static u32 seq_scale(u32 seq)

View File

@@ -2258,6 +2258,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_stamp = ktime_set(-1L, 0);
sk->sk_pacing_rate = ~0U;
/*
* Before updating sk_refcnt, we must commit prior changes to memory
* (Documentation/RCU/rculist_nulls.txt for details)

View File

@@ -321,8 +321,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
/* We are going to _remove_ AH header to keep sockets happy,
* so... Later this can change. */
-if (skb_cloned(skb) &&
-    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+if (skb_unclone(skb, GFP_ATOMIC))
goto out;
skb->ip_summed = CHECKSUM_NONE;
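This is the first of many hunks in this release that fold the open-coded skb_cloned()/pskb_expand_head() pair into skb_unclone(). For orientation, the helper is essentially the following (a sketch of its 3.8-era include/linux/skbuff.h definition, shown for context, not part of this diff):

/* Take a private copy of the skb header iff the skb is cloned,
 * so the caller may modify it safely; otherwise do nothing.
 */
static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}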

View File

@@ -274,7 +274,7 @@ begintw:
if (unlikely(!INET_TW_MATCH(sk, net, acookie,
saddr, daddr, ports,
dif))) {
-sock_put(sk);
+inet_twsk_put(inet_twsk(sk));
goto begintw;
}
goto out;

View File

@@ -598,7 +598,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
goto out_oversize;
/* Head of list must not be cloned. */
-if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
+if (skb_unclone(head, GFP_ATOMIC))
goto out_nomem;
/* If the first fragment is fragmented itself, we split

View File

@@ -844,7 +844,7 @@ static int __ip_append_data(struct sock *sk,
csummode = CHECKSUM_PARTIAL;
cork->length += length;
-if (((length > mtu) || (skb && skb_is_gso(skb))) &&
+if (((length > mtu) || (skb && skb_has_frags(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,

View File

@@ -323,8 +323,17 @@ static int vti_rcv(struct sk_buff *skb)
tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
if (tunnel != NULL) {
struct pcpu_tstats *tstats;
u32 oldmark = skb->mark;
int ret;
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
/* temporarily mark the skb with the tunnel o_key, to
* only match policies with this mark.
*/
skb->mark = be32_to_cpu(tunnel->parms.o_key);
ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
skb->mark = oldmark;
if (!ret)
return -1;
tstats = this_cpu_ptr(tunnel->dev->tstats);
@@ -333,7 +342,6 @@ static int vti_rcv(struct sk_buff *skb)
tstats->rx_bytes += skb->len;
u64_stats_update_end(&tstats->syncp);
skb->mark = 0;
secpath_reset(skb);
skb->dev = tunnel->dev;
return 1;
@@ -365,7 +373,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
memset(&fl4, 0, sizeof(fl4));
flowi4_init_output(&fl4, tunnel->parms.link,
-be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos),
+be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
RT_SCOPE_UNIVERSE,
IPPROTO_IPIP, 0,
dst, tiph->saddr, 0, 0);

View File

@@ -2024,7 +2024,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
RT_SCOPE_LINK);
goto make_route;
}
-if (fl4->saddr) {
+if (!fl4->saddr) {
if (ipv4_is_multicast(fl4->daddr))
fl4->saddr = inet_select_addr(dev_out, 0,
fl4->flowi4_scope);

View File

@@ -29,6 +29,7 @@
static int zero;
static int one = 1;
static int two = 2;
static int gso_max_segs = GSO_MAX_SEGS;
static int tcp_retr1_max = 255;
static int ip_local_port_range_min[] = { 1, 1 };
static int ip_local_port_range_max[] = { 65535, 65535 };
@@ -780,6 +781,15 @@ static struct ctl_table ipv4_table[] = {
.extra1 = &zero,
.extra2 = &two,
},
{
.procname = "tcp_min_tso_segs",
.data = &sysctl_tcp_min_tso_segs,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &gso_max_segs,
},
{
.procname = "udp_mem",
.data = &sysctl_udp_mem,
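With this table entry in place the knob is exposed under /proc/sys/net/ipv4/ and clamped between 0 and GSO_MAX_SEGS. A minimal userspace reader (illustrative; the path only exists on kernels carrying this patch):

#include <stdio.h>

int main(void)
{
	/* Path exists only on kernels with this patch applied. */
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_min_tso_segs", "r");
	int segs;

	if (!f || fscanf(f, "%d", &segs) != 1) {
		perror("tcp_min_tso_segs");
		return 1;
	}
	printf("minimum segments per TSO frame: %d\n", segs);
	fclose(f);
	return 0;
}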

View File

@@ -282,6 +282,8 @@
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
int sysctl_tcp_min_tso_segs __read_mostly = 2;
struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);
@@ -793,12 +795,28 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
xmit_size_goal = mss_now;
if (large_allowed && sk_can_gso(sk)) {
-xmit_size_goal = ((sk->sk_gso_max_size - 1) -
-                  inet_csk(sk)->icsk_af_ops->net_header_len -
-                  inet_csk(sk)->icsk_ext_hdr_len -
-                  tp->tcp_header_len);
+u32 gso_size, hlen;
-/* TSQ : try to have two TSO segments in flight */
+/* Maybe we should/could use sk->sk_prot->max_header here ? */
+hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
+       inet_csk(sk)->icsk_ext_hdr_len +
+       tp->tcp_header_len;
+/* Goal is to send at least one packet per ms,
+ * not one big TSO packet every 100 ms.
+ * This preserves ACK clocking and is consistent
+ * with tcp_tso_should_defer() heuristic.
+ */
+gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
+gso_size = max_t(u32, gso_size,
+                 sysctl_tcp_min_tso_segs * mss_now);
+xmit_size_goal = min_t(u32, gso_size,
+                       sk->sk_gso_max_size - 1 - hlen);
+/* TSQ : try to have at least two segments in flight
+ * (one in NIC TX ring, another in Qdisc)
+ */
xmit_size_goal = min_t(u32, xmit_size_goal,
sysctl_tcp_limit_output_bytes >> 1);
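The net effect of the new goal computation: a flow paced at rate R sends one TSO burst roughly every half millisecond, floored by tcp_min_tso_segs and capped by the device GSO limit. Replaying the arithmetic in userspace, with all values invented for the demo:

#include <stdio.h>
#include <stdint.h>

#define MSEC_PER_SEC 1000u

int main(void)
{
	/* Demo values: ~1 Gbit/s pacing rate, standard-ish MSS. */
	uint32_t pacing_rate = 125000000;	/* bytes per second */
	uint32_t mss_now = 1448, min_tso_segs = 2;
	uint32_t gso_max_size = 65536, hlen = 52;

	/* Aim for roughly one packet every 0.5 ms... */
	uint32_t gso_size = pacing_rate / (2 * MSEC_PER_SEC);

	/* ...but never less than the sysctl floor... */
	if (gso_size < min_tso_segs * mss_now)
		gso_size = min_tso_segs * mss_now;

	/* ...and never more than the device's GSO limit. */
	uint32_t goal = gso_size;
	if (goal > gso_max_size - 1 - hlen)
		goal = gso_max_size - 1 - hlen;

	printf("size goal %u bytes (~%u segments)\n", goal, goal / mss_now);
	return 0;
}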

View File

@@ -705,6 +705,34 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
}
}
/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
* Note: TCP stack does not yet implement pacing.
* FQ packet scheduler can be used to implement cheap but effective
* TCP pacing, to smooth the burst on large writes when packets
* in flight is significantly lower than cwnd (or rwin)
*/
static void tcp_update_pacing_rate(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
u64 rate;
/* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
rate = (u64)tp->mss_cache * 2 * (HZ << 3);
rate *= max(tp->snd_cwnd, tp->packets_out);
/* Correction for small srtt : minimum srtt being 8 (1 jiffy << 3),
* be conservative and assume srtt = 1 (125 us instead of 1.25 ms)
* We probably need usec resolution in the future.
* Note: This also takes care of possible srtt=0 case,
* when tcp_rtt_estimator() was not yet called.
*/
if (tp->srtt > 8 + 2)
do_div(rate, tp->srtt);
sk->sk_pacing_rate = min_t(u64, rate, ~0U);
}
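Since srtt is stored as jiffies << 3, the formula reduces to rate = 2 * mss * cwnd / srtt in bytes per second. A userspace replay with demo values (HZ assumed to be 1000; numbers are not taken from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Demo values: HZ assumed 1000, srtt stored as jiffies << 3. */
	uint64_t hz = 1000, mss_cache = 1448;
	uint64_t snd_cwnd = 10, srtt = 100 << 3;	/* 100 ms RTT */

	/* 200 % of mss * cwnd / srtt, in bytes per second */
	uint64_t rate = mss_cache * 2 * (hz << 3);
	rate *= snd_cwnd;
	if (srtt > 8 + 2)
		rate /= srtt;

	/* 10 * 1448 / 0.1 s = 144,800 B/s; doubled -> 289,600 B/s */
	printf("pacing rate ~ %llu bytes/s\n", (unsigned long long)rate);
	return 0;
}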
/* Calculate rto without backoff. This is the second half of Van Jacobson's
* routine referred to above.
*/
@@ -1272,7 +1300,10 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
tp->lost_cnt_hint -= tcp_skb_pcount(prev);
}
-TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
+TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
TCP_SKB_CB(prev)->end_seq++;
if (skb == tcp_highest_sack(sk))
tcp_advance_highest_sack(sk, skb);
@@ -3605,7 +3636,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
bool is_dupack = false;
-u32 prior_in_flight;
+u32 prior_in_flight, prior_cwnd = tp->snd_cwnd, prior_rtt = tp->srtt;
u32 prior_fackets;
int prior_packets = tp->packets_out;
int prior_sacked = tp->sacked_out;
@@ -3723,6 +3754,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (dst)
dst_confirm(dst);
}
if (tp->srtt != prior_rtt || tp->snd_cwnd != prior_cwnd)
tcp_update_pacing_rate(sk);
return 1;
no_queue:
@@ -6063,6 +6097,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
} else
tcp_init_metrics(sk);
tcp_update_pacing_rate(sk);
/* Prevent spurious tcp_cwnd_restart() on
* first data packet.
*/

View File

@@ -1045,8 +1045,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
skb_orphan(skb);
skb->sk = sk;
-skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ?
-                  tcp_wfree : sock_wfree;
+skb->destructor = tcp_wfree;
atomic_add(skb->truesize, &sk->sk_wmem_alloc);
/* Build TCP header and checksum it. */
@@ -1135,6 +1134,9 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
/* Make sure we own this skb before messing gso_size/gso_segs */
WARN_ON_ONCE(skb_cloned(skb));
if (skb->len <= mss_now || !sk_can_gso(sk) ||
skb->ip_summed == CHECKSUM_NONE) {
/* Avoid the costly divide in the normal
@@ -1216,9 +1218,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
if (nsize < 0)
nsize = 0;
-if (skb_cloned(skb) &&
-    skb_is_nonlinear(skb) &&
-    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+if (skb_unclone(skb, GFP_ATOMIC))
return -ENOMEM;
/* Get a new skb... force flag on. */
@@ -1332,7 +1332,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
/* Remove acked data from a packet in the transmit queue. */
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
-if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+if (skb_unclone(skb, GFP_ATOMIC))
return -ENOMEM;
__pskb_trim_head(skb, len);
@@ -1784,7 +1784,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
/* If a full-sized TSO skb can be sent, do it. */
if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
-sk->sk_gso_max_segs * tp->mss_cache))
+tp->xmit_size_goal_segs * tp->mss_cache))
goto send_now;
/* Middle in queue won't get any more data, full sendable already? */
@@ -1990,7 +1990,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
BUG_ON(!tso_segs);
@@ -2014,13 +2013,20 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
break;
}
-/* TSQ : sk_wmem_alloc accounts skb truesize,
- * including skb overhead. But thats OK.
+/* TCP Small Queues :
+ * Control number of packets in qdisc/devices to two packets / or ~1 ms.
+ * This allows for :
+ * - better RTT estimation and ACK scheduling
+ * - faster recovery
+ * - high rates
 */
-if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) {
+limit = max(skb->truesize, sk->sk_pacing_rate >> 10);
+if (atomic_read(&sk->sk_wmem_alloc) > limit) {
set_bit(TSQ_THROTTLED, &tp->tsq_flags);
break;
}
limit = mss_now;
if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
@@ -2363,6 +2369,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
int oldpcount = tcp_skb_pcount(skb);
if (unlikely(oldpcount > 1)) {
if (skb_unclone(skb, GFP_ATOMIC))
return -ENOMEM;
tcp_init_tso_segs(sk, skb, cur_mss);
tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
}
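The interesting hunk above is the new TSQ budget: limit = max(skb->truesize, sk->sk_pacing_rate >> 10), i.e. at least one skb, otherwise roughly one millisecond of data at the current pacing rate (>> 10 approximates a divide by 1000). A small demo of the resulting budgets, with rates invented for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Demo: a slow ~289,600 B/s flow (the pacing example earlier)
	 * versus a fast 125,000,000 B/s flow; truesize guessed at 2 KB.
	 */
	uint32_t truesize = 2048;
	uint32_t rates[] = { 289600, 125000000 };

	for (int i = 0; i < 2; i++) {
		/* >> 10 is a cheap /1024, i.e. roughly one ms of data */
		uint32_t limit = rates[i] >> 10;

		if (limit < truesize)
			limit = truesize;
		printf("rate %9u B/s -> qdisc/device budget %u bytes\n",
		       rates[i], limit);
	}
	return 0;
}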

View File

@@ -132,7 +132,7 @@ int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
* header and optional ESP marker bytes) and then modify the
* protocol to ESP, and then call into the transform receiver.
*/
-if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+if (skb_unclone(skb, GFP_ATOMIC))
goto drop;
/* Now we can update and verify the packet length... */

View File

@@ -142,8 +142,8 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
for_each_input_rcu(rcv_notify_handlers, handler)
handler->handler(skb);
-if (skb_cloned(skb) &&
-    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+err = skb_unclone(skb, GFP_ATOMIC);
+if (err)
goto out;
if (x->props.flags & XFRM_STATE_DECAP_DSCP)

View File

@@ -521,8 +521,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
/* We are going to _remove_ AH header to keep sockets happy,
* so... Later this can change. */
-if (skb_cloned(skb) &&
-    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+if (skb_unclone(skb, GFP_ATOMIC))
goto out;
skb->ip_summed = CHECKSUM_NONE;

View File

@@ -116,7 +116,7 @@ begintw:
}
if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr,
ports, dif))) {
-sock_put(sk);
+inet_twsk_put(inet_twsk(sk));
goto begintw;
}
goto out;

View File

@@ -1271,7 +1271,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
skb = skb_peek_tail(&sk->sk_write_queue);
cork->length += length;
if (((length > mtu) ||
-(skb && skb_is_gso(skb))) &&
+(skb && skb_has_frags(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
err = ip6_ufo_append_data(sk, getfrag, from, length,

View File

@@ -369,7 +369,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
}
/* Head of list must not be cloned. */
-if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) {
+if (skb_unclone(head, GFP_ATOMIC)) {
pr_debug("skb is cloned but can't expand head");
goto out_oom;
}

View File

@@ -419,7 +419,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
goto out_oversize;
/* Head of list must not be cloned. */
-if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
+if (skb_unclone(head, GFP_ATOMIC))
goto out_oom;
/* If the first fragment is fragmented itself, we split

View File

@@ -69,8 +69,8 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto out;
-if (skb_cloned(skb) &&
-    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+err = skb_unclone(skb, GFP_ATOMIC);
+if (err)
goto out;
if (x->props.flags & XFRM_STATE_DECAP_DSCP)

View File

@@ -115,6 +115,11 @@ static void l2tp_session_set_header_len(struct l2tp_session *session, int versio
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
{
return sk->sk_user_data;
}
static inline struct l2tp_net *l2tp_pernet(struct net *net)
{
BUG_ON(!net);
@@ -525,7 +530,7 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk,
return 0;
#if IS_ENABLED(CONFIG_IPV6)
-if (sk->sk_family == PF_INET6) {
+if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
if (!uh->check) {
LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
return 1;
@@ -1088,7 +1093,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
/* Queue the packet to IP for output */
skb->local_df = 1;
#if IS_ENABLED(CONFIG_IPV6)
-if (skb->sk->sk_family == PF_INET6)
+if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped)
error = inet6_csk_xmit(skb, NULL);
else
#endif
@@ -1221,7 +1226,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
/* Calculate UDP checksum if configured to do so */
#if IS_ENABLED(CONFIG_IPV6)
-if (sk->sk_family == PF_INET6)
+if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
l2tp_xmit_ipv6_csum(sk, skb, udp_len);
else
#endif
@@ -1270,9 +1275,8 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
*/
static void l2tp_tunnel_destruct(struct sock *sk)
{
-struct l2tp_tunnel *tunnel;
+struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
-tunnel = sk->sk_user_data;
if (tunnel == NULL)
goto end;
@@ -1595,7 +1599,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
}
/* Check if this socket has already been prepped */
-tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
+tunnel = l2tp_tunnel(sk);
if (tunnel != NULL) {
/* This socket has already been prepped */
err = -EBUSY;
@@ -1624,6 +1628,24 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
if (cfg != NULL)
tunnel->debug = cfg->debug;
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
if (ipv6_addr_v4mapped(&np->saddr) &&
ipv6_addr_v4mapped(&np->daddr)) {
struct inet_sock *inet = inet_sk(sk);
tunnel->v4mapped = true;
inet->inet_saddr = np->saddr.s6_addr32[3];
inet->inet_rcv_saddr = np->rcv_saddr.s6_addr32[3];
inet->inet_daddr = np->daddr.s6_addr32[3];
} else {
tunnel->v4mapped = false;
}
}
#endif
/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
tunnel->encap = encap;
if (encap == L2TP_ENCAPTYPE_UDP) {
@@ -1631,7 +1653,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
#if IS_ENABLED(CONFIG_IPV6)
-if (sk->sk_family == PF_INET6)
+if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
udpv6_encap_enable();
else
#endif
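The new v4mapped handling keys off IPv6 sockets whose endpoints are IPv4-mapped addresses (::ffff:a.b.c.d); such tunnels must take the IPv4 transmit path even though the socket family is PF_INET6. A userspace illustration of the address form the patch detects (not part of the diff):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct in6_addr a6;
	struct in_addr a4;

	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a6);
	printf("v4-mapped? %s\n", IN6_IS_ADDR_V4MAPPED(&a6) ? "yes" : "no");

	/* The IPv4 address lives in the last 4 bytes, exactly what the
	 * patch copies into inet_saddr/inet_daddr. */
	memcpy(&a4, &a6.s6_addr[12], 4);
	printf("embedded IPv4: %s\n", inet_ntoa(a4));
	return 0;
}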

View File

@@ -190,6 +190,9 @@ struct l2tp_tunnel {
struct sock *sock; /* Parent socket */
int fd; /* Parent fd, if tunnel socket
* was created by userspace */
#if IS_ENABLED(CONFIG_IPV6)
bool v4mapped;
#endif
uint8_t priv[0]; /* private data */
};

View File

@@ -352,7 +352,9 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
goto error_put_sess_tun;
}
local_bh_disable();
l2tp_xmit_skb(session, skb, session->hdr_len);
local_bh_enable();
sock_put(ps->tunnel_sock);
sock_put(sk);
@@ -421,7 +423,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
skb->data[0] = ppph[0];
skb->data[1] = ppph[1];
local_bh_disable();
l2tp_xmit_skb(session, skb, session->hdr_len);
local_bh_enable();
sock_put(sk_tun);
sock_put(sk);

View File

@@ -3113,7 +3113,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
return -EINVAL;
}
band = chanctx_conf->def.chan->band;
-sta = sta_info_get(sdata, peer);
+sta = sta_info_get_bss(sdata, peer);
if (sta) {
qos = test_sta_flag(sta, WLAN_STA_WME);
} else {

View File

@@ -861,6 +861,8 @@ struct tpt_led_trigger {
* that the scan completed.
* @SCAN_ABORTED: Set for our scan work function when the driver reported
* a scan complete for an aborted scan.
* @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
* cancelled.
*/
enum {
SCAN_SW_SCANNING,
@@ -868,6 +870,7 @@ enum {
SCAN_ONCHANNEL_SCANNING,
SCAN_COMPLETED,
SCAN_ABORTED,
SCAN_HW_CANCELLED,
};
/**

View File

@@ -2916,6 +2916,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
case NL80211_IFTYPE_ADHOC:
if (!bssid)
return 0;
if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
return 0;
if (ieee80211_is_beacon(hdr->frame_control)) {
return 1;
} else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {

View File

@@ -215,6 +215,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
enum ieee80211_band band;
int i, ielen, n_chans;
if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
return false;
do {
if (local->hw_scan_band == IEEE80211_NUM_BANDS)
return false;
@@ -903,7 +906,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
if (!local->scan_req)
goto out;
/*
* We have a scan running and the driver already reported completion,
* but the worker hasn't run yet or is stuck on the mutex - mark it as
* cancelled.
*/
if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
test_bit(SCAN_COMPLETED, &local->scanning)) {
set_bit(SCAN_HW_CANCELLED, &local->scanning);
goto out;
}
if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
/*
* Make sure that __ieee80211_scan_completed doesn't trigger a
* scan on another band.
*/
set_bit(SCAN_HW_CANCELLED, &local->scanning);
if (local->ops->cancel_hw_scan)
drv_cancel_hw_scan(local,
rcu_dereference_protected(local->scan_sdata,

View File

@@ -180,6 +180,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
sta->last_rx = jiffies;
if (ieee80211_is_data_qos(mgmt->frame_control)) {
struct ieee80211_hdr *hdr = (void *) skb->data;
u8 *qc = ieee80211_get_qos_ctl(hdr);

View File

@@ -1149,7 +1149,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->sta = rcu_dereference(sdata->u.vlan.sta);
if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
return TX_DROP;
-} else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
+} else if (info->flags & (IEEE80211_TX_CTL_INJECTED |
+           IEEE80211_TX_INTFL_NL80211_FRAME_TX) ||
tx->sdata->control_port_protocol == tx->skb->protocol) {
tx->sta = sta_info_get_bss(sdata, hdr->addr1);
}

View File

@@ -2105,6 +2105,10 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
}
rate = cfg80211_calculate_bitrate(&ri);
if (WARN_ONCE(!rate,
"Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n",
status->flag, status->rate_idx, status->vht_nss))
return 0;
/* rewind from end of MPDU */
if (status->flag & RX_FLAG_MACTIME_END)

View File

@@ -207,10 +207,8 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
struct tcf_ipt *ipt = a->priv;
struct xt_action_param par;
-if (skb_cloned(skb)) {
-if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-return TC_ACT_UNSPEC;
-}
+if (skb_unclone(skb, GFP_ATOMIC))
+return TC_ACT_UNSPEC;
spin_lock(&ipt->tcf_lock);

View File

@@ -130,8 +130,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
int i, munged = 0;
unsigned int off;
-if (skb_cloned(skb) &&
-    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+if (skb_unclone(skb, GFP_ATOMIC))
return p->tcf_action;
off = skb_network_offset(skb);

View File

@@ -550,7 +550,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
* by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
*/
if (!sctp_checksum_disable) {
-if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
+if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
+    (dst_xfrm(dst) != NULL) || packet->ipfragok) {
__u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
/* 3) Put the resultant value into the checksum field in the

View File

@@ -1980,6 +1980,16 @@ struct used_address {
unsigned int name_len;
};
static int copy_msghdr_from_user(struct msghdr *kmsg,
struct msghdr __user *umsg)
{
if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
return -EFAULT;
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
return -EINVAL;
return 0;
}
static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
struct msghdr *msg_sys, unsigned int flags,
struct used_address *used_address)
@@ -1998,8 +2008,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
if (MSG_CMSG_COMPAT & flags) {
if (get_compat_msghdr(msg_sys, msg_compat))
return -EFAULT;
-} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
-return -EFAULT;
+} else {
+err = copy_msghdr_from_user(msg_sys, msg);
+if (err)
+return err;
+}
if (msg_sys->msg_iovlen > UIO_FASTIOV) {
err = -EMSGSIZE;
@@ -2207,8 +2220,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
if (MSG_CMSG_COMPAT & flags) {
if (get_compat_msghdr(msg_sys, msg_compat))
return -EFAULT;
-} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
-return -EFAULT;
+} else {
+err = copy_msghdr_from_user(msg_sys, msg);
+if (err)
+return err;
+}
if (msg_sys->msg_iovlen > UIO_FASTIOV) {
err = -EMSGSIZE;
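The new copy_msghdr_from_user() rejects a msg_namelen larger than sizeof(struct sockaddr_storage) up front; previously the unchecked length could reach the audit path, where it overflowed a sockaddr_storage-sized buffer. A hypothetical probe, not from the patch; on a fixed kernel sendmsg() fails cleanly with EINVAL:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
	struct sockaddr_un sun;
	char payload = 0;
	struct iovec iov = { .iov_base = &payload, .iov_len = 1 };
	struct msghdr msg;

	memset(&sun, 0, sizeof(sun));
	sun.sun_family = AF_UNIX;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = &sun;
	msg.msg_namelen = 4096;	/* > sizeof(struct sockaddr_storage) */
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	if (sendmsg(fd, &msg, 0) < 0)
		printf("sendmsg: %s\n", strerror(errno));
	close(fd);
	return 0;
}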

View File

@@ -1247,6 +1247,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
return 0;
}
static void unix_sock_inherit_flags(const struct socket *old,
struct socket *new)
{
if (test_bit(SOCK_PASSCRED, &old->flags))
set_bit(SOCK_PASSCRED, &new->flags);
if (test_bit(SOCK_PASSSEC, &old->flags))
set_bit(SOCK_PASSSEC, &new->flags);
}
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sock *sk = sock->sk;
@@ -1281,6 +1290,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
/* attach accepted sock to socket */
unix_state_lock(tsk);
newsock->state = SS_CONNECTED;
unix_sock_inherit_flags(sock, newsock);
sock_graft(tsk, newsock);
unix_state_unlock(tsk);
return 0;
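unix_sock_inherit_flags() closes a gap where a listener's SOCK_PASSCRED/SOCK_PASSSEC settings were not copied to sockets returned by accept(). A hypothetical userspace probe (error handling omitted; the abstract socket name is invented for the demo):

#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un addr;
	socklen_t alen = offsetof(struct sockaddr_un, sun_path) + 2;
	int lfd = socket(AF_UNIX, SOCK_STREAM, 0);
	int cfd = socket(AF_UNIX, SOCK_STREAM, 0);
	int on = 1, afd;
	socklen_t olen = sizeof(on);

	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_UNIX;
	addr.sun_path[1] = 'p';		/* abstract-namespace name "\0p" */

	bind(lfd, (struct sockaddr *)&addr, alen);
	setsockopt(lfd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
	listen(lfd, 1);
	connect(cfd, (struct sockaddr *)&addr, alen);
	afd = accept(lfd, NULL, NULL);

	on = 0;
	getsockopt(afd, SOL_SOCKET, SO_PASSCRED, &on, &olen);
	printf("accepted socket SO_PASSCRED=%d\n", on);	/* 1 with the fix */

	close(afd);
	close(cfd);
	close(lfd);
	return 0;
}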

View File

@@ -124,6 +124,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
rep->udiag_family = AF_UNIX;
rep->udiag_type = sk->sk_type;
rep->udiag_state = sk->sk_state;
rep->pad = 0;
rep->udiag_ino = sk_ino;
sock_diag_save_cookie(sk, rep->udiag_cookie);

View File

@@ -269,6 +269,8 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
if (chan->flags & IEEE80211_CHAN_DISABLED)
continue;
wdev->wext.ibss.chandef.chan = chan;
wdev->wext.ibss.chandef.center_freq1 =
chan->center_freq;
break;
}
@@ -353,6 +355,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
if (chan) {
wdev->wext.ibss.chandef.chan = chan;
wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
wdev->wext.ibss.chandef.center_freq1 = freq;
wdev->wext.ibss.channel_fixed = true;
} else {
/* cfg80211_ibss_wext_join will pick one if needed */

Some files were not shown because too many files have changed in this diff.