Merge tag v3.10.44 into linux-linaro-lsk

This is the 3.10.44 stable release.
Alex Shi committed on 2014-06-17 15:50:01 +08:00
25 changed files with 151 additions and 99 deletions


@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 43
+SUBLEVEL = 44
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish


@@ -124,7 +124,7 @@
 /* Device Bus parameters are required */
 /* Read parameters */
-devbus,bus-width = <8>;
+devbus,bus-width = <16>;
 devbus,turn-off-ps = <60000>;
 devbus,badr-skew-ps = <0>;
 devbus,acc-first-ps = <124000>;


@@ -152,7 +152,7 @@
 /* Device Bus parameters are required */
 /* Read parameters */
-devbus,bus-width = <8>;
+devbus,bus-width = <16>;
 devbus,turn-off-ps = <60000>;
 devbus,badr-skew-ps = <0>;
 devbus,acc-first-ps = <124000>;


@@ -444,10 +444,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
.driver_data = board_ahci_yes_fbs },
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */


@@ -382,6 +382,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 struct ib_device *ib_dev = cma_id->device;
 int ret = 0;
+spin_lock_bh(&np->np_thread_lock);
+if (!np->enabled) {
+spin_unlock_bh(&np->np_thread_lock);
+pr_debug("iscsi_np is not enabled, reject connect request\n");
+return rdma_reject(cma_id, NULL, 0);
+}
+spin_unlock_bh(&np->np_thread_lock);
 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
 cma_id, cma_id->context);


@@ -183,6 +183,7 @@ static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
 else
 hcsr &= ~H_IE;
+dev->recvd_hw_ready = false;
 mei_me_reg_write(hw, H_CSR, hcsr);
 if (dev->dev_state == MEI_DEV_POWER_DOWN)
@@ -233,10 +234,7 @@ static bool mei_me_hw_is_ready(struct mei_device *dev)
 static int mei_me_hw_ready_wait(struct mei_device *dev)
 {
 int err;
-if (mei_me_hw_is_ready(dev))
-return 0;
-dev->recvd_hw_ready = false;
 mutex_unlock(&dev->device_lock);
 err = wait_event_interruptible_timeout(dev->wait_hw_ready,
 dev->recvd_hw_ready,


@@ -55,7 +55,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 cq->ring = ring;
 cq->is_tx = mode;
-spin_lock_init(&cq->lock);
 err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
 cq->buf_size, 2 * PAGE_SIZE);


@@ -1190,15 +1190,11 @@ static void mlx4_en_netpoll(struct net_device *dev)
 {
 struct mlx4_en_priv *priv = netdev_priv(dev);
 struct mlx4_en_cq *cq;
-unsigned long flags;
 int i;
 for (i = 0; i < priv->rx_ring_num; i++) {
 cq = &priv->rx_cq[i];
-spin_lock_irqsave(&cq->lock, flags);
-napi_synchronize(&cq->napi);
-mlx4_en_process_rx_cq(dev, cq, 0);
-spin_unlock_irqrestore(&cq->lock, flags);
+napi_schedule(&cq->napi);
 }
 }
 #endif


@@ -299,7 +299,6 @@ struct mlx4_en_cq {
 struct mlx4_cq mcq;
 struct mlx4_hwq_resources wqres;
 int ring;
-spinlock_t lock;
 struct net_device *dev;
 struct napi_struct napi;
 int size;


@@ -1295,7 +1295,6 @@ struct megasas_instance {
 u32 *reply_queue;
 dma_addr_t reply_queue_h;
-unsigned long base_addr;
 struct megasas_register_set __iomem *reg_set;
 struct megasas_pd_list pd_list[MEGASAS_MAX_PD];


@@ -3461,6 +3461,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 u32 max_sectors_1;
 u32 max_sectors_2;
 u32 tmp_sectors, msix_enable;
+resource_size_t base_addr;
 struct megasas_register_set __iomem *reg_set;
 struct megasas_ctrl_info *ctrl_info;
 unsigned long bar_list;
@@ -3469,14 +3470,14 @@ static int megasas_init_fw(struct megasas_instance *instance)
 /* Find first memory bar */
 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
 instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
-instance->base_addr = pci_resource_start(instance->pdev, instance->bar);
 if (pci_request_selected_regions(instance->pdev, instance->bar,
 "megasas: LSI")) {
 printk(KERN_DEBUG "megasas: IO memory region busy!\n");
 return -EBUSY;
 }
-instance->reg_set = ioremap_nocache(instance->base_addr, 8192);
+base_addr = pci_resource_start(instance->pdev, instance->bar);
+instance->reg_set = ioremap_nocache(base_addr, 8192);
 if (!instance->reg_set) {
 printk(KERN_DEBUG "megasas: Failed to map IO mem\n");


@@ -460,6 +460,7 @@ int iscsit_del_np(struct iscsi_np *np)
 spin_lock_bh(&np->np_thread_lock);
 np->np_exports--;
 if (np->np_exports) {
+np->enabled = true;
 spin_unlock_bh(&np->np_thread_lock);
 return 0;
 }


@@ -760,6 +760,7 @@ struct iscsi_np {
 int np_ip_proto;
 int np_sock_type;
 enum np_thread_state_table np_thread_state;
+bool enabled;
 enum iscsi_timer_flags_table np_login_timer_flags;
 u32 np_exports;
 enum np_flags_table np_flags;


@@ -250,6 +250,28 @@ static void iscsi_login_set_conn_values(
 mutex_unlock(&auth_id_lock);
 }
+static __printf(2, 3) int iscsi_change_param_sprintf(
+struct iscsi_conn *conn,
+const char *fmt, ...)
+{
+va_list args;
+unsigned char buf[64];
+memset(buf, 0, sizeof buf);
+va_start(args, fmt);
+vsnprintf(buf, sizeof buf, fmt, args);
+va_end(args);
+if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ISCSI_LOGIN_STATUS_NO_RESOURCES);
+return -1;
+}
+return 0;
+}
 /*
 * This is the leading connection of a new session,
 * or session reinstatement.
@@ -339,7 +361,6 @@ static int iscsi_login_zero_tsih_s2(
 {
 struct iscsi_node_attrib *na;
 struct iscsi_session *sess = conn->sess;
-unsigned char buf[32];
 bool iser = false;
 sess->tpg = conn->tpg;
@@ -380,26 +401,16 @@ static int iscsi_login_zero_tsih_s2(
 *
 * In our case, we have already located the struct iscsi_tiqn at this point.
 */
-memset(buf, 0, 32);
-sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
-if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
-iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-ISCSI_LOGIN_STATUS_NO_RESOURCES);
+if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
 return -1;
-}
 /*
 * Workaround for Initiators that have broken connection recovery logic.
 *
 * "We would really like to get rid of this." Linux-iSCSI.org team
 */
-memset(buf, 0, 32);
-sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl);
-if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
-iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-ISCSI_LOGIN_STATUS_NO_RESOURCES);
+if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl))
 return -1;
-}
 if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
 return -1;
@@ -411,12 +422,9 @@ static int iscsi_login_zero_tsih_s2(
 unsigned long mrdsl, off;
 int rc;
-sprintf(buf, "RDMAExtensions=Yes");
-if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
-iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-ISCSI_LOGIN_STATUS_NO_RESOURCES);
+if (iscsi_change_param_sprintf(conn, "RDMAExtensions=Yes"))
 return -1;
-}
 /*
 * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
 * Immediate Data + Unsolicitied Data-OUT if necessary..
@@ -446,12 +454,8 @@ static int iscsi_login_zero_tsih_s2(
 pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down"
 " to PAGE_SIZE\n", mrdsl);
-sprintf(buf, "MaxRecvDataSegmentLength=%lu\n", mrdsl);
-if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
-iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-ISCSI_LOGIN_STATUS_NO_RESOURCES);
+if (iscsi_change_param_sprintf(conn, "MaxRecvDataSegmentLength=%lu\n", mrdsl))
 return -1;
-}
 }
 return 0;
@@ -984,6 +988,7 @@ int iscsi_target_setup_login_socket(
 }
 np->np_transport = t;
+np->enabled = true;
 return 0;
 }
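
Note: the iscsi_change_param_sprintf() helper added above folds the repeated memset/sprintf/iscsi_change_param_value/iscsit_tx_login_rsp sequence into a single printf-style call. A minimal userspace sketch of the same wrapper pattern, for illustration only; the names and the apply_param() callback are hypothetical stand-ins for iscsi_change_param_value(), not part of the patch:

#include <stdarg.h>
#include <stdio.h>

/* Format a "Key=Value" string into a fixed buffer, then hand it to a
 * follow-up step; return -1 if that step fails, 0 otherwise. */
static int change_param_sprintf(int (*apply)(const char *),
				const char *fmt, ...)
	__attribute__((format(printf, 2, 3)));

static int change_param_sprintf(int (*apply)(const char *),
				const char *fmt, ...)
{
	char buf[64];
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	return apply(buf) < 0 ? -1 : 0;
}

static int apply_param(const char *kv)
{
	/* Stand-in for iscsi_change_param_value(); just echo the pair. */
	return printf("set %s\n", kv) < 0 ? -1 : 0;
}

int main(void)
{
	return change_param_sprintf(apply_param, "ErrorRecoveryLevel=%d", 0);
}

The __attribute__((format(printf, 2, 3))) declaration mirrors the kernel's __printf(2, 3) annotation so the compiler can type-check the format arguments.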


@@ -175,13 +175,16 @@ void iscsit_put_tpg(struct iscsi_portal_group *tpg)
 static void iscsit_clear_tpg_np_login_thread(
 struct iscsi_tpg_np *tpg_np,
-struct iscsi_portal_group *tpg)
+struct iscsi_portal_group *tpg,
+bool shutdown)
 {
 if (!tpg_np->tpg_np) {
 pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
 return;
 }
+if (shutdown)
+tpg_np->tpg_np->enabled = false;
 iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg);
 }
@@ -197,7 +200,7 @@ void iscsit_clear_tpg_np_login_threads(
 continue;
 }
 spin_unlock(&tpg->tpg_np_lock);
-iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+iscsit_clear_tpg_np_login_thread(tpg_np, tpg, false);
 spin_lock(&tpg->tpg_np_lock);
 }
 spin_unlock(&tpg->tpg_np_lock);
@@ -520,7 +523,7 @@ static int iscsit_tpg_release_np(
 struct iscsi_portal_group *tpg,
 struct iscsi_np *np)
 {
-iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true);
 pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,


@@ -409,7 +409,16 @@ static inline int core_alua_state_standby(
 case REPORT_LUNS:
 case RECEIVE_DIAGNOSTIC:
 case SEND_DIAGNOSTIC:
+case READ_CAPACITY:
 return 0;
+case SERVICE_ACTION_IN:
+switch (cdb[1] & 0x1f) {
+case SAI_READ_CAPACITY_16:
+return 0;
+default:
+*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+return 1;
+}
 case MAINTENANCE_IN:
 switch (cdb[1] & 0x1f) {
 case MI_REPORT_TARGET_PGS:


@@ -2034,6 +2034,11 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}
if (!(dev->dev_flags & DF_CONFIGURED)) {
pr_err("Unable to set alua_access_state while device is"
" not configured\n");
return -ENODEV;
}
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {


@@ -50,14 +50,14 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr)
 if ((ia_valid & ATTR_UID) &&
 (!uid_eq(current_fsuid(), inode->i_uid) ||
 !uid_eq(attr->ia_uid, inode->i_uid)) &&
-!inode_capable(inode, CAP_CHOWN))
+!capable_wrt_inode_uidgid(inode, CAP_CHOWN))
 return -EPERM;
 /* Make sure caller can chgrp. */
 if ((ia_valid & ATTR_GID) &&
 (!uid_eq(current_fsuid(), inode->i_uid) ||
 (!in_group_p(attr->ia_gid) && !gid_eq(attr->ia_gid, inode->i_gid))) &&
-!inode_capable(inode, CAP_CHOWN))
+!capable_wrt_inode_uidgid(inode, CAP_CHOWN))
 return -EPERM;
 /* Make sure a caller can chmod. */
@@ -67,7 +67,7 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr)
 /* Also check the setgid bit! */
 if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
 inode->i_gid) &&
-!inode_capable(inode, CAP_FSETID))
+!capable_wrt_inode_uidgid(inode, CAP_FSETID))
 attr->ia_mode &= ~S_ISGID;
 }
@@ -160,7 +160,7 @@ void setattr_copy(struct inode *inode, const struct iattr *attr)
 umode_t mode = attr->ia_mode;
 if (!in_group_p(inode->i_gid) &&
-!inode_capable(inode, CAP_FSETID))
+!capable_wrt_inode_uidgid(inode, CAP_FSETID))
 mode &= ~S_ISGID;
 inode->i_mode = mode;
 }


@@ -1837,14 +1837,18 @@ EXPORT_SYMBOL(inode_init_owner);
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
-* Return true if current either has CAP_FOWNER to the inode, or
-* owns the file.
+* Return true if current either has CAP_FOWNER in a namespace with the
+* inode owner uid mapped, or owns the file.
 */
 bool inode_owner_or_capable(const struct inode *inode)
 {
+struct user_namespace *ns;
 if (uid_eq(current_fsuid(), inode->i_uid))
 return true;
-if (inode_capable(inode, CAP_FOWNER))
+ns = current_user_ns();
+if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
 return true;
 return false;
 }


@@ -321,10 +321,11 @@ int generic_permission(struct inode *inode, int mask)
 if (S_ISDIR(inode->i_mode)) {
 /* DACs are overridable for directories */
-if (inode_capable(inode, CAP_DAC_OVERRIDE))
+if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
 return 0;
 if (!(mask & MAY_WRITE))
-if (inode_capable(inode, CAP_DAC_READ_SEARCH))
+if (capable_wrt_inode_uidgid(inode,
+CAP_DAC_READ_SEARCH))
 return 0;
 return -EACCES;
 }
@@ -334,7 +335,7 @@ int generic_permission(struct inode *inode, int mask)
 * at least one exec bit set.
 */
 if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
-if (inode_capable(inode, CAP_DAC_OVERRIDE))
+if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
 return 0;
 /*
@@ -342,7 +343,7 @@ int generic_permission(struct inode *inode, int mask)
 */
 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
 if (mask == MAY_READ)
-if (inode_capable(inode, CAP_DAC_READ_SEARCH))
+if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
 return 0;
 return -EACCES;
@@ -2199,7 +2200,7 @@ static inline int check_sticky(struct inode *dir, struct inode *inode)
 return 0;
 if (uid_eq(dir->i_uid, fsuid))
 return 0;
-return !inode_capable(inode, CAP_FOWNER);
+return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
 }
 /*


@@ -211,7 +211,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
 extern bool capable(int cap);
 extern bool ns_capable(struct user_namespace *ns, int cap);
 extern bool nsown_capable(int cap);
-extern bool inode_capable(const struct inode *inode, int cap);
+extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
 /* audit system wants to get cap info from files as well */


@@ -733,6 +733,22 @@ static enum audit_state audit_filter_task(struct task_struct *tsk, char **key)
 return AUDIT_BUILD_CONTEXT;
 }
+static int audit_in_mask(const struct audit_krule *rule, unsigned long val)
+{
+int word, bit;
+if (val > 0xffffffff)
+return false;
+word = AUDIT_WORD(val);
+if (word >= AUDIT_BITMASK_SIZE)
+return false;
+bit = AUDIT_BIT(val);
+return rule->mask[word] & bit;
+}
 /* At syscall entry and exit time, this filter is called if the
 * audit_state is not low enough that auditing cannot take place, but is
 * also not high enough that we already know we have to write an audit
@@ -750,11 +766,8 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
 rcu_read_lock();
 if (!list_empty(list)) {
-int word = AUDIT_WORD(ctx->major);
-int bit = AUDIT_BIT(ctx->major);
 list_for_each_entry_rcu(e, list, list) {
-if ((e->rule.mask[word] & bit) == bit &&
+if (audit_in_mask(&e->rule, ctx->major) &&
 audit_filter_rules(tsk, &e->rule, ctx, NULL,
 &state, false)) {
 rcu_read_unlock();
@@ -774,20 +787,16 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
 static int audit_filter_inode_name(struct task_struct *tsk,
 struct audit_names *n,
 struct audit_context *ctx) {
-int word, bit;
 int h = audit_hash_ino((u32)n->ino);
 struct list_head *list = &audit_inode_hash[h];
 struct audit_entry *e;
 enum audit_state state;
-word = AUDIT_WORD(ctx->major);
-bit = AUDIT_BIT(ctx->major);
 if (list_empty(list))
 return 0;
 list_for_each_entry_rcu(e, list, list) {
-if ((e->rule.mask[word] & bit) == bit &&
+if (audit_in_mask(&e->rule, ctx->major) &&
 audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) {
 ctx->current_state = state;
 return 1;
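
Note: audit_in_mask() above replaces the open-coded AUDIT_WORD()/AUDIT_BIT() checks in audit_filter_syscall() and audit_filter_inode_name() with one helper. A small standalone sketch of the same word/bit arithmetic, with AUDIT_BITMASK_SIZE and the two macros written out in the spirit of include/uapi/linux/audit.h; the array-based signature and the example syscall number (59, execve on x86-64) are illustrative only:

#include <stdio.h>

#define AUDIT_BITMASK_SIZE 64
#define AUDIT_WORD(nr) ((unsigned int)((nr) / 32))
#define AUDIT_BIT(nr)  (1U << ((nr) - AUDIT_WORD(nr) * 32))

/* Same check as the kernel helper, but taking the bitmask directly. */
static int audit_in_mask(const unsigned int *mask, unsigned long val)
{
	unsigned int word, bit;

	if (val > 0xffffffff)
		return 0;
	word = AUDIT_WORD(val);
	if (word >= AUDIT_BITMASK_SIZE)
		return 0;
	bit = AUDIT_BIT(val);
	return (mask[word] & bit) != 0;
}

int main(void)
{
	unsigned int mask[AUDIT_BITMASK_SIZE] = { 0 };

	mask[AUDIT_WORD(59)] |= AUDIT_BIT(59);
	printf("59 in mask: %d, 60 in mask: %d\n",
	       audit_in_mask(mask, 59), audit_in_mask(mask, 60));
	return 0;
}

Rejecting values above 0xffffffff and words beyond AUDIT_BITMASK_SIZE keeps out-of-range syscall numbers from indexing past the fixed-size rule mask, which is the point of centralising the check.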


@@ -445,22 +445,18 @@ bool nsown_capable(int cap)
 }
 /**
-* inode_capable - Check superior capability over inode
+* capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
 * @inode: The inode in question
 * @cap: The capability in question
 *
-* Return true if the current task has the given superior capability
-* targeted at it's own user namespace and that the given inode is owned
-* by the current user namespace or a child namespace.
-*
-* Currently we check to see if an inode is owned by the current
-* user namespace by seeing if the inode's owner maps into the
-* current user namespace.
-*
+* Return true if the current task has the given capability targeted at
+* its own user namespace and that the given inode's uid and gid are
+* mapped into the current user namespace.
 */
-bool inode_capable(const struct inode *inode, int cap)
+bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
 {
 struct user_namespace *ns = current_user_ns();
-return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
+return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
+kgid_has_mapping(ns, inode->i_gid);
 }


@@ -657,17 +657,21 @@ static void isolate_freepages(struct zone *zone,
 struct compact_control *cc)
 {
 struct page *page;
-unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
+unsigned long high_pfn, low_pfn, pfn, z_end_pfn;
 int nr_freepages = cc->nr_freepages;
 struct list_head *freelist = &cc->freepages;
 /*
 * Initialise the free scanner. The starting point is where we last
-* scanned from (or the end of the zone if starting). The low point
-* is the end of the pageblock the migration scanner is using.
+* successfully isolated from, zone-cached value, or the end of the
+* zone when isolating for the first time. We need this aligned to
+* the pageblock boundary, because we do pfn -= pageblock_nr_pages
+* in the for loop.
+* The low boundary is the end of the pageblock the migration scanner
+* is using.
 */
-pfn = cc->free_pfn;
-low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
+low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
 /*
 * Take care that if the migration scanner is at the end of the zone
@@ -683,9 +687,10 @@ static void isolate_freepages(struct zone *zone,
 * pages on cc->migratepages. We stop searching if the migrate
 * and free page scanners meet or enough free pages are isolated.
 */
-for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
 pfn -= pageblock_nr_pages) {
 unsigned long isolated;
+unsigned long end_pfn;
 if (!pfn_valid(pfn))
 continue;
@@ -713,13 +718,10 @@ static void isolate_freepages(struct zone *zone,
 isolated = 0;
 /*
-* As pfn may not start aligned, pfn+pageblock_nr_page
-* may cross a MAX_ORDER_NR_PAGES boundary and miss
-* a pfn_valid check. Ensure isolate_freepages_block()
-* only scans within a pageblock
+* Take care when isolating in last pageblock of a zone which
+* ends in the middle of a pageblock.
 */
-end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
-end_pfn = min(end_pfn, z_end_pfn);
+end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
 isolated = isolate_freepages_block(cc, pfn, end_pfn,
 freelist, false);
 nr_freepages += isolated;
@@ -738,7 +740,14 @@ static void isolate_freepages(struct zone *zone,
 /* split_free_page does not map the pages */
 map_pages(freelist);
-cc->free_pfn = high_pfn;
+/*
+* If we crossed the migrate scanner, we want to keep it that way
+* so that compact_finished() may detect this
+*/
+if (pfn < low_pfn)
+cc->free_pfn = max(pfn, zone->zone_start_pfn);
+else
+cc->free_pfn = high_pfn;
 cc->nr_freepages = nr_freepages;
 }
@@ -946,6 +955,14 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 ;
 }
+/*
+* Clear pageblock skip if there were failures recently and compaction
+* is about to be retried after being deferred. kswapd does not do
+* this reset as it'll reset the cached information when going to sleep.
+*/
+if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+__reset_isolation_suitable(zone);
 /*
 * Setup to move all movable pages to the end of the zone. Used cached
 * information on where the scanners should start but check that it
@@ -962,14 +979,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 zone->compact_cached_migrate_pfn = cc->migrate_pfn;
 }
-/*
-* Clear pageblock skip if there were failures recently and compaction
-* is about to be retried after being deferred. kswapd does not do
-* this reset as it'll reset the cached information when going to sleep.
-*/
-if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
-__reset_isolation_suitable(zone);
 migrate_prep_local();
 while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
@@ -1003,7 +1012,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 if (err) {
 putback_movable_pages(&cc->migratepages);
 cc->nr_migratepages = 0;
-if (err == -ENOMEM) {
+/*
+* migrate_pages() may return -ENOMEM when scanners meet
+* and we want compact_finished() to detect it
+*/
+if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
 ret = COMPACT_PARTIAL;
 goto out;
 }
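
Note: the isolate_freepages() changes above keep the free scanner on pageblock boundaries: the starting pfn is rounded down with a mask and the migration-side limit is rounded up with ALIGN(). A standalone sketch of that rounding, assuming 512-page (order-9) pageblocks and arbitrary example pfns; ALIGN() is written out in its usual power-of-two form for the demo:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long pageblock_nr_pages = 512;	/* assumed order-9 pageblock */
	unsigned long free_pfn = 262700;	/* arbitrary example values */
	unsigned long migrate_pfn = 131500;

	/* Free scanner start: round down to a pageblock boundary. */
	unsigned long pfn = free_pfn & ~(pageblock_nr_pages - 1);
	/* Low boundary: round up past the migrate scanner's pageblock. */
	unsigned long low_pfn = ALIGN(migrate_pfn + 1, pageblock_nr_pages);

	printf("pfn=%lu low_pfn=%lu\n", pfn, low_pfn);	/* 262656 and 131584 */
	return 0;
}

Snapping both ends to pageblock boundaries keeps the pfn -= pageblock_nr_pages loop stepping through whole pageblocks, which is what the rewritten comment above calls out.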


@@ -22,7 +22,6 @@
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
-/* Returns new sk_buff, or NULL */
 static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
 int err;
@@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 err = ip_defrag(skb, user);
 local_bh_enable();
-if (!err)
+if (!err) {
 ip_send_check(ip_hdr(skb));
+skb->local_df = 1;
+}
 return err;
 }