ANDROID: mm: add cma pcp list

Add a PCP list for __GFP_CMA allocations so as not to deprive
MIGRATE_MOVABLE allocations of quick access to pages on their PCP
lists.

Bug: 158645321
Signed-off-by: Liam Mark <lmark@codeaurora.org>
Signed-off-by: Chris Goldsworthy <cgoldswo@codeaurora.org>
[isaacm@codeaurora.org: Resolve merge conflicts related to new mm
features]
Signed-off-by: Isaac J. Manjarres <isaacm@quicinc.com>
Change-Id: I2f238ea5f8e4aef9c45b1a3180ce6b6a36d63d77
This commit is contained in:
Chris Goldsworthy
2020-11-16 18:38:13 -08:00
committed by Suren Baghdasaryan
parent af82009880
commit 37b2d597bb
2 changed files with 61 additions and 42 deletions

View File

@@ -43,8 +43,6 @@ enum migratetype {
MIGRATE_UNMOVABLE,
MIGRATE_MOVABLE,
MIGRATE_RECLAIMABLE,
MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
/*
* MIGRATE_CMA migration type is designed to mimic the way
@@ -61,6 +59,8 @@ enum migratetype {
*/
MIGRATE_CMA,
#endif
MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
#endif

View File

@@ -316,10 +316,10 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
"Unmovable",
"Movable",
"Reclaimable",
"HighAtomic",
#ifdef CONFIG_CMA
"CMA",
#endif
"HighAtomic",
#ifdef CONFIG_MEMORY_ISOLATION
"Isolate",
#endif
@@ -3068,6 +3068,39 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
return allocated;
}
/*
 * Return the pcp list that corresponds to the migrate type if that list isn't
 * empty.
 * If the list is empty, try to refill it in bulk from the buddy allocator; if
 * it is still empty afterwards, return NULL.
 */
static struct list_head *get_populated_pcp_list(struct zone *zone,
			unsigned int order, struct per_cpu_pages *pcp,
			int migratetype, unsigned int alloc_flags)
{
	struct list_head *list = &pcp->lists[order_to_pindex(migratetype, order)];

	if (list_empty(list)) {
		int batch = READ_ONCE(pcp->batch);
		int alloced;

		/*
		 * Scale batch relative to order if batch implies
		 * free pages can be stored on the PCP. Batch can
		 * be 1 for small zones or for boot pagesets which
		 * should never store free pages as the pages may
		 * belong to arbitrary zones.
		 */
		if (batch > 1)
			batch = max(batch >> order, 2);

		/*
		 * Pass the order-scaled batch computed above; using
		 * pcp->batch here would ignore the scaling and over-fill
		 * the PCP list for high-order allocations.
		 */
		alloced = rmqueue_bulk(zone, order, batch, list,
				migratetype, alloc_flags);

		/* rmqueue_bulk() returns page count; track base pages. */
		pcp->count += alloced << order;
		if (list_empty(list))
			list = NULL;
	}

	return list;
}
#ifdef CONFIG_NUMA
/*
* Called from the vmstat counter updater to drain pagesets of this
@@ -3405,7 +3438,7 @@ void free_unref_page(struct page *page, unsigned int order)
return;
/*
* We only track unmovable, reclaimable and movable on pcp lists.
* We only track unmovable, reclaimable, movable, and CMA on pcp lists.
* Place ISOLATE pages on the isolated list because they are being
* offlined but treat HIGHATOMIC as movable pages so we can get those
* areas back if necessary. Otherwise, we may have to free
@@ -3611,34 +3644,23 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
int migratetype,
unsigned int alloc_flags,
struct per_cpu_pages *pcp,
struct list_head *list,
gfp_t gfp_flags)
{
struct page *page;
struct page *page = NULL;
struct list_head *list = NULL;
do {
if (list_empty(list)) {
int batch = READ_ONCE(pcp->batch);
int alloced;
/* First try to get CMA pages */
if (migratetype == MIGRATE_MOVABLE && alloc_flags & ALLOC_CMA)
list = get_populated_pcp_list(zone, order, pcp, get_cma_migrate_type(),
alloc_flags);
if (list == NULL) {
/*
* Scale batch relative to order if batch implies
* free pages can be stored on the PCP. Batch can
* be 1 for small zones or for boot pagesets which
* should never store free pages as the pages may
* belong to arbitrary zones.
* Either CMA is not suitable or there are no
* free CMA pages.
*/
if (batch > 1)
batch = max(batch >> order, 2);
if (migratetype == MIGRATE_MOVABLE && alloc_flags & ALLOC_CMA)
alloced = rmqueue_bulk(zone, order, batch, list,
get_cma_migrate_type(), alloc_flags);
if (unlikely(list_empty(list)))
alloced = rmqueue_bulk(zone, order, batch, list, migratetype,
alloc_flags);
pcp->count += alloced << order;
if (unlikely(list_empty(list)))
list = get_populated_pcp_list(zone, order, pcp, migratetype, alloc_flags);
if (unlikely(list == NULL) || unlikely(list_empty(list)))
return NULL;
}
@@ -3657,7 +3679,6 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
unsigned int alloc_flags)
{
struct per_cpu_pages *pcp;
struct list_head *list;
struct page *page;
unsigned long flags;
@@ -3670,8 +3691,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
*/
pcp = this_cpu_ptr(zone->per_cpu_pageset);
pcp->free_factor >>= 1;
list = &pcp->lists[order_to_pindex(migratetype, order)];
page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list, gfp_flags);
page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, gfp_flags);
local_unlock_irqrestore(&pagesets.lock, flags);
if (page) {
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
@@ -3693,16 +3713,9 @@ struct page *rmqueue(struct zone *preferred_zone,
struct page *page;
if (likely(pcp_allowed_order(order))) {
/*
* MIGRATE_MOVABLE pcplist could have the pages on CMA area and
* we need to skip it when CMA area isn't allowed.
*/
if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
migratetype != MIGRATE_MOVABLE) {
page = rmqueue_pcplist(preferred_zone, zone, order,
gfp_flags, migratetype, alloc_flags);
goto out;
}
page = rmqueue_pcplist(preferred_zone, zone, order,
gfp_flags, migratetype, alloc_flags);
goto out;
}
/*
@@ -3910,6 +3923,14 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
continue;
for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
#ifdef CONFIG_CMA
/*
* Note that this check is needed only
* when MIGRATE_CMA < MIGRATE_PCPTYPES.
*/
if (mt == MIGRATE_CMA)
continue;
#endif
if (!free_area_empty(area, mt))
return true;
}
@@ -5222,7 +5243,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
struct zone *zone;
struct zoneref *z;
struct per_cpu_pages *pcp;
struct list_head *pcp_list;
struct alloc_context ac;
gfp_t alloc_gfp;
unsigned int alloc_flags = ALLOC_WMARK_LOW;
@@ -5302,7 +5322,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
/* Attempt the batch allocation */
local_lock_irqsave(&pagesets.lock, flags);
pcp = this_cpu_ptr(zone->per_cpu_pageset);
pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
while (nr_populated < nr_pages) {
@@ -5313,7 +5332,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
}
page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
pcp, pcp_list, alloc_gfp);
pcp, alloc_gfp);
if (unlikely(!page)) {
/* Try and get at least one page */
if (!nr_populated)