ANDROID: mm: do not allow file-backed pages from CMA

Android has carried custom patches that disallow file-backed page
allocation from the CMA area, since it could cause CMA allocation
failure or slowness. However, compaction could still migrate
file-backed pages into the CMA area, causing trouble for CMA
allocation.

This patch checks whether compaction's set of migration source pages
contains any file-backed pages. If it does, compaction selects
migration target pages (i.e., free pages) only from MIGRATE_MOVABLE
pageblocks, not from MIGRATE_CMA ones.

[surenb: original patch reworked using compact_control_ext to avoid
breaking frozen ABI]

Bug: 207498240
Bug: 305594365
Change-Id: Ibf30eea6bf24aafa2a75a73cef6084b1c837bd06
Signed-off-by: Minchan Kim <minchan@google.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
This commit is contained in:
Minchan Kim
2023-01-12 09:52:49 -08:00
committed by Treehugger Robot
parent 35482d0d38
commit a934b92361
2 changed files with 37 additions and 12 deletions

View File

@@ -818,9 +818,10 @@ static bool too_many_isolated(pg_data_t *pgdat)
* and cc->nr_migratepages is updated accordingly.
*/
static int
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
isolate_migratepages_block(struct compact_control_ext *cc_ext, unsigned long low_pfn,
unsigned long end_pfn, isolate_mode_t mode)
{
struct compact_control *cc = cc_ext->cc;
pg_data_t *pgdat = cc->zone->zone_pgdat;
unsigned long nr_scanned = 0, nr_isolated = 0;
struct lruvec *lruvec;
@@ -1131,6 +1132,8 @@ isolate_success:
list_add(&page->lru, &cc->migratepages);
isolate_success_no_list:
cc->nr_migratepages += compound_nr(page);
if (!PageAnon(page))
cc_ext->nr_migrate_file_pages += compound_nr(page);
nr_isolated += compound_nr(page);
/*
@@ -1171,6 +1174,7 @@ isolate_fail:
}
putback_movable_pages(&cc->migratepages);
cc->nr_migratepages = 0;
cc_ext->nr_migrate_file_pages = 0;
nr_isolated = 0;
}
@@ -1244,6 +1248,7 @@ int
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
unsigned long end_pfn)
{
struct compact_control_ext cc_ext = { .cc = cc };
unsigned long pfn, block_start_pfn, block_end_pfn;
int ret = 0;
@@ -1264,7 +1269,7 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
block_end_pfn, cc->zone))
continue;
ret = isolate_migratepages_block(cc, pfn, block_end_pfn,
pfn = isolate_migratepages_block(&cc_ext, pfn, block_end_pfn,
ISOLATE_UNEVICTABLE);
if (ret)
@@ -1300,9 +1305,10 @@ static bool suitable_migration_source(struct compact_control *cc,
}
/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
static bool suitable_migration_target(struct compact_control_ext *cc_ext,
struct page *page)
{
struct compact_control *cc = cc_ext->cc;
/* If the page is a large free page, then disallow migration */
if (PageBuddy(page)) {
/*
@@ -1317,6 +1323,10 @@ static bool suitable_migration_target(struct compact_control *cc,
if (cc->ignore_block_suitable)
return true;
/* Allow file pages to migrate only into MIGRATE_MOVABLE blocks */
if (cc_ext->nr_migrate_file_pages)
return get_pageblock_migratetype(page) == MIGRATE_MOVABLE;
/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
if (is_migrate_movable(get_pageblock_migratetype(page)))
return true;
@@ -1588,8 +1598,9 @@ fast_isolate_freepages(struct compact_control *cc)
* Based on information in the current compact_control, find blocks
* suitable for isolating free pages from and then isolate them.
*/
static void isolate_freepages(struct compact_control *cc)
static void isolate_freepages(struct compact_control_ext *cc_ext)
{
struct compact_control *cc = cc_ext->cc;
struct zone *zone = cc->zone;
struct page *page;
unsigned long block_start_pfn; /* start of current pageblock */
@@ -1647,7 +1658,7 @@ static void isolate_freepages(struct compact_control *cc)
continue;
/* Check the block is suitable for migration */
if (!suitable_migration_target(cc, page))
if (!suitable_migration_target(cc_ext, page))
continue;
/* If isolation recently failed, do not retry */
@@ -1713,11 +1724,12 @@ splitmap:
static struct page *compaction_alloc(struct page *migratepage,
unsigned long data)
{
struct compact_control *cc = (struct compact_control *)data;
struct compact_control_ext *cc_ext = (struct compact_control_ext *)data;
struct compact_control *cc = cc_ext->cc;
struct page *freepage;
if (list_empty(&cc->freepages)) {
isolate_freepages(cc);
isolate_freepages(cc_ext);
if (list_empty(&cc->freepages))
return NULL;
@@ -1737,7 +1749,8 @@ static struct page *compaction_alloc(struct page *migratepage,
*/
static void compaction_free(struct page *page, unsigned long data)
{
struct compact_control *cc = (struct compact_control *)data;
struct compact_control_ext *cc_ext = (struct compact_control_ext *)data;
struct compact_control *cc = cc_ext->cc;
list_add(&page->lru, &cc->freepages);
cc->nr_freepages++;
@@ -1905,8 +1918,9 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
* starting at the block pointed to by the migrate scanner pfn within
* compact_control.
*/
static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
static isolate_migrate_t isolate_migratepages(struct compact_control_ext *cc_ext)
{
struct compact_control *cc = cc_ext->cc;
unsigned long block_start_pfn;
unsigned long block_end_pfn;
unsigned long low_pfn;
@@ -1984,7 +1998,7 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
}
/* Perform the isolation */
if (isolate_migratepages_block(cc, low_pfn, block_end_pfn,
if (isolate_migratepages_block(cc_ext, low_pfn, block_end_pfn,
isolate_mode))
return ISOLATE_ABORT;
@@ -2338,6 +2352,10 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
const bool sync = cc->mode != MIGRATE_ASYNC;
bool update_cached;
long vendor_ret;
struct compact_control_ext cc_ext = {
.cc = cc,
.nr_migrate_file_pages = 0,
};
/*
* These counters track activities during zone compaction. Initialize
@@ -2432,11 +2450,12 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
cc->rescan = true;
}
switch (isolate_migratepages(cc)) {
switch (isolate_migratepages(&cc_ext)) {
case ISOLATE_ABORT:
ret = COMPACT_CONTENDED;
putback_movable_pages(&cc->migratepages);
cc->nr_migratepages = 0;
cc_ext.nr_migrate_file_pages = 0;
goto out;
case ISOLATE_NONE:
if (update_cached) {
@@ -2456,7 +2475,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
}
err = migrate_pages(&cc->migratepages, compaction_alloc,
compaction_free, (unsigned long)cc, cc->mode,
compaction_free, (unsigned long)&cc_ext, cc->mode,
MR_COMPACTION, NULL);
trace_mm_compaction_migratepages(cc->nr_migratepages, err,
@@ -2464,6 +2483,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
/* All pages were either migrated or will be released */
cc->nr_migratepages = 0;
cc_ext.nr_migrate_file_pages = 0;
if (err) {
putback_movable_pages(&cc->migratepages);
/*

View File

@@ -264,6 +264,11 @@ struct compact_control {
bool alloc_contig; /* alloc_contig_range allocation */
};
/*
 * Wrapper carrying per-compaction state in addition to compact_control.
 * Kept as a separate struct (pointing at the embedded compact_control)
 * rather than as new fields in compact_control itself, to avoid breaking
 * the frozen ABI layout of compact_control (see commit message).
 */
struct compact_control_ext {
struct compact_control *cc; /* the wrapped compact_control */
unsigned int nr_migrate_file_pages; /* Number of file pages to migrate */
};
/*
* Used in direct compaction when a page should be taken from the freelists
* immediately when one is created during the free path.