FROMLIST: BACKPORT: mm/page_alloc: Split out buddy removal code from rmqueue into separate helper

This is a preparation patch to allow the buddy removal code to be reused
in a later patch.

No functional change.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Link: https://lore.kernel.org/all/20220420095906.27349-4-mgorman@techsingularity.net/

Conflicts:
        mm/page_alloc.c

1. Skipped changes in __rmqueue_pcplist which are not present in 5.10 kernel.
2. [1] introduced substantial changes to the page allocation path to
   support a CMA-first allocation policy, so this change needed to be
   adapted to it.

[1] ANDROID: cma: redirect page allocation to CMA

Bug: 230899966
Signed-off-by: Minchan Kim <minchan@google.com>
Change-Id: I4584fbfdebf2637534d6a68635a44a81a176c253
This commit is contained in:
Mel Gorman
2022-04-22 16:49:50 -07:00
committed by Minchan Kim
parent a248d08a94
commit c249c40b79

View File

@@ -3566,6 +3566,53 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
#endif
}
/*
 * rmqueue_buddy - allocate a page of @order directly from @zone's buddy
 * free lists, bypassing the per-cpu page lists.
 *
 * Takes zone->lock with IRQs disabled for each removal attempt and retries
 * the whole removal if check_new_pages() flags the freshly removed pages
 * as bad.  Returns the allocated page, or NULL if the zone has no suitable
 * free pages.  Split out of rmqueue() so the locked removal path can be
 * reused by other callers.
 */
static __always_inline
struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
unsigned int order, unsigned int alloc_flags,
int migratetype)
{
struct page *page;
unsigned long flags;
/* Retry under the lock as long as the removed pages fail validation. */
do {
page = NULL;
spin_lock_irqsave(&zone->lock, flags);
/*
 * order-0 request can reach here when the pcplist is skipped
 * due to non-CMA allocation context. HIGHATOMIC area is
 * reserved for high-order atomic allocation, so order-0
 * request should skip it.
 */
if (order > 0 && alloc_flags & ALLOC_HARDER) {
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
if (page)
trace_mm_page_alloc_zone_locked(page, order, migratetype);
}
if (!page) {
/*
 * Per the CMA-first policy backported from [1] in the
 * commit message: movable allocations that may use CMA
 * try the CMA area before the regular free lists.
 */
if (migratetype == MIGRATE_MOVABLE &&
alloc_flags & ALLOC_CMA)
page = __rmqueue_cma(zone, order, migratetype,
alloc_flags);
if (!page)
page = __rmqueue(zone, order, migratetype,
alloc_flags);
}
if (!page) {
spin_unlock_irqrestore(&zone->lock, flags);
return NULL;
}
/* Account the pages just removed from the free lists. */
__mod_zone_freepage_state(zone, -(1 << order),
get_pcppage_migratetype(page));
spin_unlock_irqrestore(&zone->lock, flags);
} while (check_new_pages(page, order));
/* Statistics are updated outside the lock, after a successful removal. */
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
return page;
}
/* Remove page from the per-cpu list, caller must protect the list */
static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
unsigned int alloc_flags,
@@ -3647,42 +3694,14 @@ struct page *rmqueue(struct zone *preferred_zone,
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
spin_lock_irqsave(&zone->lock, flags);
do {
page = NULL;
/*
* order-0 request can reach here when the pcplist is skipped
* due to non-CMA allocation context. HIGHATOMIC area is
* reserved for high-order atomic allocation, so order-0
* request should skip it.
*/
if (order > 0 && alloc_flags & ALLOC_HARDER) {
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
if (page)
trace_mm_page_alloc_zone_locked(page, order, migratetype);
}
if (!page) {
if (migratetype == MIGRATE_MOVABLE &&
alloc_flags & ALLOC_CMA)
page = __rmqueue_cma(zone, order, migratetype,
alloc_flags);
if (!page)
page = __rmqueue(zone, order, migratetype,
alloc_flags);
}
} while (page && check_new_pages(page, order));
spin_unlock(&zone->lock);
if (!page)
goto failed;
__mod_zone_freepage_state(zone, -(1 << order),
get_pcppage_migratetype(page));
page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
migratetype);
if (unlikely(!page))
return NULL;
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
trace_android_vh_rmqueue(preferred_zone, zone, order,
gfp_flags, alloc_flags, migratetype);
local_irq_restore(flags);
out:
/* Separate test+clear to avoid unnecessary atomics */
@@ -3693,10 +3712,6 @@ out:
VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
return page;
failed:
local_irq_restore(flags);
return NULL;
}
#ifdef CONFIG_FAIL_PAGE_ALLOC