ANDROID: vendor_hooks: Add hooks to avoid key threads stalled in memory allocations

We add these hooks to avoid key threads blocked in memory allocation
path.
-android_vh_free_unref_page_bypass  ----We create a memory pool for the
key threads. This hook determines whether a page should be free to the
pool or to the buddy freelist. It works with an existing hook
`android_vh_alloc_pages_reclaim_bypass`, which takes pages out of the
pool.

-android_vh_kvmalloc_node_use_vmalloc  ----For key threads, we prefer
not to run into direct reclaim. So we clear the __GFP_DIRECT_RECLAIM flag.
For threads which are not that important, we prefer to use vmalloc.

-android_vh_should_alloc_pages_retry  ----Before key threads run into
direct reclaim, we want to retry with a lower watermark.

-android_vh_unreserve_highatomic_bypass  ----We want to keep more
highatomic pages when unreserve them to avoid highatomic allocation
failures.

-android_vh_rmqueue_bulk_bypass  ----We found that sometimes, when key
threads run into rmqueue_bulk, it took several milliseconds spinning on
zone->lock or filling per-cpu pages. We use this hook to take pages from
the mempool mentioned above, rather than grab zone->lock and fill a
batch of pages to per-cpu.

Bug: 288216516
Bug: 304066882

Change-Id: I1656032d6819ca627723341987b6094775bc345f
Signed-off-by: Oven <liyangouwen1@oppo.com>
Signed-off-by: Qinglin Li <qinglin.li@amlogic.com>
(cherry picked from commit aa47cc7c206a8e159a735e6f1513ece6dac1e0ba)
This commit is contained in:
Oven
2023-06-30 10:57:12 +08:00
committed by Treehugger Robot
parent a1ebbe9d5e
commit 5b8c9a002d
4 changed files with 56 additions and 1 deletions

View File

@@ -195,6 +195,11 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_referenced_check_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_all_pages_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_drain_all_pages_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pcplist_add_cma_pages_bypass);
/*
 * Exports for the vendor hooks that let a vendor module keep key threads
 * from stalling in the memory-allocation path (page free bypass, kvmalloc
 * flag tuning, slowpath retry, highatomic unreserve skip, rmqueue_bulk
 * bypass).  Declared in include/trace/hooks/mm.h.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_unref_page_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kvmalloc_node_use_vmalloc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_should_alloc_pages_retry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_unreserve_highatomic_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue_bulk_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_slab_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_insert);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_node_delete);

View File

@@ -83,6 +83,24 @@ DECLARE_HOOK(android_vh_cma_drain_all_pages_bypass,
DECLARE_HOOK(android_vh_pcplist_add_cma_pages_bypass,
TP_PROTO(int migratetype, bool *bypass),
TP_ARGS(migratetype, bypass));
/*
 * Called when a page is about to be returned to the allocator.  A vendor
 * module may set *bypass to true to take ownership of the page (e.g. place
 * it in a vendor-maintained pool for key threads) instead of letting it go
 * back to the buddy/pcp free lists.
 */
DECLARE_HOOK(android_vh_free_unref_page_bypass,
TP_PROTO(struct page *page, int order, int migratetype, bool *bypass),
TP_ARGS(page, order, migratetype, bypass));
/*
 * Called from kvmalloc_node() before the kmalloc attempt.  A vendor module
 * may rewrite *kmalloc_flags (e.g. clear __GFP_DIRECT_RECLAIM for key
 * threads) or set *use_vmalloc to true to route the allocation straight
 * to the vmalloc fallback.
 */
DECLARE_HOOK(android_vh_kvmalloc_node_use_vmalloc,
TP_PROTO(size_t size, gfp_t *kmalloc_flags, bool *use_vmalloc),
TP_ARGS(size, kmalloc_flags, use_vmalloc));
/*
 * Called in the allocation slowpath before entering direct reclaim.  A
 * vendor module may adjust *alloc_flags (e.g. lower the watermark for key
 * threads), optionally supply *page, and set *should_alloc_retry to make
 * the slowpath retry instead of reclaiming.
 */
DECLARE_HOOK(android_vh_should_alloc_pages_retry,
TP_PROTO(gfp_t gfp_mask, int order, int *alloc_flags,
int migratetype, struct zone *preferred_zone, struct page **page, bool *should_alloc_retry),
TP_ARGS(gfp_mask, order, alloc_flags,
migratetype, preferred_zone, page, should_alloc_retry));
/*
 * Called per zone while unreserving highatomic pageblocks.  Setting
 * *skip_unreserve_highatomic keeps the zone's highatomic reserve intact,
 * reducing the chance of later highatomic allocation failures.
 */
DECLARE_HOOK(android_vh_unreserve_highatomic_bypass,
TP_PROTO(bool force, struct zone *zone, bool *skip_unreserve_highatomic),
TP_ARGS(force, zone, skip_unreserve_highatomic));
/*
 * Called on entry to the rmqueue bulk-fill path.  A vendor module may
 * populate *list with pages from its own pool; if the list comes back
 * non-empty the caller skips grabbing zone->lock and refilling a batch
 * of per-cpu pages, avoiding multi-millisecond stalls for key threads.
 */
DECLARE_HOOK(android_vh_rmqueue_bulk_bypass,
TP_PROTO(unsigned int order, struct per_cpu_pages *pcp, int migratetype,
struct list_head *list),
TP_ARGS(order, pcp, migratetype, list));
DECLARE_HOOK(android_vh_mmap_region,
TP_PROTO(struct vm_area_struct *vma, unsigned long addr),
TP_ARGS(vma, addr));

View File

@@ -1742,11 +1742,15 @@ static void __free_pages_ok(struct page *page, unsigned int order,
int migratetype;
unsigned long pfn = page_to_pfn(page);
struct zone *zone = page_zone(page);
bool skip_free_unref_page = false;
if (!free_pages_prepare(page, order, true, fpi_flags))
return;
migratetype = get_pfnblock_migratetype(page, pfn);
trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
if (skip_free_unref_page)
return;
spin_lock_irqsave(&zone->lock, flags);
if (unlikely(has_isolate_pageblock(zone) ||
@@ -2988,6 +2992,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
struct page *page;
int order;
bool ret;
bool skip_unreserve_highatomic = false;
for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
ac->nodemask) {
@@ -2999,6 +3004,11 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
pageblock_nr_pages)
continue;
trace_android_vh_unreserve_highatomic_bypass(force, zone,
&skip_unreserve_highatomic);
if (skip_unreserve_highatomic)
continue;
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
struct free_area *area = &(zone->free_area[order]);
@@ -3255,6 +3265,10 @@ static struct list_head *get_populated_pcp_list(struct zone *zone,
int batch = READ_ONCE(pcp->batch);
int alloced;
trace_android_vh_rmqueue_bulk_bypass(order, pcp, migratetype, list);
if (!list_empty(list))
return list;
/*
* Scale batch relative to order if batch implies
* free pages can be stored on the PCP. Batch can
@@ -3571,10 +3585,16 @@ void free_unref_page(struct page *page, unsigned int order)
unsigned long pfn = page_to_pfn(page);
int migratetype, pcpmigratetype;
bool pcp_skip_cma_pages = false;
bool skip_free_unref_page = false;
if (!free_unref_page_prepare(page, pfn, order))
return;
migratetype = get_pcppage_migratetype(page);
trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
if (skip_free_unref_page)
return;
/*
* We only track unmovable, reclaimable movable, and CMA on pcp lists.
* Place ISOLATE pages on the isolated list because they are being
@@ -5155,6 +5175,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
unsigned int zonelist_iter_cookie;
int reserve_flags;
unsigned long alloc_start = jiffies;
bool should_alloc_retry = false;
/*
* We also sanity check to catch abuse of atomic reserves being used by
* callers that are not in atomic context.
@@ -5287,6 +5308,11 @@ retry:
if (current->flags & PF_MEMALLOC)
goto nopage;
trace_android_vh_should_alloc_pages_retry(gfp_mask, order, &alloc_flags,
ac->migratetype, ac->preferred_zoneref->zone, &page, &should_alloc_retry);
if (should_alloc_retry)
goto retry;
/* Try direct reclaim and then allocating */
page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
&did_some_progress);

View File

@@ -27,8 +27,9 @@
#include <linux/uaccess.h>
#include "internal.h"
#ifndef __GENSYMS__
#ifndef __GENKSYMS__
#include <trace/hooks/syscall_check.h>
#include <trace/hooks/mm.h>
#endif
/**
@@ -598,6 +599,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
gfp_t kmalloc_flags = flags;
void *ret;
bool use_vmalloc = false;
/*
* vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
@@ -606,6 +608,9 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
if ((flags & GFP_KERNEL) != GFP_KERNEL)
return kmalloc_node(size, flags, node);
trace_android_vh_kvmalloc_node_use_vmalloc(size, &kmalloc_flags, &use_vmalloc);
if (use_vmalloc)
goto use_vmalloc_node;
/*
* We want to attempt a large physically contiguous block first because
* it is less likely to fragment multiple larger blocks and therefore
@@ -635,6 +640,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
return NULL;
}
use_vmalloc_node:
return __vmalloc_node(size, 1, flags, node,
__builtin_return_address(0));
}