ANDROID: fix ABI breakage caused by per_cpu_pages

The patchset adds a new spin_lock field to struct per_cpu_pages, which
breaks the KMI, so this patch introduces per_cpu_pages_ext and
per_cpu_pageset_ext and converts the relevant functions and code
to use the _ext data structures instead of the original ones.

Bug: 230899966
Signed-off-by: Minchan Kim <minchan@google.com>
Change-Id: Ic5156f784223695c9716409036e2973df69ef99b
commit 42596c7b41 (parent 2eb3710ce5)
Author: Minchan Kim
Date:   2022-05-02 12:44:15 -07:00

2 changed files with 54 additions and 23 deletions
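
The fix is the classic container_of() embedding trick: the KMI-frozen
structs keep their exact layout, the new lock lives in a wrapper that
embeds them, and the wrapper is recovered from an inner pointer by
subtracting the member offset. A minimal userspace sketch of the
pattern (illustrative only; an int stands in for spinlock_t):

	#include <stddef.h>
	#include <stdio.h>

	/* container_of(), reduced to plain C (the kernel's adds type checks) */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct per_cpu_pages {		/* KMI-frozen: layout must not change */
		int count;
	};

	struct per_cpu_pageset {	/* KMI-frozen: embeds per_cpu_pages */
		struct per_cpu_pages pcp;
	};

	struct per_cpu_pageset_ext {	/* new wrapper: free to gain fields */
		int lock;		/* stands in for spinlock_t */
		struct per_cpu_pageset pageset;
	};

	int main(void)
	{
		struct per_cpu_pageset_ext ext = { .pageset.pcp.count = 42 };
		struct per_cpu_pages *pcp = &ext.pageset.pcp;

		/* same two hops as pcp_to_pageset_ext() below */
		struct per_cpu_pageset *ps =
			container_of(pcp, struct per_cpu_pageset, pcp);
		struct per_cpu_pageset_ext *ps_ext =
			container_of(ps, struct per_cpu_pageset_ext, pageset);

		printf("count=%d round_trip=%s\n", pcp->count,
		       ps_ext == &ext ? "ok" : "BROKEN");
		return 0;
	}

Because the added field sits outside the frozen structs, existing
consumers of struct per_cpu_pages and struct per_cpu_pageset see an
unchanged layout; only code that needs the new lock pays the
container_of() hop.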

include/linux/mmzone.h

@@ -527,7 +527,6 @@ enum zone_watermarks {
 #define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
 
 struct per_cpu_pages {
-	spinlock_t lock;	/* Protects lists field */
 	int count;		/* number of pages in the list */
 	int high;		/* high watermark, emptying needed */
 	int batch;		/* chunk size for buddy add/remove */
@@ -548,6 +547,11 @@ struct per_cpu_pageset {
 #endif
 };
 
+struct per_cpu_pageset_ext {
+	spinlock_t lock;	/* Protects pageset.pcp.lists field */
+	struct per_cpu_pageset pageset;
+};
+
 struct per_cpu_nodestat {
 	s8 stat_threshold;
 	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
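
Dropping the spinlock from struct per_cpu_pages puts count back at
offset 0 and restores the struct's pre-patchset size, which is exactly
the layout the KMI freeze records. A hypothetical compile-time guard
(not part of this patch) would look like:

	/*
	 * Hypothetical KMI guard, not in this patch: the frozen struct
	 * must keep its recorded layout; only per_cpu_pageset_ext may grow.
	 */
	static_assert(offsetof(struct per_cpu_pages, count) == 0,
		      "per_cpu_pages layout changed; KMI would break");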

mm/page_alloc.c

@@ -83,6 +83,18 @@
 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
 typedef int __bitwise fpi_t;
 
+static inline struct per_cpu_pageset_ext *pcp_to_pageset_ext(struct per_cpu_pages *pcp)
+{
+	struct per_cpu_pageset *ps = container_of(pcp, struct per_cpu_pageset, pcp);
+
+	return container_of(ps, struct per_cpu_pageset_ext, pageset);
+}
+
+static inline struct per_cpu_pageset_ext *pageset_to_pageset_ext(struct per_cpu_pageset *ps)
+{
+	return container_of(ps, struct per_cpu_pageset_ext, pageset);
+}
+
 /* No special request */
 #define FPI_NONE		((__force fpi_t)0)
@@ -3123,15 +3135,16 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 	to_drain = min(pcp->count, batch);
 	if (to_drain > 0) {
 		unsigned long flags;
+		struct per_cpu_pageset_ext *ps_ext = pcp_to_pageset_ext(pcp);
 
 		/*
 		 * free_pcppages_bulk expects IRQs disabled for zone->lock
 		 * so even though pcp->lock is not intended to be IRQ-safe,
 		 * it's needed in this context.
 		 */
-		spin_lock_irqsave(&pcp->lock, flags);
+		spin_lock_irqsave(&ps_ext->lock, flags);
 		free_pcppages_bulk(zone, to_drain, pcp);
-		spin_unlock_irqrestore(&pcp->lock, flags);
+		spin_unlock_irqrestore(&ps_ext->lock, flags);
 	}
 }
 #endif
@@ -3142,18 +3155,20 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
 {
 	struct per_cpu_pageset *pset;
+	struct per_cpu_pageset_ext *ps_ext;
 	struct per_cpu_pages *pcp;
 
 	pset = per_cpu_ptr(zone->pageset, cpu);
+	ps_ext = pageset_to_pageset_ext(pset);
 	pcp = &pset->pcp;
 	if (pcp->count) {
 		unsigned long flags;
 
 		/* See drain_zone_pages on why this is disabling IRQs */
-		spin_lock_irqsave(&pcp->lock, flags);
+		spin_lock_irqsave(&ps_ext->lock, flags);
 		free_pcppages_bulk(zone, pcp->count, pcp);
-		spin_unlock_irqrestore(&pcp->lock, flags);
+		spin_unlock_irqrestore(&ps_ext->lock, flags);
 	}
 }
@@ -3323,15 +3338,17 @@ static bool free_unref_page_commit(struct page *page, int migratetype,
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
+	struct per_cpu_pageset_ext *ps_ext;
 	unsigned long __maybe_unused UP_flags;
 
 	__count_vm_event(PGFREE);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
+	ps_ext = pcp_to_pageset_ext(pcp);
 
 	if (!locked) {
 		/* Protect against a parallel drain. */
 		pcp_trylock_prepare(UP_flags);
-		if (!spin_trylock(&pcp->lock)) {
+		if (!spin_trylock(&ps_ext->lock)) {
 			pcp_trylock_finish(UP_flags);
 			return false;
 		}
@@ -3345,7 +3362,7 @@ static bool free_unref_page_commit(struct page *page, int migratetype,
 	}
 
 	if (!locked) {
-		spin_unlock(&pcp->lock);
+		spin_unlock(&ps_ext->lock);
 		pcp_trylock_finish(UP_flags);
 	}
@@ -3397,6 +3414,7 @@ void free_unref_page(struct page *page)
 void free_unref_page_list(struct list_head *list)
 {
 	struct page *page, *next;
+	struct per_cpu_pageset_ext *ps_ext;
 	struct per_cpu_pages *pcp;
 	struct zone *locked_zone;
 	unsigned long flags;
@@ -3454,17 +3472,19 @@ void free_unref_page_list(struct list_head *list)
 	page = lru_to_page(list);
 	locked_zone = page_zone(page);
 	pcp = &this_cpu_ptr(locked_zone->pageset)->pcp;
-	spin_lock(&pcp->lock);
+	ps_ext = pcp_to_pageset_ext(pcp);
+	spin_lock(&ps_ext->lock);
 
 	list_for_each_entry_safe(page, next, list, lru) {
 		struct zone *zone = page_zone(page);
 
 		/* Different zone, different pcp lock. */
 		if (zone != locked_zone) {
-			spin_unlock(&pcp->lock);
+			spin_unlock(&ps_ext->lock);
 			locked_zone = zone;
 			pcp = &this_cpu_ptr(zone->pageset)->pcp;
-			spin_lock(&pcp->lock);
+			ps_ext = pcp_to_pageset_ext(pcp);
+			spin_lock(&ps_ext->lock);
 		}
 
 		migratetype = get_pcppage_migratetype(page);
@@ -3485,15 +3505,16 @@ void free_unref_page_list(struct list_head *list)
 		 * a large list of pages to free.
 		 */
 		if (++batch_count == SWAP_CLUSTER_MAX) {
-			spin_unlock(&pcp->lock);
+			spin_unlock(&ps_ext->lock);
 			local_irq_restore(flags);
 			batch_count = 0;
 			local_irq_save(flags);
 			pcp = &this_cpu_ptr(locked_zone->pageset)->pcp;
-			spin_lock(&pcp->lock);
+			ps_ext = pcp_to_pageset_ext(pcp);
+			spin_lock(&ps_ext->lock);
 		}
 	}
 
-	spin_unlock(&pcp->lock);
+	spin_unlock(&ps_ext->lock);
 	local_irq_restore(flags);
 }
@@ -3671,6 +3692,7 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 	struct page *page = NULL;
 	unsigned long __maybe_unused UP_flags;
 	struct list_head *list = NULL;
+	struct per_cpu_pageset_ext *ps_ext = pcp_to_pageset_ext(pcp);
 
 	/*
 	 * spin_trylock is not necessary right now due to due to
@@ -3682,7 +3704,7 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 	 */
 	if (unlikely(!locked)) {
 		pcp_trylock_prepare(UP_flags);
-		if (!spin_trylock(&pcp->lock)) {
+		if (!spin_trylock(&ps_ext->lock)) {
 			pcp_trylock_finish(UP_flags);
 			return NULL;
 		}
@@ -3717,7 +3739,7 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 out:
 	if (!locked) {
-		spin_unlock(&pcp->lock);
+		spin_unlock(&ps_ext->lock);
 		pcp_trylock_finish(UP_flags);
 	}
@@ -6222,7 +6244,7 @@ static void build_zonelists(pg_data_t *pgdat)
  * Other parts of the kernel may not check if the zone is available.
  */
 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
-static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
+static DEFINE_PER_CPU(struct per_cpu_pageset_ext, boot_pageset);
 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
 
 static void __build_all_zonelists(void *data)
@@ -6289,7 +6311,7 @@ build_all_zonelists_init(void)
	 * (a chicken-egg dilemma).
	 */
	for_each_possible_cpu(cpu)
-		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
+		setup_pageset(&per_cpu(boot_pageset, cpu).pageset, 0);
 
	mminit_verify_zonelist();
	cpuset_init_current_mems_allowed();
@@ -6708,12 +6730,14 @@ static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
 static void pageset_init(struct per_cpu_pageset *p)
 {
 	struct per_cpu_pages *pcp;
+	struct per_cpu_pageset_ext *ps_ext;
 	int migratetype;
 
 	memset(p, 0, sizeof(*p));
+	ps_ext = pageset_to_pageset_ext(p);
 
 	pcp = &p->pcp;
-	spin_lock_init(&pcp->lock);
+	spin_lock_init(&ps_ext->lock);
 	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
 		INIT_LIST_HEAD(&pcp->lists[migratetype]);
 }
@@ -6760,7 +6784,10 @@ static void __meminit zone_pageset_init(struct zone *zone, int cpu)
 void __meminit setup_zone_pageset(struct zone *zone)
 {
 	int cpu;
-	zone->pageset = alloc_percpu(struct per_cpu_pageset);
+	struct per_cpu_pageset_ext *ps_ext;
+
+	ps_ext = alloc_percpu(struct per_cpu_pageset_ext);
+	zone->pageset = &ps_ext->pageset;
 	for_each_possible_cpu(cpu)
 		zone_pageset_init(zone, cpu);
 }
@@ -6786,7 +6813,7 @@ void __init setup_per_cpu_pageset(void)
	 * the nodes these zones are associated with.
	 */
	for_each_possible_cpu(cpu) {
-		struct per_cpu_pageset *pcp = &per_cpu(boot_pageset, cpu);
+		struct per_cpu_pageset *pcp = &per_cpu(boot_pageset, cpu).pageset;
		memset(pcp->vm_numa_stat_diff, 0,
		       sizeof(pcp->vm_numa_stat_diff));
	}
@@ -6804,7 +6831,7 @@ static __meminit void zone_pcp_init(struct zone *zone)
	 * relies on the ability of the linker to provide the
	 * offset of a (static) per cpu variable into the per cpu area.
	 */
-	zone->pageset = &boot_pageset;
+	zone->pageset = &boot_pageset.pageset;
 
	if (populated_zone(zone))
		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
@@ -9140,13 +9167,13 @@ void zone_pcp_reset(struct zone *zone)
	/* avoid races with drain_pages() */
	local_irq_save(flags);
-	if (zone->pageset != &boot_pageset) {
+	if (zone->pageset != &boot_pageset.pageset) {
		for_each_online_cpu(cpu) {
			pset = per_cpu_ptr(zone->pageset, cpu);
			drain_zonestat(zone, pset);
		}
		free_percpu(zone->pageset);
-		zone->pageset = &boot_pageset;
+		zone->pageset = &boot_pageset.pageset;
	}
	local_irq_restore(flags);
 }
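
Taken together, every locking site in mm/page_alloc.c now follows one
shape: the data stays in per_cpu_pages, the lock that guards it comes
from the wrapper. Condensed from the hunks above (illustrative;
pcp_locked_drain() is a hypothetical name, not a function this patch
adds):

	static void pcp_locked_drain(struct zone *zone, int count)
	{
		unsigned long flags;
		struct per_cpu_pages *pcp = &this_cpu_ptr(zone->pageset)->pcp;
		struct per_cpu_pageset_ext *ps_ext = pcp_to_pageset_ext(pcp);

		/* the lists live in pcp; the lock that guards them in ps_ext */
		spin_lock_irqsave(&ps_ext->lock, flags);
		free_pcppages_bulk(zone, min(pcp->count, count), pcp);
		spin_unlock_irqrestore(&ps_ext->lock, flags);
	}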