mirror of
https://github.com/hardkernel/linux.git
synced 2026-03-25 20:10:23 +09:00
BACKPORT: mm: add __clear_page_lru_flags() to replace page_off_lru()
Similar to page_off_lru(), the new function does non-atomic clearing
of PageLRU() in addition to PageActive() and PageUnevictable(), on a
page that has no references left.
If PageActive() and PageUnevictable() are both set, refuse to clear
either and leave them to bad_page(). This is a behavior change that
is meant to help debug.
Link: https://lore.kernel.org/linux-mm/20201207220949.830352-7-yuzhao@google.com/
Link: https://lkml.kernel.org/r/20210122220600.906146-7-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 8756017962)
Bug: 227651406
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Change-Id: I0290916fa08277c50e228a8d3f39af67d62ff9d0
This commit is contained in:
@@ -62,27 +62,19 @@ static inline enum lru_list page_lru_base_type(struct page *page)
 }
 
 /**
- * page_off_lru - which LRU list was page on? clearing its lru flags.
- * @page: the page to test
- *
- * Returns the LRU list a page was on, as an index into the array of LRU
- * lists; and clears its Unevictable or Active flags, ready for freeing.
+ * __clear_page_lru_flags - clear page lru flags before releasing a page
+ * @page: the page that was on lru and now has a zero reference
  */
-static __always_inline enum lru_list page_off_lru(struct page *page)
+static __always_inline void __clear_page_lru_flags(struct page *page)
 {
-	enum lru_list lru;
+	__ClearPageLRU(page);
 
-	if (PageUnevictable(page)) {
-		__ClearPageUnevictable(page);
-		lru = LRU_UNEVICTABLE;
-	} else {
-		lru = page_lru_base_type(page);
-		if (PageActive(page)) {
-			__ClearPageActive(page);
-			lru += LRU_ACTIVE;
-		}
-	}
-	return lru;
+	/* this shouldn't happen, so leave the flags to bad_page() */
+	if (PageActive(page) && PageUnevictable(page))
+		return;
+
+	__ClearPageActive(page);
+	__ClearPageUnevictable(page);
 }
 
 /**
@@ -88,9 +88,8 @@ static void __page_cache_release(struct page *page)
 		spin_lock_irqsave(&pgdat->lru_lock, flags);
 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 		VM_BUG_ON_PAGE(!PageLRU(page), page);
-		__ClearPageLRU(page);
 		del_page_from_lru_list(page, lruvec);
-		page_off_lru(page);
+		__clear_page_lru_flags(page);
 		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
 	}
 	__ClearPageWaiters(page);
@@ -1042,9 +1041,8 @@ void release_pages(struct page **pages, int nr)
 
 			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
 			VM_BUG_ON_PAGE(!PageLRU(page), page);
-			__ClearPageLRU(page);
 			del_page_from_lru_list(page, lruvec);
-			page_off_lru(page);
+			__clear_page_lru_flags(page);
 		}
 
 		__ClearPageWaiters(page);
@@ -1928,9 +1928,8 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 			add_page_to_lru_list(page, lruvec);
 
 		if (put_page_testzero(page)) {
-			__ClearPageLRU(page);
 			del_page_from_lru_list(page, lruvec);
-			__ClearPageActive(page);
+			__clear_page_lru_flags(page);
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&pgdat->lru_lock);
Reference in New Issue
Block a user