Skip to content

Commit

Permalink
mm/page_alloc: check high-order pages for corruption during PCP operations
Browse files Browse the repository at this point in the history

Eric Dumazet pointed out that commit 44042b4 ("mm/page_alloc: allow
high-order pages to be stored on the per-cpu lists") only checks the
head page during PCP refill and allocation operations.  This was an
oversight and all pages should be checked.  This will incur a small
performance penalty but it's necessary for correctness.

Link: https://lkml.kernel.org/r/20220310092456.GJ15701@techsingularity.net
Fixes: 44042b4 ("mm/page_alloc: allow high-order pages to be stored on the per-cpu lists")
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reported-by: Eric Dumazet <edumazet@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Wei Xu <weixugc@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
gormanm authored and torvalds committed Mar 22, 2022
1 parent 3313204 commit 77fe7f1
Showing 1 changed file with 23 additions and 23 deletions.
46 changes: 23 additions & 23 deletions mm/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -2291,56 +2291,56 @@ static inline int check_new_page(struct page *page)
return 1;
}

static bool check_new_pages(struct page *page, unsigned int order)
{
int i;
for (i = 0; i < (1 << order); i++) {
struct page *p = page + i;

if (unlikely(check_new_page(p)))
return true;
}

return false;
}

#ifdef CONFIG_DEBUG_VM
/*
* With DEBUG_VM enabled, order-0 pages are checked for expected state when
* being allocated from pcp lists. With debug_pagealloc also enabled, they are
* also checked when pcp lists are refilled from the free lists.
*/
static inline bool check_pcp_refill(struct page *page)
static inline bool check_pcp_refill(struct page *page, unsigned int order)
{
if (debug_pagealloc_enabled_static())
return check_new_page(page);
return check_new_pages(page, order);
else
return false;
}

static inline bool check_new_pcp(struct page *page)
static inline bool check_new_pcp(struct page *page, unsigned int order)
{
return check_new_page(page);
return check_new_pages(page, order);
}
#else
/*
* With DEBUG_VM disabled, free order-0 pages are checked for expected state
* when pcp lists are being refilled from the free lists. With debug_pagealloc
* enabled, they are also checked when being allocated from the pcp lists.
*/
static inline bool check_pcp_refill(struct page *page)
static inline bool check_pcp_refill(struct page *page, unsigned int order)
{
return check_new_page(page);
return check_new_pages(page, order);
}
static inline bool check_new_pcp(struct page *page)
static inline bool check_new_pcp(struct page *page, unsigned int order)
{
if (debug_pagealloc_enabled_static())
return check_new_page(page);
return check_new_pages(page, order);
else
return false;
}
#endif /* CONFIG_DEBUG_VM */

/*
 * Check all (1 << order) pages of a block for an expected (uncorrupted)
 * state before allocation. Returns true if any page in the block fails
 * check_new_page(), in which case the block must be rejected.
 */
static bool check_new_pages(struct page *page, unsigned int order)
{
int i;
for (i = 0; i < (1 << order); i++) {
struct page *p = page + i;

/* A single bad tail page is enough to condemn the whole block. */
if (unlikely(check_new_page(p)))
return true;
}

return false;
}

inline void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags)
{
Expand Down Expand Up @@ -2982,7 +2982,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
if (unlikely(page == NULL))
break;

if (unlikely(check_pcp_refill(page)))
if (unlikely(check_pcp_refill(page, order)))
continue;

/*
Expand Down Expand Up @@ -3600,7 +3600,7 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
page = list_first_entry(list, struct page, lru);
list_del(&page->lru);
pcp->count -= 1 << order;
} while (check_new_pcp(page));
} while (check_new_pcp(page, order));

return page;
}
Expand Down

0 comments on commit 77fe7f1

Please sign in to comment.