Skip to content

Commit

Permalink
memcg: fix page_cgroup fatal error in FLATMEM
Browse files Browse the repository at this point in the history
SLAB is now configured at a very early stage, so it can already be used
in init routines.

However, replacing alloc_bootmem() in the FLATMEM/DISCONTIGMEM
page_cgroup() initialization broke the allocation.
(It works well in the SPARSEMEM case, which supports MEMORY_HOTPLUG and
 where the size of page_cgroup stays reasonable: < 1 << MAX_ORDER.)

This patch revives FLATMEM + memory cgroup by using alloc_bootmem().

In the future, we should either stop supporting FLATMEM (if it has no
users) or rewrite the code for FLATMEM completely. But doing so now
would add messier code and more overhead.

Reported-by: Li Zefan <lizf@cn.fujitsu.com>
Tested-by: Li Zefan <lizf@cn.fujitsu.com>
Tested-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
  • Loading branch information
hkamezawa authored and Pekka Enberg committed Jun 12, 2009
1 parent 8ebf975 commit ca371c0
Show file tree
Hide file tree
Showing 3 changed files with 32 additions and 20 deletions.
18 changes: 17 additions & 1 deletion include/linux/page_cgroup.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,19 @@ struct page_cgroup {
};

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
void __init page_cgroup_init(void);

#ifdef CONFIG_SPARSEMEM
static inline void __init page_cgroup_init_flatmem(void)
{
}
extern void __init page_cgroup_init(void);
#else
void __init page_cgroup_init_flatmem(void);
static inline void __init page_cgroup_init(void)
{
}
#endif

struct page_cgroup *lookup_page_cgroup(struct page *page);

enum {
Expand Down Expand Up @@ -87,6 +99,10 @@ static inline void page_cgroup_init(void)
{
}

static inline void __init page_cgroup_init_flatmem(void)
{
}

#endif

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
Expand Down
5 changes: 5 additions & 0 deletions init/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -539,6 +539,11 @@ void __init __weak thread_info_cache_init(void)
*/
static void __init mm_init(void)
{
/*
* page_cgroup requires contiguous pages as memmap
* and it's bigger than MAX_ORDER unless SPARSEMEM.
*/
page_cgroup_init_flatmem();
mem_init();
kmem_cache_init();
vmalloc_init();
Expand Down
29 changes: 10 additions & 19 deletions mm/page_cgroup.c
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,6 @@ static int __init alloc_node_page_cgroup(int nid)
struct page_cgroup *base, *pc;
unsigned long table_size;
unsigned long start_pfn, nr_pages, index;
struct page *page;
unsigned int order;

start_pfn = NODE_DATA(nid)->node_start_pfn;
nr_pages = NODE_DATA(nid)->node_spanned_pages;
Expand All @@ -57,13 +55,11 @@ static int __init alloc_node_page_cgroup(int nid)
return 0;

table_size = sizeof(struct page_cgroup) * nr_pages;
order = get_order(table_size);
page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
if (!page)
page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
if (!page)

base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
if (!base)
return -ENOMEM;
base = page_address(page);
for (index = 0; index < nr_pages; index++) {
pc = base + index;
__init_page_cgroup(pc, start_pfn + index);
Expand All @@ -73,7 +69,7 @@ static int __init alloc_node_page_cgroup(int nid)
return 0;
}

void __init page_cgroup_init(void)
void __init page_cgroup_init_flatmem(void)
{

int nid, fail;
Expand Down Expand Up @@ -117,16 +113,11 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
if (!section->page_cgroup) {
nid = page_to_nid(pfn_to_page(pfn));
table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
if (slab_is_available()) {
base = kmalloc_node(table_size,
GFP_KERNEL | __GFP_NOWARN, nid);
if (!base)
base = vmalloc_node(table_size, nid);
} else {
base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
table_size,
PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
VM_BUG_ON(!slab_is_available());
base = kmalloc_node(table_size,
GFP_KERNEL | __GFP_NOWARN, nid);
if (!base)
base = vmalloc_node(table_size, nid);
} else {
/*
* We don't have to allocate page_cgroup again, but
Expand Down

0 comments on commit ca371c0

Please sign in to comment.