Commit 62bc62a

page allocator: use a pre-calculated value instead of num_online_nodes() in fast paths

num_online_nodes() is called in a number of places but most often by the
page allocator when deciding whether the zonelist needs to be filtered
based on cpusets or the zonelist cache.  This is actually a heavy function
and touches a number of cache lines.

This patch stores the number of online nodes at boot time and updates the
value when nodes get onlined and offlined.  The value is then used in a
number of important paths in place of num_online_nodes().

[rientjes@google.com: do not override definition of node_set_online() with macro]
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Christoph Lameter authored and torvalds committed Jun 17, 2009
1 parent 974709b commit 62bc62a
Showing 5 changed files with 26 additions and 11 deletions.
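
As the changelog notes, num_online_nodes() is heavy: it expands via num_node_state() to a bitmap weight over the MAX_NUMNODES-bit online-node mask, so a large NUMA build walks multiple cache lines just to learn the node count. The userspace sketch below is illustrative only (simplified stand-in bodies, with __builtin_popcountl standing in for the kernel's bitmap weight helper); it shows the trade-off the patch makes: recount the mask only when a node comes or goes, and let the fast paths read one cached integer.

/* Illustrative userspace sketch, not kernel code. */
#include <stdio.h>

#define MAX_NUMNODES	1024		/* a large-NUMA kernel configuration */
#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define NODEMASK_LONGS	((MAX_NUMNODES + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long node_online_map[NODEMASK_LONGS];	/* bitmap of online nodes */
static int nr_online_nodes = 1;				/* the new cached count */

/* "Heavy" path: recount the whole mask (128 bytes here) on every call. */
static int num_online_nodes(void)
{
	int i, count = 0;

	for (i = 0; i < NODEMASK_LONGS; i++)
		count += __builtin_popcountl(node_online_map[i]);
	return count;
}

/* Slow-path update, mirroring what the new node_set_online() helper does. */
static void node_set_online(int nid)
{
	node_online_map[nid / BITS_PER_LONG] |= 1UL << (nid % BITS_PER_LONG);
	nr_online_nodes = num_online_nodes();
}

int main(void)
{
	node_set_online(0);
	node_set_online(3);

	/* Fast path: a single integer load instead of a bitmap weight. */
	if (nr_online_nodes > 1)
		printf("%d nodes online\n", nr_online_nodes);
	return 0;
}
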
19 changes: 16 additions & 3 deletions include/linux/nodemask.h
@@ -408,6 +408,19 @@ static inline int num_node_state(enum node_states state)
 #define next_online_node(nid)	next_node((nid), node_states[N_ONLINE])
 
 extern int nr_node_ids;
+extern int nr_online_nodes;
+
+static inline void node_set_online(int nid)
+{
+	node_set_state(nid, N_ONLINE);
+	nr_online_nodes = num_node_state(N_ONLINE);
+}
+
+static inline void node_set_offline(int nid)
+{
+	node_clear_state(nid, N_ONLINE);
+	nr_online_nodes = num_node_state(N_ONLINE);
+}
 #else
 
 static inline int node_state(int node, enum node_states state)
@@ -434,7 +447,10 @@ static inline int num_node_state(enum node_states state)
 #define first_online_node	0
 #define next_online_node(nid)	(MAX_NUMNODES)
 #define nr_node_ids		1
+#define nr_online_nodes	1
 
+#define node_set_online(node)	   node_set_state((node), N_ONLINE)
+#define node_set_offline(node)	   node_clear_state((node), N_ONLINE)
 #endif
 
 #define node_online_map	node_states[N_ONLINE]
@@ -454,9 +470,6 @@ static inline int num_node_state(enum node_states state)
 #define node_online(node)	node_state((node), N_ONLINE)
 #define node_possible(node)	node_state((node), N_POSSIBLE)
 
-#define node_set_online(node)	   node_set_state((node), N_ONLINE)
-#define node_set_offline(node)	   node_clear_state((node), N_ONLINE)
-
 #define for_each_node(node)	   for_each_node_state(node, N_POSSIBLE)
 #define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
 
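
Two details of this hunk are worth noting: for MAX_NUMNODES > 1, node_set_online() and node_set_offline() become inline functions that refresh nr_online_nodes as a side effect, and the old generic macros near the bottom of the file are removed so they cannot shadow the new inlines, which is the [rientjes@google.com] fix-up mentioned in the changelog. Existing online/offline call sites therefore need no changes. A hedged, illustrative sketch of a hotplug-style caller (the helper names are hypothetical, not taken from mm/memory_hotplug.c):

/* Illustrative only: hypothetical hotplug-style helpers showing that the
 * cached count is maintained as a side effect of the existing API. */
static int bring_node_up(int nid)
{
	/* ... allocate the pg_data_t, build zonelists, etc. ... */
	node_set_online(nid);		/* also refreshes nr_online_nodes */
	return 0;
}

static void take_node_down(int nid)
{
	/* ... migrate memory away, tear the node structures down ... */
	node_set_offline(nid);		/* nr_online_nodes drops accordingly */
}
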
4 changes: 2 additions & 2 deletions mm/hugetlb.c
@@ -875,7 +875,7 @@ static void return_unused_surplus_pages(struct hstate *h,
 	 * can no longer free unreserved surplus pages. This occurs when
 	 * the nodes with surplus pages have no free pages.
 	 */
-	unsigned long remaining_iterations = num_online_nodes();
+	unsigned long remaining_iterations = nr_online_nodes;
 
 	/* Uncommit the reservation */
 	h->resv_huge_pages -= unused_resv_pages;
@@ -904,7 +904,7 @@ static void return_unused_surplus_pages(struct hstate *h,
 			h->surplus_huge_pages--;
 			h->surplus_huge_pages_node[nid]--;
 			nr_pages--;
-			remaining_iterations = num_online_nodes();
+			remaining_iterations = nr_online_nodes;
 		}
 	}
 }
10 changes: 6 additions & 4 deletions mm/page_alloc.c
@@ -161,7 +161,9 @@ static unsigned long __meminitdata dma_reserve;
 
 #if MAX_NUMNODES > 1
 int nr_node_ids __read_mostly = MAX_NUMNODES;
+int nr_online_nodes __read_mostly = 1;
 EXPORT_SYMBOL(nr_node_ids);
+EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
 int page_group_by_mobility_disabled __read_mostly;
@@ -1466,7 +1468,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 		if (NUMA_BUILD)
 			zlc_mark_zone_full(zonelist, z);
 try_next_zone:
-		if (NUMA_BUILD && !did_zlc_setup && num_online_nodes() > 1) {
+		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
 			/*
 			 * we do zlc_setup after the first zone is tried but only
 			 * if there are multiple nodes make it worthwhile
@@ -2265,7 +2267,7 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
 }
 
 
-#define MAX_NODE_LOAD (num_online_nodes())
+#define MAX_NODE_LOAD (nr_online_nodes)
 static int node_load[MAX_NUMNODES];
 
 /**
@@ -2474,7 +2476,7 @@ static void build_zonelists(pg_data_t *pgdat)
 
 	/* NUMA-aware ordering of nodes */
 	local_node = pgdat->node_id;
-	load = num_online_nodes();
+	load = nr_online_nodes;
 	prev_node = local_node;
 	nodes_clear(used_mask);
 
@@ -2625,7 +2627,7 @@ void build_all_zonelists(void)
 
 	printk("Built %i zonelists in %s order, mobility grouping %s. "
 		"Total pages: %ld\n",
-			num_online_nodes(),
+			nr_online_nodes,
 			zonelist_order_name[current_zonelist_order],
 			page_group_by_mobility_disabled ? "off" : "on",
 			vm_total_pages);
2 changes: 1 addition & 1 deletion mm/slub.c
@@ -3737,7 +3737,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 					 to_cpumask(l->cpus));
 		}
 
-		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
+		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " nodes=");
 			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
2 changes: 1 addition & 1 deletion net/sunrpc/svc.c
@@ -124,7 +124,7 @@ svc_pool_map_choose_mode(void)
 {
 	unsigned int node;
 
-	if (num_online_nodes() > 1) {
+	if (nr_online_nodes > 1) {
 		/*
 		 * Actually have multiple NUMA nodes,
 		 * so split pools on NUMA node boundaries
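
svc_pool_map_choose_mode() lives in sunrpc, which can be built as a module; that is presumably why the mm/page_alloc.c hunk above exports the new variable with EXPORT_SYMBOL(nr_online_nodes). A hypothetical out-of-tree module (not part of this patch, shown only to illustrate the exported symbol) could then read the cached count directly:

/* Hypothetical module, for illustration only: reads the exported counter
 * with a plain load instead of calling num_online_nodes(). */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/nodemask.h>

static int __init nodecount_init(void)
{
	printk(KERN_INFO "nodecount: %d NUMA node(s) online\n", nr_online_nodes);
	return 0;
}

static void __exit nodecount_exit(void)
{
}

module_init(nodecount_init);
module_exit(nodecount_exit);
MODULE_LICENSE("GPL");
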
