mempolicy: move rebind functions
Move the mpol_rebind_{policy,task,mm}() functions after mpol_new() to avoid
having to declare function prototypes.

Cc: Paul Jackson <pj@sgi.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
rientjes authored and torvalds committed Apr 28, 2008
1 parent 65d66fc · commit 1d0d268
Showing 1 changed file, mm/mempolicy.c, with 91 additions and 94 deletions.
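The rule the commit exploits is simple: in C, a static function needs a separate prototype only if it is called before its definition appears in the translation unit, so defining callees ahead of their first caller makes forward declarations redundant. A minimal standalone sketch of the pattern, with hypothetical helper()/caller() names that are not from mempolicy.c:

#include <stdio.h>

/* Before the move: helper() is defined after its caller, so a
 * forward declaration is required for the call to compile. */
static void helper(void);	/* the kind of prototype the commit removes */

static void caller(void)
{
	helper();
}

static void helper(void)
{
	printf("helper\n");
}

/* After the move: the definition precedes the first caller, so no
 * prototype is needed. */
static void helper2(void)
{
	printf("helper2\n");
}

static void caller2(void)
{
	helper2();
}

int main(void)
{
	caller();
	caller2();
	return 0;
}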
@@ -110,9 +110,6 @@ struct mempolicy default_policy = {
 	.policy = MPOL_DEFAULT,
 };
 
-static void mpol_rebind_policy(struct mempolicy *pol,
-			       const nodemask_t *newmask);
-
 /* Check that the nodemask contains at least one populated zone */
 static int is_valid_nodemask(nodemask_t *nodemask)
 {
@@ -203,6 +200,97 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 	return ERR_PTR(-EINVAL);
 }
 
+/* Migrate a policy to a different set of nodes */
+static void mpol_rebind_policy(struct mempolicy *pol,
+			       const nodemask_t *newmask)
+{
+	nodemask_t tmp;
+	int static_nodes;
+	int relative_nodes;
+
+	if (!pol)
+		return;
+	static_nodes = pol->flags & MPOL_F_STATIC_NODES;
+	relative_nodes = pol->flags & MPOL_F_RELATIVE_NODES;
+	if (!mpol_store_user_nodemask(pol) &&
+	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
+		return;
+
+	switch (pol->policy) {
+	case MPOL_DEFAULT:
+		break;
+	case MPOL_BIND:
+		/* Fall through */
+	case MPOL_INTERLEAVE:
+		if (static_nodes)
+			nodes_and(tmp, pol->w.user_nodemask, *newmask);
+		else if (relative_nodes)
+			mpol_relative_nodemask(&tmp, &pol->w.user_nodemask,
+					       newmask);
+		else {
+			nodes_remap(tmp, pol->v.nodes,
+				    pol->w.cpuset_mems_allowed, *newmask);
+			pol->w.cpuset_mems_allowed = *newmask;
+		}
+		pol->v.nodes = tmp;
+		if (!node_isset(current->il_next, tmp)) {
+			current->il_next = next_node(current->il_next, tmp);
+			if (current->il_next >= MAX_NUMNODES)
+				current->il_next = first_node(tmp);
+			if (current->il_next >= MAX_NUMNODES)
+				current->il_next = numa_node_id();
+		}
+		break;
+	case MPOL_PREFERRED:
+		if (static_nodes) {
+			int node = first_node(pol->w.user_nodemask);
+
+			if (node_isset(node, *newmask))
+				pol->v.preferred_node = node;
+			else
+				pol->v.preferred_node = -1;
+		} else if (relative_nodes) {
+			mpol_relative_nodemask(&tmp, &pol->w.user_nodemask,
+					       newmask);
+			pol->v.preferred_node = first_node(tmp);
+		} else {
+			pol->v.preferred_node = node_remap(pol->v.preferred_node,
+					pol->w.cpuset_mems_allowed, *newmask);
+			pol->w.cpuset_mems_allowed = *newmask;
+		}
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+/*
+ * Wrapper for mpol_rebind_policy() that just requires task
+ * pointer, and updates task mempolicy.
+ */
+
+void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
+{
+	mpol_rebind_policy(tsk->mempolicy, new);
+}
+
+/*
+ * Rebind each vma in mm to new nodemask.
+ *
+ * Call holding a reference to mm. Takes mm->mmap_sem during call.
+ */
+
+void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
+{
+	struct vm_area_struct *vma;
+
+	down_write(&mm->mmap_sem);
+	for (vma = mm->mmap; vma; vma = vma->vm_next)
+		mpol_rebind_policy(vma->vm_policy, new);
+	up_write(&mm->mmap_sem);
+}
+
 static void gather_stats(struct page *, void *, int pte_dirty);
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags);
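An aside on what the moved function does: mpol_rebind_policy() picks one of three strategies depending on the policy's flags. MPOL_F_STATIC_NODES takes the user's mask literally and intersects it with the newly allowed nodes; MPOL_F_RELATIVE_NODES reinterprets the user's mask as ordinals into the new mask; the default case remaps each old node positionally onto the new mask. The sketch below is a toy userspace model on 32-bit bitmasks, not the kernel's nodemask_t API, and it omits the wrap-around that the kernel's mpol_relative_nodemask() applies when the user mask has more set bits than the new mask allows:

#include <stdio.h>

/* Toy model of two of the rebind strategies; plain unsigned bitmasks
 * stand in for nodemask_t (an assumption for illustration only). */

/* MPOL_F_STATIC_NODES: the user's mask is meant literally, so the
 * rebind is a plain intersection with the newly allowed nodes. */
static unsigned rebind_static(unsigned user, unsigned allowed)
{
	return user & allowed;		/* cf. nodes_and() */
}

/* MPOL_F_RELATIVE_NODES: bit N of the user's mask selects the Nth set
 * bit of the newly allowed mask (no wrap-around in this sketch). */
static unsigned rebind_relative(unsigned user, unsigned allowed)
{
	unsigned out = 0, ordinal = 0;
	int bit;

	for (bit = 0; bit < 32; bit++) {
		if (!(allowed & (1u << bit)))
			continue;
		if (user & (1u << ordinal))
			out |= 1u << bit;
		ordinal++;
	}
	return out;
}

int main(void)
{
	/* user asked for nodes {1,2}; the cpuset now allows {2,3} */
	printf("static:   0x%x\n", rebind_static(0x6, 0xc));	/* 0x4: only node 2 survives */
	printf("relative: 0x%x\n", rebind_relative(0x6, 0xc));	/* 0x8: ordinal 1 -> node 3 */
	return 0;
}

The default, no-flag case, not modeled here, instead uses nodes_remap() to shift each old node positionally from the old cpuset mask (pol->w.cpuset_mems_allowed) to the new one.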
@@ -1757,97 +1845,6 @@ void numa_default_policy(void)
 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
 }
 
-/* Migrate a policy to a different set of nodes */
-static void mpol_rebind_policy(struct mempolicy *pol,
-			       const nodemask_t *newmask)
-{
-	nodemask_t tmp;
-	int static_nodes;
-	int relative_nodes;
-
-	if (!pol)
-		return;
-	static_nodes = pol->flags & MPOL_F_STATIC_NODES;
-	relative_nodes = pol->flags & MPOL_F_RELATIVE_NODES;
-	if (!mpol_store_user_nodemask(pol) &&
-	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
-		return;
-
-	switch (pol->policy) {
-	case MPOL_DEFAULT:
-		break;
-	case MPOL_BIND:
-		/* Fall through */
-	case MPOL_INTERLEAVE:
-		if (static_nodes)
-			nodes_and(tmp, pol->w.user_nodemask, *newmask);
-		else if (relative_nodes)
-			mpol_relative_nodemask(&tmp, &pol->w.user_nodemask,
-					       newmask);
-		else {
-			nodes_remap(tmp, pol->v.nodes,
-				    pol->w.cpuset_mems_allowed, *newmask);
-			pol->w.cpuset_mems_allowed = *newmask;
-		}
-		pol->v.nodes = tmp;
-		if (!node_isset(current->il_next, tmp)) {
-			current->il_next = next_node(current->il_next, tmp);
-			if (current->il_next >= MAX_NUMNODES)
-				current->il_next = first_node(tmp);
-			if (current->il_next >= MAX_NUMNODES)
-				current->il_next = numa_node_id();
-		}
-		break;
-	case MPOL_PREFERRED:
-		if (static_nodes) {
-			int node = first_node(pol->w.user_nodemask);
-
-			if (node_isset(node, *newmask))
-				pol->v.preferred_node = node;
-			else
-				pol->v.preferred_node = -1;
-		} else if (relative_nodes) {
-			mpol_relative_nodemask(&tmp, &pol->w.user_nodemask,
-					       newmask);
-			pol->v.preferred_node = first_node(tmp);
-		} else {
-			pol->v.preferred_node = node_remap(pol->v.preferred_node,
-					pol->w.cpuset_mems_allowed, *newmask);
-			pol->w.cpuset_mems_allowed = *newmask;
-		}
-		break;
-	default:
-		BUG();
-		break;
-	}
-}
-
-/*
- * Wrapper for mpol_rebind_policy() that just requires task
- * pointer, and updates task mempolicy.
- */
-
-void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
-{
-	mpol_rebind_policy(tsk->mempolicy, new);
-}
-
-/*
- * Rebind each vma in mm to new nodemask.
- *
- * Call holding a reference to mm. Takes mm->mmap_sem during call.
- */
-
-void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
-{
-	struct vm_area_struct *vma;
-
-	down_write(&mm->mmap_sem);
-	for (vma = mm->mmap; vma; vma = vma->vm_next)
-		mpol_rebind_policy(vma->vm_policy, new);
-	up_write(&mm->mmap_sem);
-}
-
 /*
  * Display pages allocated per node and memory policy via /proc.
  */
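One more detail from the moved MPOL_INTERLEAVE branch: when rebinding invalidates the task's current interleave position (il_next), the code advances to the next node in the new mask, wraps to the first node if it ran off the end, and finally falls back to the local node if the mask is empty. A minimal userspace sketch of that fixup; the next_node()/first_node() helpers, MAX_NODES, and DEFAULT_NODE below are simplified stand-ins for the kernel's helpers, MAX_NUMNODES, and numa_node_id() (assumptions, not the kernel API):

#include <stdio.h>

#define MAX_NODES	32	/* stand-in for MAX_NUMNODES */
#define DEFAULT_NODE	0	/* stand-in for numa_node_id() */

/* Return the next node above n that is set in mask, or MAX_NODES. */
static int next_node(int n, unsigned mask)
{
	for (n = n + 1; n < MAX_NODES; n++)
		if (mask & (1u << n))
			return n;
	return MAX_NODES;
}

/* Return the lowest set node, or MAX_NODES if the mask is empty. */
static int first_node(unsigned mask)
{
	return mask ? next_node(-1, mask) : MAX_NODES;
}

/* Mirror of the il_next fixup: keep the position if still valid,
 * else advance, else wrap, else fall back to a default node. */
static int fixup_il_next(int il_next, unsigned new_mask)
{
	if (new_mask & (1u << il_next))
		return il_next;
	il_next = next_node(il_next, new_mask);
	if (il_next >= MAX_NODES)
		il_next = first_node(new_mask);
	if (il_next >= MAX_NODES)
		il_next = DEFAULT_NODE;
	return il_next;
}

int main(void)
{
	printf("%d\n", fixup_il_next(2, 0x30));	/* 2 not in {4,5} -> 4 */
	printf("%d\n", fixup_il_next(5, 0x09));	/* past {0,3} -> wraps to 0 */
	printf("%d\n", fixup_il_next(1, 0x00));	/* empty mask -> DEFAULT_NODE */
	return 0;
}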
