
Commit

Merge "Break out create_contiguous_mspace_with_base
This routine allows creating a contiguous mspace from raw mapped memory. In turn, this will enable preallocation of the 3 heap spaces, which will help remembered sets and zygote/app checks given pointer values."
Barry Hayes authored and Android (Google) Code Review committed Feb 9, 2010
2 parents 92b2566 + b17772d commit 2dcf1fb
Showing 2 changed files with 73 additions and 40 deletions.
6 changes: 6 additions & 0 deletions include/cutils/mspace.h
@@ -80,6 +80,12 @@ mspace create_contiguous_mspace(size_t starting_capacity, max_capacity,
mspace create_contiguous_mspace_with_name(size_t starting_capacity,
size_t max_capacity, int locked, const char *name);

/*
Identical to create_contiguous_mspace, but uses previously mapped memory.
*/
mspace create_contiguous_mspace_with_base(size_t starting_capacity,
size_t max_capacity, int locked, void *base);

size_t destroy_contiguous_mspace(mspace msp);
#endif
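
For context, here is a hypothetical caller-side sketch of how the new entry point could be used: the caller maps a page-aligned region itself and hands it to create_contiguous_mspace_with_base. The helper name, mapping flags, and error handling are illustrative assumptions, not part of this change.

#include <stddef.h>
#include <sys/mman.h>
#include <cutils/mspace.h>

mspace example_create_heap(size_t starting, size_t maximum) {
    /* maximum is expected to be page-aligned and starting <= maximum,
     * as the new routine asserts; base must be page-aligned too, which
     * mmap guarantees. */
    void *base = mmap(NULL, maximum, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED)
        return (mspace)0;

    mspace msp = create_contiguous_mspace_with_base(starting, maximum,
                                                    /*locked=*/0, base);
    if (msp == (mspace)0)
        munmap(base, maximum);  /* with_base does not own the mapping */
    return msp;
}

Allocation then proceeds through the normal mspace calls, and on failure the caller unmaps the region itself, which is exactly what the reworked create_contiguous_mspace_with_name in the diff below does.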

107 changes: 67 additions & 40 deletions libcutils/mspace.c
@@ -134,77 +134,58 @@ assert(nb >= 0); //xxx deal with the trim case
return oldbrk;
}

mspace create_contiguous_mspace_with_name(size_t starting_capacity,
size_t max_capacity, int locked, char const * name) {
int fd, ret;
mspace create_contiguous_mspace_with_base(size_t starting_capacity,
size_t max_capacity, int locked, void *base) {
struct mspace_contig_state *cs;
char buf[ASHMEM_NAME_LEN] = "mspace";
void *base;
unsigned int pagesize;
mstate m;

if (starting_capacity > max_capacity)
return (mspace)0;

init_mparams();
pagesize = PAGESIZE;

/* Create the anonymous memory that will back the mspace.
* This reserves all of the virtual address space we could
* ever need. Physical pages will be mapped as the memory
* is touched.
*
* Align max_capacity to a whole page.
*/
max_capacity = (size_t)ALIGN_UP(max_capacity, pagesize);

if (name)
snprintf(buf, sizeof(buf), "mspace/%s", name);
fd = ashmem_create_region(buf, max_capacity);
if (fd < 0)
return (mspace)0;

base = mmap(NULL, max_capacity, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
close(fd);
if (base == MAP_FAILED)
return (mspace)0;

/* Make sure that base is at the beginning of a page.
*/
assert(starting_capacity <= max_capacity);
assert(((uintptr_t)base & (pagesize-1)) == 0);
assert(((uintptr_t)max_capacity & (pagesize-1)) == 0);
starting_capacity = (size_t)ALIGN_UP(starting_capacity, pagesize);

/* Reserve some space for the information that our MORECORE needs.
/* Make the first page read/write. dlmalloc needs to use that page.
*/
cs = base;
if (mprotect(base, starting_capacity, PROT_READ | PROT_WRITE) < 0) {
goto error;
}

/* Create the mspace, pointing to the memory we just reserved.
/* Create the mspace, pointing to the memory given.
*/
m = create_mspace_with_base((char *)base + sizeof(*cs), starting_capacity,
locked);
if (m == (mspace)0)
if (m == (mspace)0) {
goto error;

/* Make sure that m is in the same page as cs.
}
/* Make sure that m is in the same page as base.
*/
assert(((uintptr_t)m & (uintptr_t)~(pagesize-1)) == (uintptr_t)base);
/* Use some space for the information that our MORECORE needs.
*/
cs = (struct mspace_contig_state *)base;

/* Find out exactly how much of the memory the mspace
* is using.
*/
cs->brk = m->seg.base + m->seg.size;
cs->top = (char *)base + max_capacity;

assert((char *)base <= cs->brk);
assert(cs->brk <= cs->top);

/* Prevent access to the memory we haven't handed out yet.
*/
if (cs->brk != cs->top) {
/* mprotect() requires page-aligned arguments, but it's possible
* for cs->brk not to be page-aligned at this point.
*/
char *prot_brk = (char *)ALIGN_UP(cs->brk, pagesize);
if (mprotect(prot_brk, cs->top - prot_brk, PROT_NONE) < 0)
if ((mprotect(base, prot_brk - (char *)base, PROT_READ | PROT_WRITE) < 0) ||
(mprotect(prot_brk, cs->top - prot_brk, PROT_NONE) < 0)) {
goto error;
}
}

cs->m = m;
@@ -213,10 +194,56 @@ mspace create_contiguous_mspace_with_name(size_t starting_capacity,
return (mspace)m;

error:
munmap(base, max_capacity);
return (mspace)0;
}
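
The cs->brk and cs->top fields set up above feed the MORECORE path whose tail (assert(nb >= 0); ... return oldbrk;) appears at the top of this hunk: brk marks the end of the memory already granted to dlmalloc, top marks the end of the reservation, and growing the heap means advancing brk and making the newly exposed pages readable and writable. The following is an illustrative sketch of that mechanism, not the literal libcutils code; the ex_/EX_ names are stand-ins for the struct, PAGESIZE, and ALIGN_UP used in mspace.c.

#include <stdint.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/mman.h>

#define EX_PAGESIZE       4096u   /* stand-in for PAGESIZE */
#define EX_ALIGN_UP(p, a) (((uintptr_t)(p) + (a) - 1) & ~((uintptr_t)(a) - 1))

struct ex_contig_state {   /* mirrors the brk/top part of mspace_contig_state */
    char *brk;             /* end of memory already handed to dlmalloc */
    char *top;             /* end of the whole reserved mapping        */
};

/* Grant nb more bytes to the allocator, a whole page at a time. */
static void *ex_morecore(struct ex_contig_state *cs, ssize_t nb) {
    char *oldbrk = cs->brk;
    if (nb > 0) {
        char *newbrk = (char *)EX_ALIGN_UP(oldbrk + nb, EX_PAGESIZE);
        if (newbrk > cs->top)
            return NULL;   /* reservation exhausted */
        /* brk may sit mid-page; round down so mprotect gets aligned args. */
        char *page = (char *)((uintptr_t)oldbrk & ~((uintptr_t)EX_PAGESIZE - 1));
        if (mprotect(page, (size_t)(newbrk - page), PROT_READ | PROT_WRITE) < 0)
            return NULL;
        cs->brk = newbrk;
    }
    return oldbrk;   /* the old break is the start of the newly granted region */
}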


mspace create_contiguous_mspace_with_name(size_t starting_capacity,
size_t max_capacity, int locked, char const *name) {
int fd, ret;
char buf[ASHMEM_NAME_LEN] = "mspace";
void *base;
unsigned int pagesize;
mstate m;

if (starting_capacity > max_capacity)
return (mspace)0;

init_mparams();
pagesize = PAGESIZE;

/* Create the anonymous memory that will back the mspace.
* This reserves all of the virtual address space we could
* ever need. Physical pages will be mapped as the memory
* is touched.
*
* Align max_capacity to a whole page.
*/
max_capacity = (size_t)ALIGN_UP(max_capacity, pagesize);

if (name)
snprintf(buf, sizeof(buf), "mspace/%s", name);
fd = ashmem_create_region(buf, max_capacity);
if (fd < 0)
return (mspace)0;

base = mmap(NULL, max_capacity, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
close(fd);
if (base == MAP_FAILED)
return (mspace)0;

/* Make sure that base is at the beginning of a page.
*/
assert(((uintptr_t)base & (pagesize-1)) == 0);

m = create_contiguous_mspace_with_base(starting_capacity, max_capacity,
locked, base);
if (m == 0) {
munmap(base, max_capacity);
}
return m;
}

mspace create_contiguous_mspace(size_t starting_capacity,
size_t max_capacity, int locked) {
return create_contiguous_mspace_with_name(starting_capacity,
