Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
  dm: detect lost queue
  dm: publish dm_vcalloc
  dm: publish dm_table_unplug_all
  dm: publish dm_get_mapinfo
  dm: export struct dm_dev
  dm crypt: avoid unnecessary wait when splitting bio
  dm crypt: tidy ctx pending
  dm crypt: fix async inc_pending
  dm crypt: move dec_pending on error into write_io_submit
  dm crypt: remove inc_pending from write_io_submit
  dm crypt: tidy write loop pending
  dm crypt: tidy crypt alloc
  dm crypt: tidy inc pending
  dm exception store: use chunk_t for_areas
  dm exception store: introduce area_location function
  dm raid1: kcopyd should stop on error if errors handled
  dm mpath: remove is_active from struct dm_path
  dm mpath: use more error codes

Fixed up trivial conflict in drivers/md/dm-mpath.c manually.
torvalds committed Oct 10, 2008
2 parents 73f6aa4 + 0c2322e · commit b0af205
Showing 9 changed files with 190 additions and 118 deletions.
109 changes: 66 additions & 43 deletions drivers/md/dm-crypt.c
@@ -333,7 +333,6 @@ static void crypt_convert_init(struct crypt_config *cc,
 	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
 	ctx->sector = sector + cc->iv_offset;
 	init_completion(&ctx->restart);
-	atomic_set(&ctx->pending, 1);
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
@@ -408,6 +407,8 @@ static int crypt_convert(struct crypt_config *cc,
 {
 	int r;
 
+	atomic_set(&ctx->pending, 1);
+
 	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
 	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
 
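The pending counter for a conversion context is now primed inside crypt_convert() itself rather than in crypt_convert_init(): it starts at 1 as the synchronous caller's own reference, each block handed to async crypto takes another, and whoever drops the count to zero knows the context is fully drained. A minimal userspace sketch of the pattern, using C11 atomics in place of the kernel's atomic_t (all names here are hypothetical):

#include <stdatomic.h>
#include <stdio.h>

struct convert_ctx {
	atomic_int pending;	/* 1 while the convert loop runs, +1 per in-flight block */
};

/* Prime the counter so the context cannot appear drained mid-loop. */
static void convert_begin(struct convert_ctx *ctx)
{
	atomic_store(&ctx->pending, 1);
}

/* One block handed to (possibly async) crypto. */
static void block_submitted(struct convert_ctx *ctx)
{
	atomic_fetch_add(&ctx->pending, 1);
}

/* Returns 1 when the last reference is dropped, i.e. all blocks done. */
static int block_done(struct convert_ctx *ctx)
{
	return atomic_fetch_sub(&ctx->pending, 1) == 1;
}

int main(void)
{
	struct convert_ctx ctx;

	convert_begin(&ctx);
	block_submitted(&ctx);			/* one async block in flight */
	printf("%d\n", block_done(&ctx));	/* 0: the block is still pending */
	printf("%d\n", block_done(&ctx));	/* 1: caller's ref dropped, drained */
	return 0;
}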
@@ -456,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages
+ * May return a smaller bio when running out of pages, indicated by
+ * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+				      unsigned *out_of_pages)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -472,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		return NULL;
 
 	clone_init(io, clone);
+	*out_of_pages = 0;
 
 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page)
+		if (!page) {
+			*out_of_pages = 1;
 			break;
+		}
 
 		/*
 		 * if additional pages cannot be allocated without waiting,
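The new out_of_pages flag lets a caller tell the two reasons for a short buffer apart: the page mempool ran dry (worth throttling before retrying, as the write path below does with congestion_wait()) versus the bio merely hitting its page-vector limit (safe to continue immediately). A toy model of just that contract (hypothetical names, not the kernel API):

#include <stdio.h>

/* Grant at most 'avail' pages; set *out_of_pages only when the pool,
 * not the request size, was the limiting factor. */
static unsigned alloc_model(unsigned want, unsigned avail,
			    unsigned *out_of_pages)
{
	*out_of_pages = 0;
	if (want > avail) {
		*out_of_pages = 1;
		return avail;
	}
	return want;
}

int main(void)
{
	unsigned oom, got;

	got = alloc_model(8, 16, &oom);
	printf("got %u, out_of_pages=%u\n", got, oom);	/* got 8, flag 0 */

	got = alloc_model(8, 3, &oom);
	printf("got %u, out_of_pages=%u\n", got, oom);	/* got 3, flag 1 */
	return 0;
}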
@@ -517,6 +523,27 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 	}
 }
 
+static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
+					  struct bio *bio, sector_t sector)
+{
+	struct crypt_config *cc = ti->private;
+	struct dm_crypt_io *io;
+
+	io = mempool_alloc(cc->io_pool, GFP_NOIO);
+	io->target = ti;
+	io->base_bio = bio;
+	io->sector = sector;
+	io->error = 0;
+	atomic_set(&io->pending, 0);
+
+	return io;
+}
+
+static void crypt_inc_pending(struct dm_crypt_io *io)
+{
+	atomic_inc(&io->pending);
+}
+
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
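crypt_io_alloc() consolidates the initialization that was previously open-coded in crypt_map() (see the final hunk of this file), and crypt_inc_pending() names the refcount-taking idiom; its counterpart crypt_dec_pending(), already in this file, completes the base bio and frees the io once the count reaches zero. A runnable userspace sketch of that lifecycle (stand-in types, hypothetical completion step):

#include <stdatomic.h>
#include <stdio.h>

struct io_model {
	atomic_int pending;
	int error;
};

static void inc_pending(struct io_model *io)
{
	atomic_fetch_add(&io->pending, 1);
}

static void dec_pending(struct io_model *io)
{
	if (atomic_fetch_sub(&io->pending, 1) != 1)
		return;
	/* last reference: dm-crypt would do bio_endio() + mempool_free() */
	printf("io complete, error=%d\n", io->error);
}

int main(void)
{
	struct io_model io = { .error = 0 };

	atomic_store(&io.pending, 0);	/* as in crypt_io_alloc() */
	inc_pending(&io);		/* the map path takes its reference */
	inc_pending(&io);		/* a clone bio in flight */
	dec_pending(&io);		/* clone finishes: not the last ref */
	dec_pending(&io);		/* map path drops: io completes */
	return 0;
}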
@@ -591,7 +618,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
 
-	atomic_inc(&io->pending);
+	crypt_inc_pending(io);
 
 	/*
 	 * The block layer might modify the bvec array, so always
@@ -653,6 +680,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
 		crypt_free_buffer_pages(cc, clone);
 		bio_put(clone);
 		io->error = -EIO;
+		crypt_dec_pending(io);
 		return;
 	}
 
@@ -664,66 +692,67 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
 
 	if (async)
 		kcryptd_queue_io(io);
-	else {
-		atomic_inc(&io->pending);
+	else
 		generic_make_request(clone);
-	}
 }
 
-static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
+static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
+	int crypt_finished;
+	unsigned out_of_pages = 0;
 	unsigned remaining = io->base_bio->bi_size;
 	int r;
 
+	/*
+	 * Prevent io from disappearing until this function completes.
+	 */
+	crypt_inc_pending(io);
+	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
+
 	/*
 	 * The allocated buffers can be smaller than the whole bio,
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining);
+		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
 		if (unlikely(!clone)) {
 			io->error = -ENOMEM;
-			return;
+			break;
 		}
 
 		io->ctx.bio_out = clone;
 		io->ctx.idx_out = 0;
 
 		remaining -= clone->bi_size;
 
+		crypt_inc_pending(io);
 		r = crypt_convert(cc, &io->ctx);
+		crypt_finished = atomic_dec_and_test(&io->ctx.pending);
 
-		if (atomic_dec_and_test(&io->ctx.pending)) {
-			/* processed, no running async crypto */
+		/* Encryption was already finished, submit io now */
+		if (crypt_finished) {
 			kcryptd_crypt_write_io_submit(io, r, 0);
-			if (unlikely(r < 0))
-				return;
-		} else
-			atomic_inc(&io->pending);
 
-		/* out of memory -> run queues */
-		if (unlikely(remaining)) {
-			/* wait for async crypto then reinitialize pending */
-			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
-			atomic_set(&io->ctx.pending, 1);
-			congestion_wait(WRITE, HZ/100);
+			/*
+			 * If there was an error, do not try next fragments.
+			 * For async, error is processed in async handler.
+			 */
+			if (unlikely(r < 0))
+				break;
 		}
-	}
-}
 
-static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
-{
-	struct crypt_config *cc = io->target->private;
-
-	/*
-	 * Prevent io from disappearing until this function completes.
-	 */
-	atomic_inc(&io->pending);
+		/*
+		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
+		 */
+		if (unlikely(out_of_pages))
+			congestion_wait(WRITE, HZ/100);
 
-	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
-	kcryptd_crypt_write_convert_loop(io);
+		if (unlikely(remaining))
+			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+	}
 
 	crypt_dec_pending(io);
 }
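The merged loop submits each fragment as soon as its synchronous part finishes, throttles with congestion_wait() only when the split was caused by memory pressure, and waits for all of the previous fragment's async conversions to drop their references before the shared convert context is reused. A pthread model of that wait_event()/wake-up handshake (hypothetical names; the kernel side uses cc->writeq and the ctx pending counter):

#include <pthread.h>

struct waitq {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int pending;		/* models atomic_read(&io->ctx.pending) */
};

/* Completion side: drop a reference and wake waiters, like wake_up(). */
void waitq_put(struct waitq *wq)
{
	pthread_mutex_lock(&wq->lock);
	if (--wq->pending == 0)
		pthread_cond_broadcast(&wq->cond);
	pthread_mutex_unlock(&wq->lock);
}

/* Write-loop side: like wait_event(cc->writeq, !atomic_read(&pending)). */
void waitq_drain(struct waitq *wq)
{
	pthread_mutex_lock(&wq->lock);
	while (wq->pending)
		pthread_cond_wait(&wq->cond, &wq->lock);
	pthread_mutex_unlock(&wq->lock);
}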
@@ -741,7 +770,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 	struct crypt_config *cc = io->target->private;
 	int r = 0;
 
-	atomic_inc(&io->pending);
+	crypt_inc_pending(io);
 
 	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
 			   io->sector);
@@ -1108,15 +1137,9 @@ static void crypt_dtr(struct dm_target *ti)
 static int crypt_map(struct dm_target *ti, struct bio *bio,
 		     union map_info *map_context)
 {
-	struct crypt_config *cc = ti->private;
 	struct dm_crypt_io *io;
 
-	io = mempool_alloc(cc->io_pool, GFP_NOIO);
-	io->target = ti;
-	io->base_bio = bio;
-	io->sector = bio->bi_sector - ti->begin;
-	io->error = 0;
-	atomic_set(&io->pending, 0);
+	io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
 
 	if (bio_data_dir(io->base_bio) == READ)
 		kcryptd_queue_io(io);
29 changes: 19 additions & 10 deletions drivers/md/dm-exception-store.c
@@ -108,12 +108,12 @@ struct pstore {
 	 * Used to keep track of which metadata area the data in
 	 * 'chunk' refers to.
 	 */
-	uint32_t current_area;
+	chunk_t current_area;
 
 	/*
 	 * The next free chunk for an exception.
 	 */
-	uint32_t next_free;
+	chunk_t next_free;
 
 	/*
 	 * The index of next free exception in the current
@@ -175,7 +175,7 @@ static void do_metadata(struct work_struct *work)
 /*
  * Read or write a chunk aligned and sized block of data from a device.
  */
-static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
+static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
 {
 	struct dm_io_region where = {
 		.bdev = ps->snap->cow->bdev,
@@ -208,17 +208,24 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
 	return req.result;
 }
 
+/*
+ * Convert a metadata area index to a chunk index.
+ */
+static chunk_t area_location(struct pstore *ps, chunk_t area)
+{
+	return 1 + ((ps->exceptions_per_area + 1) * area);
+}
+
 /*
  * Read or write a metadata area. Remembering to skip the first
  * chunk which holds the header.
  */
-static int area_io(struct pstore *ps, uint32_t area, int rw)
+static int area_io(struct pstore *ps, chunk_t area, int rw)
 {
 	int r;
-	uint32_t chunk;
+	chunk_t chunk;
 
-	/* convert a metadata area index to a chunk index */
-	chunk = 1 + ((ps->exceptions_per_area + 1) * area);
+	chunk = area_location(ps, area);
 
 	r = chunk_io(ps, chunk, rw, 0);
 	if (r)
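area_location() encodes the exception store's on-disk layout: chunk 0 holds the header, and each area is one metadata chunk followed by exceptions_per_area data chunks. A standalone check of the arithmetic (the exceptions_per_area value is made up here; in the driver it depends on the chunk size):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t chunk_t;

/* Same formula as area_location() above. */
static chunk_t area_location(chunk_t exceptions_per_area, chunk_t area)
{
	return 1 + ((exceptions_per_area + 1) * area);
}

int main(void)
{
	chunk_t epa = 255;	/* hypothetical exceptions per area */

	for (chunk_t area = 0; area < 3; area++)
		printf("area %llu -> chunk %llu\n",
		       (unsigned long long)area,
		       (unsigned long long)area_location(epa, area));
	/* area 0 -> chunk 1, area 1 -> chunk 257, area 2 -> chunk 513 */
	return 0;
}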
@@ -228,7 +235,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw)
 	return 0;
 }
 
-static int zero_area(struct pstore *ps, uint32_t area)
+static int zero_area(struct pstore *ps, chunk_t area)
 {
 	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
 	return area_io(ps, area, WRITE);
@@ -404,7 +411,7 @@ static int insert_exceptions(struct pstore *ps, int *full)
 
 static int read_exceptions(struct pstore *ps)
 {
-	uint32_t area;
+	chunk_t area;
 	int r, full = 1;
 
 	/*
@@ -517,6 +524,7 @@ static int persistent_prepare(struct exception_store *store,
 {
 	struct pstore *ps = get_info(store);
 	uint32_t stride;
+	chunk_t next_free;
 	sector_t size = get_dev_size(store->snap->cow->bdev);
 
 	/* Is there enough room ? */
@@ -530,7 +538,8 @@ static int persistent_prepare(struct exception_store *store,
 	 * into account the location of the metadata chunks.
 	 */
 	stride = (ps->exceptions_per_area + 1);
-	if ((++ps->next_free % stride) == 1)
+	next_free = ++ps->next_free;
+	if (sector_div(next_free, stride) == 1)
 		ps->next_free++;
 
 	atomic_inc(&ps->pending_count);
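sector_div() replaces the plain '%' because next_free is now a chunk_t, which may be 64 bits wide, and 32-bit kernels have no native 64-bit modulo. The kernel macro divides its first argument in place and returns the remainder; a userspace model of the same idiom (model only, the real definition lives in the kernel headers):

#include <stdio.h>
#include <stdint.h>

static uint32_t sector_div_model(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);	/* fine in userspace */

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t next_free = 257;	/* hypothetical new chunk position */
	uint32_t stride = 256;		/* exceptions_per_area + 1 */

	/* Same test as the patch: remainder 1 means the position landed
	 * on a metadata chunk, so the store skips one chunk ahead. */
	if (sector_div_model(&next_free, stride) == 1)
		printf("skip: landed on a metadata chunk\n");
	return 0;
}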
4 changes: 2 additions & 2 deletions drivers/md/dm-ioctl.c
@@ -1131,7 +1131,7 @@ static void retrieve_deps(struct dm_table *table,
 	unsigned int count = 0;
 	struct list_head *tmp;
 	size_t len, needed;
-	struct dm_dev *dd;
+	struct dm_dev_internal *dd;
 	struct dm_target_deps *deps;
 
 	deps = get_result_buffer(param, param_size, &len);
@@ -1157,7 +1157,7 @@ static void retrieve_deps(struct dm_table *table,
 	deps->count = count;
 	count = 0;
 	list_for_each_entry (dd, dm_table_get_devices(table), list)
-		deps->dev[count++] = huge_encode_dev(dd->bdev->bd_dev);
+		deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);
 
 	param->data_size = param->data_start + needed;
 }
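The retrieve_deps() change follows from this merge's "dm: export struct dm_dev" commit: the public struct dm_dev keeps only what targets need, while the list linkage and refcount move into a dm-core wrapper, hence the extra dd->dm_dev hop. A kernel-context sketch of the presumed arrangement (abridged, not a standalone program; the authoritative definitions are in include/linux/device-mapper.h and drivers/md/dm.h of this tree):

/* Public handle, visible to targets. */
struct dm_dev {
	struct block_device *bdev;
	int mode;
	char name[16];
};

/* dm-core bookkeeping wrapped around it. */
struct dm_dev_internal {
	struct list_head list;	/* linkage on the table's device list */
	atomic_t count;		/* references held by the table */
	struct dm_dev dm_dev;	/* embedded public part handed to targets */
};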