Skip to content

Commit

Permalink
f3probe: speed up writing and reading
Browse files Browse the repository at this point in the history
This patch takes advantage of the new interface of struct device
to write and read sequential blocks with a single call.
  • Loading branch information
AltraMayor committed Nov 13, 2015
1 parent 067802c commit fe4e9dd
Show file tree
Hide file tree
Showing 3 changed files with 81 additions and 53 deletions.
8 changes: 1 addition & 7 deletions f3brew.c
Original file line number Diff line number Diff line change
Expand Up @@ -214,11 +214,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state)

static struct argp argp = {options, parse_opt, adoc, doc, NULL, NULL, NULL};

/* It must be a power of 2 greater than, or equal to 2^20.
* The current value is 1MB.
*/
#define BIG_BLOCK_SIZE_BYTE (1 << 20)

static void write_blocks(struct device *dev,
uint64_t first_block, uint64_t last_block)
{
Expand Down Expand Up @@ -381,9 +376,8 @@ static void read_blocks(struct device *dev,
assert(BIG_BLOCK_SIZE_BYTE >= block_size);

while (first_pos <= last_block) {
uint64_t next_pos = first_pos + step;
char *probe_blk = buffer;
uint64_t pos;
uint64_t pos, next_pos = first_pos + step;

if (next_pos > last_block)
next_pos = last_block;
Expand Down
19 changes: 8 additions & 11 deletions libdevs.h
Original file line number Diff line number Diff line change
Expand Up @@ -56,22 +56,19 @@ const char *dev_get_filename(struct device *dev);
* Methods
*/

/* One should use the following constant as the size of the buffer needed to
* batch writes or reads.
*
* It must be a power of 2 greater than, or equal to 2^20.
* The current value is 1MB.
*/
#define BIG_BLOCK_SIZE_BYTE (1 << 20)

int dev_read_blocks(struct device *dev, char *buf,
uint64_t first_pos, uint64_t last_pos);
int dev_write_blocks(struct device *dev, const char *buf,
uint64_t first_pos, uint64_t last_pos);

/* Read a single block into @buf.
 * Convenience wrapper that delegates to the batched interface with a
 * one-block range; returns whatever dev_read_blocks() returns.
 */
static inline int dev_read_block(struct device *dev, char *buf, uint64_t block)
{
	const uint64_t pos = block;	/* single-block range: [pos, pos] */
	return dev_read_blocks(dev, buf, pos, pos);
}

/* Write a single block from @buf.
 * Convenience wrapper that delegates to the batched interface with a
 * one-block range; returns whatever dev_write_blocks() returns.
 */
static inline int dev_write_block(struct device *dev, const char *buf,
	uint64_t block)
{
	const uint64_t pos = block;	/* single-block range: [pos, pos] */
	return dev_write_blocks(dev, buf, pos, pos);
}

int dev_reset(struct device *dev);
void free_device(struct device *dev);

Expand Down
107 changes: 72 additions & 35 deletions libprobe.c
Original file line number Diff line number Diff line change
Expand Up @@ -15,21 +15,29 @@ static int write_blocks(struct device *dev,
{
const int block_order = dev_get_block_order(dev);
const int block_size = dev_get_block_size(dev);
uint64_t offset = first_pos << block_order;
/* Aligning these pointers is necessary to directly read and write
* the block device.
* For the file device, this is superfluous.
*/
char stack[align_head(block_order) + (1 << block_order)];
char *stamp_blk = align_mem(stack, block_order);
uint64_t pos;
char stack[align_head(block_order) + BIG_BLOCK_SIZE_BYTE];
char *buffer = align_mem(stack, block_order);
char *stamp_blk = buffer;
char *flush_blk = buffer + BIG_BLOCK_SIZE_BYTE;
uint64_t offset = first_pos << block_order;
uint64_t pos, write_pos = first_pos;

for (pos = first_pos; pos <= last_pos; pos++) {
fill_buffer_with_block(stamp_blk, block_order, offset, salt);
if (dev_write_block(dev, stamp_blk, pos) &&
dev_write_block(dev, stamp_blk, pos))
return true;
stamp_blk += block_size;
offset += block_size;

if (stamp_blk == flush_blk || pos == last_pos) {
if (dev_write_blocks(dev, buffer, write_pos, pos) &&
dev_write_blocks(dev, buffer, write_pos, pos))
return true;
stamp_blk = buffer;
write_pos = pos + 1;
}
}

return false;
Expand Down Expand Up @@ -180,13 +188,14 @@ static int write_bisect_blocks(struct device *dev,
static int is_block_good(struct device *dev, uint64_t pos, int *pis_good,
uint64_t salt)
{
const int block_size = dev_get_block_size(dev);
const int block_order = dev_get_block_order(dev);
char stack[align_head(block_order) + (1 << block_order)];
char stack[align_head(block_order) + block_size];
char *probe_blk = align_mem(stack, block_order);
uint64_t found_offset;

if (dev_read_block(dev, probe_blk, pos) &&
dev_read_block(dev, probe_blk, pos))
if (dev_read_blocks(dev, probe_blk, pos, pos) &&
dev_read_blocks(dev, probe_blk, pos, pos))
return true;

*pis_good = !validate_buffer_with_block(probe_blk, block_order,
Expand Down Expand Up @@ -264,14 +273,38 @@ static int bisect(struct device *dev, struct bisect_stats *pstats,
static int count_good_blocks(struct device *dev, uint64_t *pcount,
uint64_t first_pos, uint64_t last_pos, uint64_t salt)
{
uint64_t pos, count = 0;

for (pos = first_pos; pos <= last_pos; pos++) {
int is_good;
if (is_block_good(dev, pos, &is_good, salt))
const int block_size = dev_get_block_size(dev);
const int block_order = dev_get_block_order(dev);
char stack[align_head(block_order) + BIG_BLOCK_SIZE_BYTE];
char *buffer = align_mem(stack, block_order);
uint64_t expected_sector_offset = first_pos << block_order;
uint64_t start_pos = first_pos;
uint64_t step = (BIG_BLOCK_SIZE_BYTE >> block_order) - 1;
uint64_t count = 0;

assert(BIG_BLOCK_SIZE_BYTE >= block_size);

while (start_pos <= last_pos) {
char *probe_blk = buffer;
uint64_t pos, next_pos = start_pos + step;

if (next_pos > last_pos)
next_pos = last_pos;
if (dev_read_blocks(dev, buffer, start_pos, next_pos) &&
dev_read_blocks(dev, buffer, start_pos, next_pos))
return true;
if (is_good)
count++;

for (pos = start_pos; pos <= next_pos; pos++) {
uint64_t found_sector_offset;
if (!validate_buffer_with_block(probe_blk, block_order,
&found_sector_offset, salt) &&
expected_sector_offset == found_sector_offset)
count++;
expected_sector_offset += block_size;
probe_blk += block_size;
}

start_pos = next_pos + 1;
}

*pcount = count;
Expand Down Expand Up @@ -408,28 +441,32 @@ static int find_a_bad_block(struct device *dev,
n = gap;
for (i = 0; i < n; i++)
samples[i] = left_pos + 1 + i;

/* Write @samples. */
if (write_blocks(dev, left_pos + 1, *pright_pos - 1, salt))
return true;
} else {
n = N_BLOCK_SAMPLES;
for (i = 0; i < n; i++)
samples[i] = uint64_rand_range(left_pos + 1,
*pright_pos - 1);
}

/* Sort entries of @samples to minimize reads.
* As soon as one finds a bad block, one can stop and ignore
* the remaining blocks because the found bad block is
* the leftmost bad block.
*/
qsort(samples, n, sizeof(uint64_t), uint64_cmp);

/* Write @samples. */
prv_sample = left_pos;
for (i = 0; i < n; i++) {
if (samples[i] == prv_sample)
continue;
prv_sample = samples[i];
if (write_blocks(dev, prv_sample, prv_sample, salt))
return true;
/* Sort entries of @samples to minimize reads.
* As soon as one finds a bad block, one can stop and ignore
* the remaining blocks because the found bad block is
* the leftmost bad block.
*/
qsort(samples, n, sizeof(uint64_t), uint64_cmp);

/* Write @samples. */
prv_sample = left_pos;
for (i = 0; i < n; i++) {
if (samples[i] == prv_sample)
continue;
prv_sample = samples[i];
if (write_blocks(dev, prv_sample, prv_sample, salt))
return true;
}
}

/* Reset. */
Expand Down Expand Up @@ -611,8 +648,8 @@ static int find_wrap(struct device *dev,
char *probe_blk = align_mem(stack, block_order);
uint64_t found_offset;

if (dev_read_block(dev, probe_blk, pos) &&
dev_read_block(dev, probe_blk, pos))
if (dev_read_blocks(dev, probe_blk, pos, pos) &&
dev_read_blocks(dev, probe_blk, pos, pos))
return true;

if (!validate_buffer_with_block(probe_blk, block_order,
Expand Down

0 comments on commit fe4e9dd

Please sign in to comment.