Skip to content

Commit

Permalink
Add support for bs_is_seq_rand
Browse files Browse the repository at this point in the history
This option will switch the meaning of the read,write part of any
blocksize setting to mean sequential,random instead.

Signed-off-by: Jens Axboe <[email protected]>
  • Loading branch information
axboe committed Jul 25, 2013
1 parent eefd98b commit 6aca9b3
Show file tree
Hide file tree
Showing 7 changed files with 65 additions and 20 deletions.
5 changes: 5 additions & 0 deletions HOWTO
Original file line number Diff line number Diff line change
Expand Up @@ -505,6 +505,11 @@ bs_unaligned If this option is given, any byte size value within bsrange
may be used as a block range. This typically won't work with
direct IO, as that normally requires sector alignment.

bs_is_seq_rand If this option is set, fio will use the normal read,write
blocksize settings as sequential,random instead. Any random
read or write will use the WRITE blocksize settings, and any
sequential read or write will use the READ blocksize setting.

zero_buffers If this option is given, fio will init the IO buffers to
all zeroes. The default is to fill them with random data.

Expand Down
2 changes: 2 additions & 0 deletions cconv.c
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,7 @@ void convert_thread_options_to_cpu(struct thread_options *o,
o->softrandommap = le32_to_cpu(top->softrandommap);
o->bs_unaligned = le32_to_cpu(top->bs_unaligned);
o->fsync_on_close = le32_to_cpu(top->fsync_on_close);
o->bs_is_seq_rand = le32_to_cpu(top->bs_is_seq_rand);
o->random_distribution = le32_to_cpu(top->random_distribution);
o->zipf_theta.u.f = fio_uint64_to_double(le64_to_cpu(top->zipf_theta.u.i));
o->pareto_h.u.f = fio_uint64_to_double(le64_to_cpu(top->pareto_h.u.i));
Expand Down Expand Up @@ -281,6 +282,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
top->softrandommap = cpu_to_le32(o->softrandommap);
top->bs_unaligned = cpu_to_le32(o->bs_unaligned);
top->fsync_on_close = cpu_to_le32(o->fsync_on_close);
top->bs_is_seq_rand = cpu_to_le32(o->bs_is_seq_rand);
top->random_distribution = cpu_to_le32(o->random_distribution);
top->zipf_theta.u.i = __cpu_to_le64(fio_double_to_uint64(o->zipf_theta.u.f));
top->pareto_h.u.i = __cpu_to_le64(fio_double_to_uint64(o->pareto_h.u.f));
Expand Down
6 changes: 6 additions & 0 deletions fio.1
Original file line number Diff line number Diff line change
Expand Up @@ -379,6 +379,12 @@ for using direct IO, though it usually depends on the hardware block size.
This option is mutually exclusive with using a random map for files, so it
will turn off that option.
.TP
.BI bs_is_seq_rand \fR=\fPbool
If this option is set, fio will use the normal read,write blocksize settings as
sequential,random instead. Any random read or write will use the WRITE
blocksize settings, and any sequential read or write will use the READ
blocksize setting.
.TP
.B zero_buffers
Initialise buffers with all zeros. Default: fill buffers with random data.
.TP
Expand Down
55 changes: 37 additions & 18 deletions io_u.c
Original file line number Diff line number Diff line change
Expand Up @@ -293,7 +293,8 @@ static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
}

static int get_next_block(struct thread_data *td, struct io_u *io_u,
enum fio_ddir ddir, int rw_seq)
enum fio_ddir ddir, int rw_seq,
unsigned int *is_random)
{
struct fio_file *f = io_u->file;
uint64_t b, offset;
Expand All @@ -305,23 +306,30 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u,

if (rw_seq) {
if (td_random(td)) {
if (should_do_random(td, ddir))
if (should_do_random(td, ddir)) {
ret = get_next_rand_block(td, f, ddir, &b);
else {
*is_random = 1;
} else {
*is_random = 0;
io_u->flags |= IO_U_F_BUSY_OK;
ret = get_next_seq_offset(td, f, ddir, &offset);
if (ret)
ret = get_next_rand_block(td, f, ddir, &b);
}
} else
} else {
*is_random = 0;
ret = get_next_seq_offset(td, f, ddir, &offset);
}
} else {
io_u->flags |= IO_U_F_BUSY_OK;
*is_random = 0;

if (td->o.rw_seq == RW_SEQ_SEQ) {
ret = get_next_seq_offset(td, f, ddir, &offset);
if (ret)
if (ret) {
ret = get_next_rand_block(td, f, ddir, &b);
*is_random = 0;
}
} else if (td->o.rw_seq == RW_SEQ_IDENT) {
if (f->last_start != -1ULL)
offset = f->last_start - f->file_offset;
Expand Down Expand Up @@ -353,7 +361,8 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u,
* until we find a free one. For sequential io, just return the end of
* the last io issued.
*/
static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
unsigned int *is_random)
{
struct fio_file *f = io_u->file;
enum fio_ddir ddir = io_u->ddir;
Expand All @@ -366,7 +375,7 @@ static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
td->ddir_seq_nr = td->o.ddir_seq_nr;
}

if (get_next_block(td, io_u, ddir, rw_seq_hit))
if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
return 1;

if (io_u->offset >= f->io_size) {
Expand All @@ -387,16 +396,17 @@ static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
return 0;
}

static int get_next_offset(struct thread_data *td, struct io_u *io_u)
static int get_next_offset(struct thread_data *td, struct io_u *io_u,
unsigned int *is_random)
{
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;

if (ops->fill_io_u_off)
return ops->fill_io_u_off(td, io_u);
return ops->fill_io_u_off(td, io_u, is_random);
}

return __get_next_offset(td, io_u);
return __get_next_offset(td, io_u, is_random);
}

static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
Expand All @@ -407,14 +417,20 @@ static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
return io_u->offset + buflen <= f->io_size + get_start_offset(td);
}

static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
unsigned int is_random)
{
const int ddir = io_u->ddir;
int ddir = io_u->ddir;
unsigned int buflen = 0;
unsigned int minbs, maxbs;
unsigned long r, rand_max;

assert(ddir_rw(ddir));
assert(ddir_rw(io_u->ddir));

if (td->o.bs_is_seq_rand)
ddir = is_random ? DDIR_WRITE: DDIR_READ;
else
ddir = io_u->ddir;

minbs = td->o.min_bs[ddir];
maxbs = td->o.max_bs[ddir];
Expand Down Expand Up @@ -471,16 +487,17 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
return buflen;
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
unsigned int is_random)
{
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;

if (ops->fill_io_u_size)
return ops->fill_io_u_size(td, io_u);
return ops->fill_io_u_size(td, io_u, is_random);
}

return __get_next_buflen(td, io_u);
return __get_next_buflen(td, io_u, is_random);
}

static void set_rwmix_bytes(struct thread_data *td)
Expand Down Expand Up @@ -715,6 +732,8 @@ void requeue_io_u(struct thread_data *td, struct io_u **io_u)

static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
unsigned int is_random;

if (td->io_ops->flags & FIO_NOIO)
goto out;

Expand All @@ -740,12 +759,12 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
* No log, let the seq/rand engine retrieve the next buflen and
* position.
*/
if (get_next_offset(td, io_u)) {
if (get_next_offset(td, io_u, &is_random)) {
dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
return 1;
}

io_u->buflen = get_next_buflen(td, io_u);
io_u->buflen = get_next_buflen(td, io_u, is_random);
if (!io_u->buflen) {
dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
return 1;
Expand Down
11 changes: 11 additions & 0 deletions options.c
Original file line number Diff line number Diff line change
Expand Up @@ -1557,6 +1557,17 @@ struct fio_option fio_options[FIO_MAX_OPTS] = {
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
{
.name = "bs_is_seq_rand",
.lname = "Block size division is seq/random (not read/write)",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(bs_is_seq_rand),
.help = "Consider any blocksize setting to be sequential,ramdom",
.def = "0",
.parent = "blocksize",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
{
.name = "randrepeat",
.lname = "Random repeatable",
Expand Down
4 changes: 2 additions & 2 deletions profile.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,8 @@ struct prof_io_ops {
int (*td_init)(struct thread_data *);
void (*td_exit)(struct thread_data *);

int (*fill_io_u_off)(struct thread_data *, struct io_u *);
int (*fill_io_u_size)(struct thread_data *, struct io_u *);
int (*fill_io_u_off)(struct thread_data *, struct io_u *, unsigned int *);
int (*fill_io_u_size)(struct thread_data *, struct io_u *, unsigned int);
struct fio_file *(*get_next_file)(struct thread_data *);

int (*io_u_lat)(struct thread_data *, uint64_t);
Expand Down
2 changes: 2 additions & 0 deletions thread_options.h
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,7 @@ struct thread_options {
unsigned int softrandommap;
unsigned int bs_unaligned;
unsigned int fsync_on_close;
unsigned int bs_is_seq_rand;

unsigned int random_distribution;

Expand Down Expand Up @@ -317,6 +318,7 @@ struct thread_options_pack {
uint32_t softrandommap;
uint32_t bs_unaligned;
uint32_t fsync_on_close;
uint32_t bs_is_seq_rand;

uint32_t random_distribution;
fio_fp64_t zipf_theta;
Expand Down

0 comments on commit 6aca9b3

Please sign in to comment.