Merge branch 'for-3.20/core' into for-3.20/drivers
We need the tagging changes for the libata conversion.
axboe committed Jan 23, 2015
2 parents 121c7ad + 24391c0 commit a4a1cc1
Showing 16 changed files with 146 additions and 62 deletions.
30 changes: 26 additions & 4 deletions block/blk-lib.c
@@ -283,23 +283,45 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
* @sector: start sector
* @nr_sects: number of sectors to write
* @gfp_mask: memory allocation flags (for bio_alloc)
* @discard: whether to discard the block range
*
* Description:
* Generate and issue number of bios with zerofiled pages.
* Zero-fill a block range. If the discard flag is set and the block
* device guarantees that subsequent READ operations to the block range
* in question will return zeroes, the blocks will be discarded. Should
* the discard request fail, if the discard flag is not set, or if
* discard_zeroes_data is not supported, this function will resort to
* zeroing the blocks manually, thus provisioning (allocating,
* anchoring) them. If the block device supports the WRITE SAME command
* blkdev_issue_zeroout() will use it to optimize the process of
* clearing the block range. Otherwise the zeroing will be performed
* using regular WRITE calls.
*/

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask)
sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
struct request_queue *q = bdev_get_queue(bdev);
unsigned char bdn[BDEVNAME_SIZE];

if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data) {

if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0))
return 0;

bdevname(bdev, bdn);
pr_warn("%s: DISCARD failed. Manually zeroing.\n", bdn);
}

if (bdev_write_same(bdev)) {
unsigned char bdn[BDEVNAME_SIZE];

if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
ZERO_PAGE(0)))
return 0;

bdevname(bdev, bdn);
pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
pr_warn("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
}

return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
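The reworked helper lets callers opt into discard-based zeroing through the new flag; the doc comment above spells out the fallback order (discard, then WRITE SAME, then plain writes). A minimal caller sketch — only the blkdev_issue_zeroout() signature comes from this diff, the wrapper name and the GFP_NOIO choice are illustrative:

#include <linux/blkdev.h>

/*
 * Hypothetical caller (not part of this commit): zero a range,
 * preferring a discard when the device guarantees discarded blocks
 * read back as zeroes; blkdev_issue_zeroout() falls back to
 * WRITE SAME or plain writes on its own.
 */
static int example_zero_range(struct block_device *bdev,
			      sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOIO, true);
}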
80 changes: 49 additions & 31 deletions block/blk-mq-tag.c
@@ -140,35 +140,39 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
return atomic_read(&hctx->nr_active) < depth;
}

static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag,
bool nowrap)
{
int tag, org_last_tag, end;
bool wrap = last_tag != 0;
int tag, org_last_tag = last_tag;

org_last_tag = last_tag;
end = bm->depth;
do {
restart:
tag = find_next_zero_bit(&bm->word, end, last_tag);
if (unlikely(tag >= end)) {
while (1) {
tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);
if (unlikely(tag >= bm->depth)) {
/*
* We started with an offset, start from 0 to
* We started with an offset, and we didn't reset the
* offset to 0 in a failure case, so start from 0 to
* exhaust the map.
*/
if (wrap) {
wrap = false;
end = org_last_tag;
last_tag = 0;
goto restart;
if (org_last_tag && last_tag && !nowrap) {
last_tag = org_last_tag = 0;
continue;
}
return -1;
}

if (!test_and_set_bit(tag, &bm->word))
break;

last_tag = tag + 1;
} while (test_and_set_bit(tag, &bm->word));
if (last_tag >= bm->depth - 1)
last_tag = 0;
}

return tag;
}

#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)

/*
* Straight forward bitmap tag implementation, where each bit is a tag
* (cleared == free, and set == busy). The small twist is using per-cpu
@@ -181,7 +185,7 @@ static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
* until the map is exhausted.
*/
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
unsigned int *tag_cache)
unsigned int *tag_cache, struct blk_mq_tags *tags)
{
unsigned int last_tag, org_last_tag;
int index, i, tag;
@@ -193,15 +197,24 @@ static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
index = TAG_TO_INDEX(bt, last_tag);

for (i = 0; i < bt->map_nr; i++) {
tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag),
BT_ALLOC_RR(tags));
if (tag != -1) {
tag += (index << bt->bits_per_word);
goto done;
}

last_tag = 0;
if (++index >= bt->map_nr)
/*
* Jump to next index, and reset the last tag to be the
* first tag of that index
*/
index++;
last_tag = (index << bt->bits_per_word);

if (index >= bt->map_nr) {
index = 0;
last_tag = 0;
}
}

*tag_cache = 0;
@@ -212,7 +225,7 @@ static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
* up using the specific cached tag.
*/
done:
if (tag == org_last_tag) {
if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) {
last_tag = tag + 1;
if (last_tag >= bt->depth - 1)
last_tag = 0;
@@ -241,13 +254,13 @@ static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
static int bt_get(struct blk_mq_alloc_data *data,
struct blk_mq_bitmap_tags *bt,
struct blk_mq_hw_ctx *hctx,
unsigned int *last_tag)
unsigned int *last_tag, struct blk_mq_tags *tags)
{
struct bt_wait_state *bs;
DEFINE_WAIT(wait);
int tag;

tag = __bt_get(hctx, bt, last_tag);
tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
return tag;

@@ -258,7 +271,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
do {
prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

tag = __bt_get(hctx, bt, last_tag);
tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
break;

@@ -273,7 +286,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
* Retry tag allocation after running the hardware queue,
* as running the queue may also have found completions.
*/
tag = __bt_get(hctx, bt, last_tag);
tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
break;

@@ -304,7 +317,7 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
int tag;

tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
&data->ctx->last_tag);
&data->ctx->last_tag, data->hctx->tags);
if (tag >= 0)
return tag + data->hctx->tags->nr_reserved_tags;

@@ -320,7 +333,8 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
return BLK_MQ_TAG_FAIL;
}

tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
data->hctx->tags);
if (tag < 0)
return BLK_MQ_TAG_FAIL;

@@ -392,7 +406,8 @@ void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,

BUG_ON(real_tag >= tags->nr_tags);
bt_clear_tag(&tags->bitmap_tags, real_tag);
*last_tag = real_tag;
if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
*last_tag = real_tag;
} else {
BUG_ON(tag >= tags->nr_reserved_tags);
bt_clear_tag(&tags->breserved_tags, tag);
@@ -529,10 +544,12 @@ static void bt_free(struct blk_mq_bitmap_tags *bt)
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
int node)
int node, int alloc_policy)
{
unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

tags->alloc_policy = alloc_policy;

if (bt_alloc(&tags->bitmap_tags, depth, node, false))
goto enomem;
if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
@@ -546,7 +563,8 @@ static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
unsigned int reserved_tags, int node)
unsigned int reserved_tags,
int node, int alloc_policy)
{
struct blk_mq_tags *tags;

@@ -562,7 +580,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
tags->nr_tags = total_tags;
tags->nr_reserved_tags = reserved_tags;

return blk_mq_init_bitmap_tags(tags, node);
return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
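The rewritten __bt_get_word() makes the wrap-to-zero retry conditional: FIFO allocation still restarts from bit 0 to exhaust a word, while round-robin (nowrap) moves on so tags keep cycling through the map. A small single-threaded C model of that loop, using a plain unsigned long in place of the kernel's blk_align_bitmap and ignoring the concurrent test_and_set_bit() retry (names are illustrative):

#include <stdbool.h>

/* Claim the first clear bit at or after last_tag in *word (depth bits
 * wide, depth <= bits per long). If the tail of the word is full and
 * nowrap is false, retry once from bit 0, mirroring the FIFO policy;
 * with nowrap (round robin) the caller advances to the next word. */
static int get_word_tag(unsigned long *word, unsigned int depth,
			unsigned int last_tag, bool nowrap)
{
	unsigned int org_last_tag = last_tag;

	while (1) {
		unsigned int tag;

		for (tag = last_tag; tag < depth; tag++)
			if (!(*word & (1UL << tag)))
				break;

		if (tag >= depth) {
			if (org_last_tag && last_tag && !nowrap) {
				last_tag = org_last_tag = 0;
				continue;
			}
			return -1;
		}

		*word |= 1UL << tag;	/* single-threaded stand-in for test_and_set_bit() */
		return tag;
	}
}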
4 changes: 3 additions & 1 deletion block/blk-mq-tag.h
@@ -42,10 +42,12 @@ struct blk_mq_tags {

struct request **rqs;
struct list_head page_list;

int alloc_policy;
};


extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node);
extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);

extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
3 changes: 2 additions & 1 deletion block/blk-mq.c
@@ -1374,7 +1374,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
size_t rq_size, left;

tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
set->numa_node);
set->numa_node,
BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
if (!tags)
return NULL;

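BLK_MQ_FLAG_TO_ALLOC_POLICY() unpacks the allocation policy that a driver encoded into its tag-set flags. A hedged sketch of how a driver might request round-robin tags — BLK_ALLOC_POLICY_TO_MQ_FLAG() is assumed to be the inverse packing macro added elsewhere in this series, not something visible in this hunk:

/*
 * Hedged sketch (not from this diff): pack the round-robin policy
 * into a blk-mq tag set's flags so blk_mq_init_rq_map() above will
 * hand it down to blk_mq_init_tags().
 */
static void example_pick_rr_tags(struct blk_mq_tag_set *set)
{
	set->flags |= BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);
}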
33 changes: 25 additions & 8 deletions block/blk-tag.c
@@ -119,7 +119,7 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
int depth)
int depth, int alloc_policy)
{
struct blk_queue_tag *tags;

@@ -131,6 +131,8 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
goto fail;

atomic_set(&tags->refcnt, 1);
tags->alloc_policy = alloc_policy;
tags->next_tag = 0;
return tags;
fail:
kfree(tags);
@@ -140,10 +142,11 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
/**
* blk_init_tags - initialize the tag info for an external tag map
* @depth: the maximum queue depth supported
* @alloc_policy: tag allocation policy
**/
struct blk_queue_tag *blk_init_tags(int depth)
struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
{
return __blk_queue_init_tags(NULL, depth);
return __blk_queue_init_tags(NULL, depth, alloc_policy);
}
EXPORT_SYMBOL(blk_init_tags);

@@ -152,19 +155,20 @@ EXPORT_SYMBOL(blk_init_tags);
* @q: the request queue for the device
* @depth: the maximum queue depth supported
* @tags: the tag to use
* @alloc_policy: tag allocation policy
*
* Queue lock must be held here if the function is called to resize an
* existing map.
**/
int blk_queue_init_tags(struct request_queue *q, int depth,
struct blk_queue_tag *tags)
struct blk_queue_tag *tags, int alloc_policy)
{
int rc;

BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

if (!tags && !q->queue_tags) {
tags = __blk_queue_init_tags(q, depth);
tags = __blk_queue_init_tags(q, depth, alloc_policy);

if (!tags)
return -ENOMEM;
@@ -344,16 +348,29 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
}

do {
tag = find_first_zero_bit(bqt->tag_map, max_depth);
if (tag >= max_depth)
return 1;
if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
tag = find_first_zero_bit(bqt->tag_map, max_depth);
if (tag >= max_depth)
return 1;
} else {
int start = bqt->next_tag;
int size = min_t(int, bqt->max_depth, max_depth + start);
tag = find_next_zero_bit(bqt->tag_map, size, start);
if (tag >= size && start + size > bqt->max_depth) {
size = start + size - bqt->max_depth;
tag = find_first_zero_bit(bqt->tag_map, size);
}
if (tag >= size)
return 1;
}

} while (test_and_set_bit_lock(tag, bqt->tag_map));
/*
* We need lock ordering semantics given by test_and_set_bit_lock.
* See blk_queue_end_tag for details.
*/

bqt->next_tag = (tag + 1) % bqt->max_depth;
rq->cmd_flags |= REQ_QUEUED;
rq->tag = tag;
bqt->tag_index[tag] = rq;
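The legacy path gets the same treatment: FIFO keeps find_first_zero_bit(), while round-robin resumes the scan at next_tag and wraps once past the end of the map. A userspace model of that two-step search, copying the arithmetic from the hunk above; find_zero() stands in for the kernel's find_{first,next}_zero_bit() and the byte-per-tag map is purely illustrative:

/* Scan map[start..limit) for a clear slot; return limit on failure. */
static int find_zero(const unsigned char *map, int limit, int start)
{
	for (int i = start; i < limit; i++)
		if (!map[i])
			return i;
	return limit;
}

/* Round-robin tag grab modelled on blk_queue_start_tag(): scan from
 * *next_tag toward the end of the allowed window, then wrap and scan
 * the remainder from slot 0. Returns the claimed tag or -1. */
static int rr_start_tag(unsigned char *map, int bqt_max_depth,
			int max_depth, int *next_tag)
{
	int start = *next_tag;
	int size = max_depth + start;
	int tag;

	if (size > bqt_max_depth)
		size = bqt_max_depth;
	tag = find_zero(map, size, start);
	if (tag >= size && start + size > bqt_max_depth) {
		size = start + size - bqt_max_depth;
		tag = find_zero(map, size, 0);
	}
	if (tag >= size)
		return -1;

	map[tag] = 1;				/* claim the slot */
	*next_tag = (tag + 1) % bqt_max_depth;
	return tag;
}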
9 changes: 7 additions & 2 deletions block/cfq-iosched.c
@@ -3656,12 +3656,17 @@ static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
struct bio *bio, gfp_t gfp_mask)
{
const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
struct cfq_queue **async_cfqq = NULL;
struct cfq_queue *cfqq = NULL;

if (!is_sync) {
if (!ioprio_valid(cic->ioprio)) {
struct task_struct *tsk = current;
ioprio = task_nice_ioprio(tsk);
ioprio_class = task_nice_ioclass(tsk);
}
async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
cfqq = *async_cfqq;
}
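When a task never called ioprio_set(), cic->ioprio is invalid, so CFQ now derives a priority from the scheduling nice value instead of silently using level 0. A sketch of the assumed mapping behind task_nice_ioprio() — the helper body below is an assumption about include/linux/ioprio.h, not part of this diff:

/* Assumed behaviour of task_nice_ioprio(): nice -20..19 collapses
 * onto best-effort levels 0..7; task_nice_ioclass() is assumed to
 * pick RT/IDLE/BE from the task's scheduling policy. */
static int nice_to_ioprio(int nice)
{
	return (nice + 20) / 5;
}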
2 changes: 1 addition & 1 deletion block/ioctl.c
@@ -198,7 +198,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, uint64_t start,
if (start + len > (i_size_read(bdev->bd_inode) >> 9))
return -EINVAL;

return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL);
return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL, false);
}

static int put_ushort(unsigned long arg, unsigned short val)
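The ioctl path keeps discard disabled, so BLKZEROOUT always writes zeroes explicitly. From userspace the range is passed as byte offsets; a minimal, illustrative caller (device path and range are placeholders, and per the check above both values must be 512-byte aligned):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* BLKZEROOUT */

int main(void)
{
	/* Zero the first 1 MiB of an example device; start and length
	 * are in bytes and must be multiples of 512. */
	uint64_t range[2] = { 0, 1024 * 1024 };
	int fd = open("/dev/sdX", O_WRONLY);

	if (fd < 0 || ioctl(fd, BLKZEROOUT, range) != 0)
		perror("BLKZEROOUT");
	if (fd >= 0)
		close(fd);
	return 0;
}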