Merge tag 'for-6.14/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mikulas Patocka:

 - fix a spelling error in dm-raid

 - change kzalloc to kcalloc

 - remove useless test in alloc_multiple_bios

 - disable REQ_NOWAIT for flushes

 - dm-transaction-manager: use red-black trees instead of linear lists (a generic sketch of the pattern follows the shortlog below)

 - atomic writes support for dm-linear, dm-stripe and dm-mirror (see the userspace sketch right after this list)

 - dm-crypt: code cleanups and two bugfixes
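To make the atomic-writes item concrete, here is a minimal userspace sketch of issuing one torn-write-protected write through such a target, assuming a kernel with the atomic write uapi (statx STATX_WRITE_ATOMIC and pwritev2 RWF_ATOMIC, both merged in 6.11) and headers new enough to carry the statx fields. The device path /dev/dm-0 is hypothetical, and running this against a real device overwrites its first block:

/* Hedged sketch: probe a dm device's atomic write limits, then issue one
 * atomic write. STATX_WRITE_ATOMIC and RWF_ATOMIC are standard uapi; the
 * fallback defines below carry their uapi values for older headers. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef STATX_WRITE_ATOMIC
#define STATX_WRITE_ATOMIC	0x00010000U
#endif
#ifndef RWF_ATOMIC
#define RWF_ATOMIC		0x00000040
#endif

int main(void)
{
	const char *dev = "/dev/dm-0";	/* hypothetical dm-linear/stripe/mirror device */
	int fd = open(dev, O_WRONLY | O_DIRECT);	/* untorn writes go via direct I/O */
	if (fd < 0) { perror("open"); return 1; }

	struct statx stx;
	if (statx(fd, "", AT_EMPTY_PATH, STATX_WRITE_ATOMIC, &stx) < 0) {
		perror("statx");
		return 1;
	}
	if (!stx.stx_atomic_write_unit_min) {
		fprintf(stderr, "device reports no atomic write support\n");
		return 1;
	}
	printf("atomic write unit: min=%u max=%u\n",
	       stx.stx_atomic_write_unit_min, stx.stx_atomic_write_unit_max);

	/* One naturally aligned buffer, sized to the minimum supported unit
	 * (a power of two on any device that reports support). */
	size_t len = stx.stx_atomic_write_unit_min;
	void *buf;
	if (posix_memalign(&buf, len, len)) return 1;
	memset(buf, 0xab, len);

	/* RWF_ATOMIC: the block layer must write this range all-or-nothing.
	 * The file offset must be naturally aligned to the write length. */
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	if (pwritev2(fd, &iov, 1, 0, RWF_ATOMIC) < 0)
		perror("pwritev2(RWF_ATOMIC)");

	free(buf);
	close(fd);
	return 0;
}

The dm-table, dm-linear, dm-stripe and dm-mirror patches in this pull are what let non-zero limits propagate up from the underlying devices; without DM_TARGET_ATOMIC_WRITES and BLK_FEAT_ATOMIC_WRITES (see the dm-table.c hunks below), the statx probe above reports no support for a dm device.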

* tag 'for-6.14/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm-crypt: track tag_offset in convert_context
  dm-crypt: don't initialize cc_sector again
  dm-crypt: don't update io->sector after kcryptd_crypt_write_io_submit()
  dm-crypt: use bi_sector in bio when initialize integrity seed
  dm-crypt: fully initialize clone->bi_iter in crypt_alloc_buffer()
  dm-crypt: set atomic as false when calling crypt_convert() in kworker
  dm-mirror: Support atomic writes
  dm-io: Warn on creating multiple atomic write bios for a region
  dm-stripe: Enable atomic writes
  dm-linear: Enable atomic writes
  dm: Ensure cloned bio is same length for atomic write
  dm-table: atomic writes support
  dm-transaction-manager: use red-black trees instead of linear lists
  dm: disable REQ_NOWAIT for flushes
  dm: remove useless test in alloc_multiple_bios
  dm: change kzalloc to kcalloc
  dm raid: fix spelling errors in raid_ctr()
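The dm-transaction-manager conversion referenced above is not among the file diffs shown below, so here is a generic, hedged sketch of the pattern it names, with purely illustrative types: a keyed lookup moving from a linear list (O(n) per search) to the kernel's rbtree helpers from <linux/rbtree.h> (O(log n)); rb_add() and rb_find() are the standard comparator-based API.

#include <linux/rbtree.h>
#include <linux/types.h>

/* Illustrative only -- not the dm-transaction-manager structures. */
struct shadow_entry {
	struct rb_node node;
	u64 block;			/* lookup key */
};

static bool shadow_less(struct rb_node *a, const struct rb_node *b)
{
	return rb_entry(a, struct shadow_entry, node)->block <
	       rb_entry(b, struct shadow_entry, node)->block;
}

static int shadow_cmp(const void *key, const struct rb_node *n)
{
	u64 want = *(const u64 *)key;
	u64 have = rb_entry(n, struct shadow_entry, node)->block;

	return want < have ? -1 : (want > have ? 1 : 0);
}

/* Insert in O(log n); a list_add() + list_for_each_entry() scheme is O(n). */
static void shadow_insert(struct rb_root *root, struct shadow_entry *e)
{
	rb_add(&e->node, root, shadow_less);
}

static struct shadow_entry *shadow_find(struct rb_root *root, u64 block)
{
	struct rb_node *n = rb_find(&block, root, shadow_cmp);

	return n ? rb_entry(n, struct shadow_entry, node) : NULL;
}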
torvalds committed Jan 28, 2025
2 parents 13845bd + 8b8f803 commit 9629d83
Showing 12 changed files with 120 additions and 63 deletions.
42 changes: 16 additions & 26 deletions drivers/md/dm-crypt.c
@@ -59,6 +59,7 @@ struct convert_context {
 	struct bio *bio_out;
 	struct bvec_iter iter_out;
 	atomic_t cc_pending;
+	unsigned int tag_offset;
 	u64 cc_sector;
 	union {
 		struct skcipher_request *req;
@@ -1187,7 +1188,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
 
 	tag_len = io->cc->tuple_size * (bio_sectors(bio) >> io->cc->sector_shift);
 
-	bip->bip_iter.bi_sector = io->cc->start + io->sector;
+	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
 
 	ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
 				     tag_len, offset_in_page(io->integrity_metadata));
@@ -1256,6 +1257,7 @@ static void crypt_convert_init(struct crypt_config *cc,
 	if (bio_out)
 		ctx->iter_out = bio_out->bi_iter;
 	ctx->cc_sector = sector + cc->iv_offset;
+	ctx->tag_offset = 0;
 	init_completion(&ctx->restart);
 }
 
@@ -1588,7 +1590,6 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
 static blk_status_t crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx, bool atomic, bool reset_pending)
 {
-	unsigned int tag_offset = 0;
 	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
 	int r;
 
@@ -1611,9 +1612,9 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
 		atomic_inc(&ctx->cc_pending);
 
 		if (crypt_integrity_aead(cc))
-			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
+			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, ctx->tag_offset);
 		else
-			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
+			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, ctx->tag_offset);
 
 		switch (r) {
 		/*
@@ -1633,8 +1634,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
 				 * exit and continue processing in a workqueue
 				 */
 				ctx->r.req = NULL;
+				ctx->tag_offset++;
 				ctx->cc_sector += sector_step;
-				tag_offset++;
 				return BLK_STS_DEV_RESOURCE;
 			}
 		} else {
@@ -1648,16 +1649,16 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
 		 */
 		case -EINPROGRESS:
 			ctx->r.req = NULL;
+			ctx->tag_offset++;
 			ctx->cc_sector += sector_step;
-			tag_offset++;
 			continue;
 		/*
 		 * The request was already processed (synchronously).
 		 */
 		case 0:
 			atomic_dec(&ctx->cc_pending);
 			ctx->cc_sector += sector_step;
-			tag_offset++;
+			ctx->tag_offset++;
 			if (!atomic)
 				cond_resched();
 			continue;
@@ -1719,6 +1720,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
 	clone->bi_private = io;
 	clone->bi_end_io = crypt_endio;
 	clone->bi_ioprio = io->base_bio->bi_ioprio;
+	clone->bi_iter.bi_sector = cc->start + io->sector;
 
 	remaining_size = size;
 
Expand Down Expand Up @@ -1909,7 +1911,6 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
crypt_dec_pending(io);
return 1;
}
clone->bi_iter.bi_sector = cc->start + io->sector;
crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
io->saved_bi_iter = clone->bi_iter;
dm_submit_bio_remap(io->base_bio, clone);
@@ -1925,13 +1926,13 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 	clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
 	if (!clone)
 		return 1;
+
+	clone->bi_iter.bi_sector = cc->start + io->sector;
 	clone->bi_private = io;
 	clone->bi_end_io = crypt_endio;
 
 	crypt_inc_pending(io);
 
-	clone->bi_iter.bi_sector = cc->start + io->sector;
-
 	if (dm_crypt_integrity_io_alloc(io, clone)) {
 		crypt_dec_pending(io);
 		bio_put(clone);
@@ -2039,8 +2040,6 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	/* crypt_convert should have filled the clone bio */
 	BUG_ON(io->ctx.iter_out.bi_size);
 
-	clone->bi_iter.bi_sector = cc->start + io->sector;
-
 	if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
 	    test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
 		dm_submit_bio_remap(io->base_bio, clone);
@@ -2092,13 +2091,12 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
 	struct crypt_config *cc = io->cc;
 	struct convert_context *ctx = &io->ctx;
 	int crypt_finished;
-	sector_t sector = io->sector;
 	blk_status_t r;
 
 	wait_for_completion(&ctx->restart);
 	reinit_completion(&ctx->restart);
 
-	r = crypt_convert(cc, &io->ctx, true, false);
+	r = crypt_convert(cc, &io->ctx, false, false);
 	if (r)
 		io->error = r;
 	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
@@ -2109,10 +2107,8 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
 	}
 
 	/* Encryption was already finished, submit io now */
-	if (crypt_finished) {
+	if (crypt_finished)
 		kcryptd_crypt_write_io_submit(io, 0);
-		io->sector = sector;
-	}
 
 	crypt_dec_pending(io);
 }
@@ -2123,14 +2119,13 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	struct convert_context *ctx = &io->ctx;
 	struct bio *clone;
 	int crypt_finished;
-	sector_t sector = io->sector;
 	blk_status_t r;
 
 	/*
 	 * Prevent io from disappearing until this function completes.
 	 */
 	crypt_inc_pending(io);
-	crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
+	crypt_convert_init(cc, ctx, NULL, io->base_bio, io->sector);
 
 	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
 	if (unlikely(!clone)) {
@@ -2147,8 +2142,6 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		io->ctx.iter_in = clone->bi_iter;
 	}
 
-	sector += bio_sectors(clone);
-
 	crypt_inc_pending(io);
 	r = crypt_convert(cc, ctx,
 			  test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
@@ -2172,10 +2165,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	}
 
 	/* Encryption was already finished, submit io now */
-	if (crypt_finished) {
+	if (crypt_finished)
 		kcryptd_crypt_write_io_submit(io, 0);
-		io->sector = sector;
-	}
 
 dec:
 	crypt_dec_pending(io);
@@ -2203,7 +2194,7 @@ static void kcryptd_crypt_read_continue(struct work_struct *work)
 	wait_for_completion(&io->ctx.restart);
 	reinit_completion(&io->ctx.restart);
 
-	r = crypt_convert(cc, &io->ctx, true, false);
+	r = crypt_convert(cc, &io->ctx, false, false);
 	if (r)
 		io->error = r;
 
@@ -2221,7 +2212,6 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 	crypt_inc_pending(io);
 
 	if (io->ctx.aead_recheck) {
-		io->ctx.cc_sector = io->sector + cc->iv_offset;
 		r = crypt_convert(cc, &io->ctx,
 				  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
 	} else {
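One reading note on the tag_offset hunks above: crypt_convert() can return early with BLK_STS_DEV_RESOURCE, or complete blocks asynchronously via -EINPROGRESS, and then be re-entered from a kworker (kcryptd_crypt_write_continue/kcryptd_crypt_read_continue). A stack-local counter would restart at zero on every re-entry, pointing subsequent blocks at the wrong integrity tags; keeping the counter in struct convert_context, which lives as long as the I/O, preserves progress across the handoff. A schematic of the pattern, with illustrative names only:

/* Illustrative, not the dm-crypt code: resumable per-I/O progress belongs in
 * the long-lived context, not in a local of the function being re-entered. */
struct ctx {
	unsigned int tag_offset;	/* zeroed once, when the context is set up */
};

static void convert_one_block(struct ctx *c)
{
	/* A local "unsigned int tag_offset = 0;" here would forget all
	 * progress each time a kworker re-enters the conversion loop. */
	c->tag_offset++;
}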
1 change: 1 addition & 0 deletions drivers/md/dm-io.c
@@ -379,6 +379,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
 
 		atomic_inc(&io->count);
 		submit_bio(bio);
+		WARN_ON_ONCE(opf & REQ_ATOMIC && remaining);
 	} while (remaining);
 }
 
5 changes: 3 additions & 2 deletions drivers/md/dm-linear.c
@@ -199,9 +199,10 @@ static size_t linear_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
 
 static struct target_type linear_target = {
 	.name = "linear",
-	.version = {1, 4, 0},
+	.version = {1, 5, 0},
 	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
-		    DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
+		    DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO |
+		    DM_TARGET_ATOMIC_WRITES,
 	.report_zones = linear_report_zones,
 	.module = THIS_MODULE,
 	.ctr = linear_ctr,
2 changes: 1 addition & 1 deletion drivers/md/dm-ps-io-affinity.c
@@ -116,7 +116,7 @@ static int ioa_create(struct path_selector *ps, unsigned int argc, char **argv)
 	if (!s)
 		return -ENOMEM;
 
-	s->path_map = kzalloc(nr_cpu_ids * sizeof(struct path_info *),
+	s->path_map = kcalloc(nr_cpu_ids, sizeof(struct path_info *),
 			      GFP_KERNEL);
 	if (!s->path_map)
 		goto free_selector;
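An aside on the hunk above: kcalloc(n, size, flags) is the overflow-checked form of kzalloc(n * size, flags). Both return a zeroed array, but kcalloc returns NULL if n * size would overflow, instead of wrapping and under-allocating as an open-coded multiplication can. Schematically, with placeholder names:

/* Placeholder names, not the dm-ps-io-affinity code. */
ptrs = kzalloc(n * sizeof(*ptrs), GFP_KERNEL);	/* n * sizeof() can wrap silently */
ptrs = kcalloc(n, sizeof(*ptrs), GFP_KERNEL);	/* NULL on multiplication overflow */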
2 changes: 1 addition & 1 deletion drivers/md/dm-raid.c
@@ -3196,7 +3196,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	if (reshape_sectors || rs_is_raid1(rs)) {
 		/*
 		 * We can only prepare for a reshape here, because the
-		 * raid set needs to run to provide the repective reshape
+		 * raid set needs to run to provide the respective reshape
 		 * check functions via its MD personality instance.
 		 *
 		 * So do the reshape check after md_run() succeeded.
5 changes: 3 additions & 2 deletions drivers/md/dm-raid1.c
@@ -656,7 +656,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	unsigned int i;
 	struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
 	struct mirror *m;
-	blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
+	blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH | REQ_ATOMIC);
 	struct dm_io_request io_req = {
 		.bi_opf = REQ_OP_WRITE | op_flags,
 		.mem.type = DM_IO_BIO,
@@ -1483,8 +1483,9 @@ static int mirror_iterate_devices(struct dm_target *ti,
 
 static struct target_type mirror_target = {
 	.name = "mirror",
-	.version = {1, 14, 0},
+	.version = {1, 15, 0},
 	.module = THIS_MODULE,
+	.features = DM_TARGET_ATOMIC_WRITES,
 	.ctr = mirror_ctr,
 	.dtr = mirror_dtr,
 	.map = mirror_map,
5 changes: 3 additions & 2 deletions drivers/md/dm-stripe.c
@@ -465,8 +465,9 @@ static void stripe_io_hints(struct dm_target *ti,
 
 static struct target_type stripe_target = {
 	.name = "striped",
-	.version = {1, 6, 0},
-	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT,
+	.version = {1, 7, 0},
+	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
+		    DM_TARGET_ATOMIC_WRITES,
 	.module = THIS_MODULE,
 	.ctr = stripe_ctr,
 	.dtr = stripe_dtr,
29 changes: 29 additions & 0 deletions drivers/md/dm-table.c
@@ -1806,6 +1806,32 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
 	return true;
 }
 
+static int device_not_atomic_write_capable(struct dm_target *ti,
+			struct dm_dev *dev, sector_t start,
+			sector_t len, void *data)
+{
+	return !bdev_can_atomic_write(dev->bdev);
+}
+
+static bool dm_table_supports_atomic_writes(struct dm_table *t)
+{
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
+
+		if (!dm_target_supports_atomic_writes(ti->type))
+			return false;
+
+		if (!ti->type->iterate_devices)
+			return false;
+
+		if (ti->type->iterate_devices(ti,
+				device_not_atomic_write_capable, NULL)) {
+			return false;
+		}
+	}
+	return true;
+}
+
 int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			      struct queue_limits *limits)
 {
@@ -1854,6 +1880,9 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		return r;
 	}
 
+	if (dm_table_supports_atomic_writes(t))
+		limits->features |= BLK_FEAT_ATOMIC_WRITES;
+
 	r = queue_limits_set(q, limits);
 	if (r)
 		return r;
