Lines matching refs: cc (struct crypt_config *, drivers/md/dm-crypt.c)

67 	struct crypt_config *cc;
95 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
97 void (*dtr)(struct crypt_config *cc);
98 int (*init)(struct crypt_config *cc);
99 int (*wipe)(struct crypt_config *cc);
100 int (*generator)(struct crypt_config *cc, u8 *iv,
102 int (*post)(struct crypt_config *cc, u8 *iv,
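The six prototypes matched at 95-102 are the members of dm-crypt's crypt_iv_operations callback table, through which cc->iv_gen_ops is dispatched. Reconstructed here for context, together with an illustrative instance (the plain mode wired up at 2747 needs only a generator); the member order follows the matches above.

	struct crypt_iv_operations {
		int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
			   const char *opts);
		void (*dtr)(struct crypt_config *cc);
		int (*init)(struct crypt_config *cc);
		int (*wipe)(struct crypt_config *cc);
		int (*generator)(struct crypt_config *cc, u8 *iv,
				 struct dm_crypt_request *dmreq);
		int (*post)(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq);
	};

	/* A simple IV mode only fills in a generator: */
	static const struct crypt_iv_operations crypt_iv_plain_ops = {
		.generator = crypt_iv_plain_gen
	};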
237 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
240 static bool crypt_integrity_aead(struct crypt_config *cc);
245 static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
247 return cc->cipher_tfm.tfms[0];
250 static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
252 return cc->cipher_tfm.tfms_aead[0];
311 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
314 memset(iv, 0, cc->iv_size);
320 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
323 memset(iv, 0, cc->iv_size);
329 static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
332 memset(iv, 0, cc->iv_size);
334 *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);
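Because only lines containing cc are listed, the generators at 311-334 look truncated; the omitted lines store the sector number and return 0. As a sketch, the plain64 variant in full (plain truncates the sector to 32 bits, plain64be stores it big-endian at the tail of the IV, line 334):

	static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
					struct dm_crypt_request *dmreq)
	{
		/* Little-endian 64-bit sector number, zero-padded to iv_size. */
		memset(iv, 0, cc->iv_size);
		*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

		return 0;
	}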
339 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
346 memset(iv, 0, cc->iv_size);
352 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
358 if (crypt_integrity_aead(cc))
359 bs = crypto_aead_blocksize(any_tfm_aead(cc));
361 bs = crypto_skcipher_blocksize(any_tfm(cc));
377 cc->iv_gen_private.benbi.shift = 9 - log;
382 static void crypt_iv_benbi_dtr(struct crypt_config *cc)
386 static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
391 memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
393 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
394 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
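A worked example of the benbi ("big-endian narrow block count, starting at 1") arithmetic implied by lines 377 and 393, assuming a cipher with 16-byte blocks:

	/*
	 * bs = 16 => log = ilog2(16) = 4, shift = 9 - 4 = 5 (32 cipher blocks
	 * per 512-byte sector). Sector 7 starts at block 7 << 5 = 224, so the
	 * value written big-endian at the tail of the IV is the 1-based count
	 * (7 << 5) + 1 = 225.
	 */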
399 static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
402 memset(iv, 0, cc->iv_size);
407 static void crypt_iv_lmk_dtr(struct crypt_config *cc)
409 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
419 static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
422 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
424 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
437 if (cc->key_parts == cc->tfms_count) {
444 crypt_iv_lmk_dtr(cc);
452 static int crypt_iv_lmk_init(struct crypt_config *cc)
454 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
455 int subkey_size = cc->key_size / cc->key_parts;
459 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
465 static int crypt_iv_lmk_wipe(struct crypt_config *cc)
467 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
475 static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
479 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
518 memcpy(iv, &md5state.hash, cc->iv_size);
523 static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
531 sg = crypt_get_sg_data(cc, dmreq->sg_in);
533 r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
536 memset(iv, 0, cc->iv_size);
541 static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
551 sg = crypt_get_sg_data(cc, dmreq->sg_out);
553 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
557 crypto_xor(dst + sg->offset, iv, cc->iv_size);
563 static void crypt_iv_tcw_dtr(struct crypt_config *cc)
565 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
577 static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
580 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
582 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
587 if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
599 tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
602 crypt_iv_tcw_dtr(cc);
610 static int crypt_iv_tcw_init(struct crypt_config *cc)
612 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
613 int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
615 memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
616 memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
622 static int crypt_iv_tcw_wipe(struct crypt_config *cc)
624 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
626 memset(tcw->iv_seed, 0, cc->iv_size);
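The offsets at 587 and 613-616 (together with key_extra_size at 2782) imply the following layout of cc->key in tcw mode; a sketch, with TCW_WHITENING_SIZE being the whitening-seed length defined elsewhere in the file:

	/*
	 *  | cipher key | iv_seed (cc->iv_size) | whitening (TCW_WHITENING_SIZE) |
	 *               ^ key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE
	 *
	 * key_extra_size (2782) reserves the trailing iv_seed + whitening bytes
	 * so they are never handed to the block cipher as key material.
	 */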
632 static int crypt_iv_tcw_whitening(struct crypt_config *cc,
636 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
670 static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
674 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
681 sg = crypt_get_sg_data(cc, dmreq->sg_in);
683 r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
689 if (cc->iv_size > 8)
691 cc->iv_size - 8);
696 static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
707 sg = crypt_get_sg_data(cc, dmreq->sg_out);
709 r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
715 static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
719 get_random_bytes(iv, cc->iv_size);
723 static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
726 if (crypt_integrity_aead(cc)) {
731 if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
740 static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
749 req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
753 memset(buf, 0, cc->iv_size);
754 *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
756 sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
757 sg_init_one(&dst, iv, cc->iv_size);
758 skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
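Lines 740-758 are the core of the EBOIV ("encrypted byte-offset IV") generator; the unmatched remainder presumably completes the request synchronously and frees it. The net effect, as a hedged summary:

	/*
	 * iv = E_datakey(cc->iv_size zero bytes), where the IV fed to that one
	 * encryption is the little-endian byte offset
	 * dmreq->iv_sector * cc->sector_size prepared in buf above. The request
	 * is then driven to completion (crypto_wait_req() on
	 * crypto_skcipher_encrypt()) and freed before returning.
	 */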
766 static void crypt_iv_elephant_dtr(struct crypt_config *cc)
768 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
774 static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
777 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
788 r = crypt_iv_eboiv_ctr(cc, ti, NULL);
790 crypt_iv_elephant_dtr(cc);
938 static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
940 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
956 *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
974 sg = crypt_get_sg_data(cc, dmreq->sg_out);
980 sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
982 memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
987 diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
988 diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
989 diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
990 diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
993 for (i = 0; i < (cc->sector_size / 32); i++)
997 diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
998 diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
999 diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
1000 diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
1011 static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
1017 r = crypt_iv_elephant(cc, dmreq);
1022 return crypt_iv_eboiv_gen(cc, iv, dmreq);
1025 static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
1029 return crypt_iv_elephant(cc, dmreq);
1034 static int crypt_iv_elephant_init(struct crypt_config *cc)
1036 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1037 int key_offset = cc->key_size - cc->key_extra_size;
1039 return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
1042 static int crypt_iv_elephant_wipe(struct crypt_config *cc)
1044 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1047 memset(key, 0, cc->key_extra_size);
1048 return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
1116 static bool crypt_integrity_aead(struct crypt_config *cc)
1118 return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
1121 static bool crypt_integrity_hmac(struct crypt_config *cc)
1123 return crypt_integrity_aead(cc) && cc->key_mac_size;
1127 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
1130 if (unlikely(crypt_integrity_aead(cc)))
1142 if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
1149 tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
1152 bip->bip_iter.bi_sector = io->cc->start + io->sector;
1162 static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
1165 struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
1174 if (bi->tag_size != cc->on_disk_tag_size ||
1175 bi->tuple_size != cc->on_disk_tag_size) {
1179 if (1 << bi->interval_exp != cc->sector_size) {
1184 if (crypt_integrity_aead(cc)) {
1185 cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
1187 cc->integrity_tag_size, cc->integrity_iv_size);
1189 if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
1193 } else if (cc->integrity_iv_size)
1195 cc->integrity_iv_size);
1197 if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
1209 static void crypt_convert_init(struct crypt_config *cc,
1220 ctx->cc_sector = sector + cc->iv_offset;
1224 static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
1227 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
1230 static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
1232 return (void *)((char *)dmreq - cc->dmreq_start);
1235 static u8 *iv_of_dmreq(struct crypt_config *cc,
1238 if (crypt_integrity_aead(cc))
1240 crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
1243 crypto_skcipher_alignmask(any_tfm(cc)) + 1);
1246 static u8 *org_iv_of_dmreq(struct crypt_config *cc,
1249 return iv_of_dmreq(cc, dmreq) + cc->iv_size;
1252 static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
1255 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
1259 static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
1262 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
1263 cc->iv_size + sizeof(uint64_t);
1267 static void *tag_from_dmreq(struct crypt_config *cc,
1273 return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
1274 cc->on_disk_tag_size];
1277 static void *iv_tag_from_dmreq(struct crypt_config *cc,
1280 return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
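The helpers at 1224-1280 address fields packed behind the crypto request inside one cc->req_pool element (sized at 3199-3230). The layout they imply, roughly:

	/*
	 *  | skcipher/aead request + tfm reqsize |  <- cc->dmreq_start bytes
	 *  | struct dm_crypt_request             |  <- dmreq_of_req()
	 *  | padding to the tfm's alignmask      |  (iv_size_padding, 3212)
	 *  | iv         (cc->iv_size)            |  <- iv_of_dmreq(), passed to the tfm
	 *  | org_iv     (cc->iv_size)            |  <- org_iv_of_dmreq(), preserved copy
	 *  | org_sector (__le64)                 |  <- org_sector_of_dmreq()
	 *  | org_tag    (unsigned int)           |  <- org_tag_of_dmreq(), index into
	 *  |                                     |     io->integrity_metadata
	 */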
1283 static int crypt_convert_block_aead(struct crypt_config *cc,
1295 BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
1298 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1301 dmreq = dmreq_of_req(cc, req);
1303 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1304 dmreq->iv_sector >>= cc->sector_shift;
1307 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1309 sector = org_sector_of_dmreq(cc, dmreq);
1310 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1312 iv = iv_of_dmreq(cc, dmreq);
1313 org_iv = org_iv_of_dmreq(cc, dmreq);
1314 tag = tag_from_dmreq(cc, dmreq);
1315 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1324 sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
1325 sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1326 sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
1330 sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
1331 sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1332 sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
1334 if (cc->iv_gen_ops) {
1336 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1337 memcpy(org_iv, tag_iv, cc->iv_size);
1339 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1343 if (cc->integrity_iv_size)
1344 memcpy(tag_iv, org_iv, cc->iv_size);
1347 memcpy(iv, org_iv, cc->iv_size);
1350 aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
1353 cc->sector_size, iv);
1355 if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
1356 memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
1357 cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
1360 cc->sector_size + cc->integrity_tag_size, iv);
1370 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1371 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1373 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1374 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1379 static int crypt_convert_block_skcipher(struct crypt_config *cc,
1393 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1396 dmreq = dmreq_of_req(cc, req);
1398 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1399 dmreq->iv_sector >>= cc->sector_shift;
1402 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1404 iv = iv_of_dmreq(cc, dmreq);
1405 org_iv = org_iv_of_dmreq(cc, dmreq);
1406 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1408 sector = org_sector_of_dmreq(cc, dmreq);
1409 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1416 sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1419 sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1421 if (cc->iv_gen_ops) {
1423 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1424 memcpy(org_iv, tag_iv, cc->integrity_iv_size);
1426 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1430 if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
1433 if (cc->integrity_iv_size)
1434 memcpy(tag_iv, org_iv, cc->integrity_iv_size);
1437 memcpy(iv, org_iv, cc->iv_size);
1440 skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
1447 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1448 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1450 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1451 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1459 static int crypt_alloc_req_skcipher(struct crypt_config *cc,
1462 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
1465 ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1470 skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
1478 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1483 static int crypt_alloc_req_aead(struct crypt_config *cc,
1487 ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1492 aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
1500 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1505 static int crypt_alloc_req(struct crypt_config *cc,
1508 if (crypt_integrity_aead(cc))
1509 return crypt_alloc_req_aead(cc, ctx);
1511 return crypt_alloc_req_skcipher(cc, ctx);
1514 static void crypt_free_req_skcipher(struct crypt_config *cc,
1517 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1520 mempool_free(req, &cc->req_pool);
1523 static void crypt_free_req_aead(struct crypt_config *cc,
1526 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1529 mempool_free(req, &cc->req_pool);
1532 static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
1534 if (crypt_integrity_aead(cc))
1535 crypt_free_req_aead(cc, req, base_bio);
1537 crypt_free_req_skcipher(cc, req, base_bio);
1543 static blk_status_t crypt_convert(struct crypt_config *cc,
1547 unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
1560 r = crypt_alloc_req(cc, ctx);
1568 if (crypt_integrity_aead(cc))
1569 r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
1571 r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
1637 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
1658 struct crypt_config *cc = io->cc;
1667 mutex_lock(&cc->bio_alloc_lock);
1669 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
1678 page = mempool_alloc(&cc->page_pool, gfp_mask);
1680 crypt_free_buffer_pages(cc, clone);
1695 crypt_free_buffer_pages(cc, clone);
1701 mutex_unlock(&cc->bio_alloc_lock);
1706 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1713 mempool_free(bv->bv_page, &cc->page_pool);
1717 static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1720 io->cc = cc;
1748 struct crypt_config *cc = io->cc;
1756 crypt_free_req(cc, io->ctx.r.req, base_bio);
1759 mempool_free(io->integrity_metadata, &io->cc->tag_pool);
1775 queue_work(cc->io_queue, &io->work);
1802 struct crypt_config *cc = io->cc;
1810 crypt_free_buffer_pages(cc, clone);
1828 struct crypt_config *cc = io->cc;
1832 bio_set_dev(clone, cc->dev->bdev);
1838 struct crypt_config *cc = io->cc;
1847 clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
1854 clone->bi_iter.bi_sector = cc->start + io->sector;
1878 struct crypt_config *cc = io->cc;
1881 queue_work(cc->io_queue, &io->work);
1895 struct crypt_config *cc = data;
1902 spin_lock_irq(&cc->write_thread_lock);
1905 if (!RB_EMPTY_ROOT(&cc->write_tree))
1910 spin_unlock_irq(&cc->write_thread_lock);
1920 spin_lock_irq(&cc->write_thread_lock);
1924 write_tree = cc->write_tree;
1925 cc->write_tree = RB_ROOT;
1926 spin_unlock_irq(&cc->write_thread_lock);
1949 struct crypt_config *cc = io->cc;
1955 crypt_free_buffer_pages(cc, clone);
1964 clone->bi_iter.bi_sector = cc->start + io->sector;
1966 if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
1967 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
1972 spin_lock_irqsave(&cc->write_thread_lock, flags);
1973 if (RB_EMPTY_ROOT(&cc->write_tree))
1974 wake_up_process(cc->write_thread);
1975 rbp = &cc->write_tree.rb_node;
1986 rb_insert_color(&io->rb_node, &cc->write_tree);
1987 spin_unlock_irqrestore(&cc->write_thread_lock, flags);
1990 static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
1994 if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
2015 struct crypt_config *cc = io->cc;
2024 r = crypt_convert(cc, &io->ctx, true, false);
2028 if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2045 struct crypt_config *cc = io->cc;
2056 crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
2070 r = crypt_convert(cc, ctx,
2071 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
2079 queue_work(cc->crypt_queue, &io->work);
2085 if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2109 struct crypt_config *cc = io->cc;
2115 r = crypt_convert(cc, &io->ctx, true, false);
2127 struct crypt_config *cc = io->cc;
2132 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
2135 r = crypt_convert(cc, &io->ctx,
2136 test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
2143 queue_work(cc->crypt_queue, &io->work);
2161 struct crypt_config *cc = io->cc;
2173 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
2174 error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
2179 (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
2184 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
2198 if (kcryptd_crypt_write_inline(cc, ctx)) {
2223 struct crypt_config *cc = io->cc;
2225 if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
2226 (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
2244 queue_work(cc->crypt_queue, &io->work);
2247 static void crypt_free_tfms_aead(struct crypt_config *cc)
2249 if (!cc->cipher_tfm.tfms_aead)
2252 if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2253 crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
2254 cc->cipher_tfm.tfms_aead[0] = NULL;
2257 kfree(cc->cipher_tfm.tfms_aead);
2258 cc->cipher_tfm.tfms_aead = NULL;
2261 static void crypt_free_tfms_skcipher(struct crypt_config *cc)
2265 if (!cc->cipher_tfm.tfms)
2268 for (i = 0; i < cc->tfms_count; i++)
2269 if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
2270 crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
2271 cc->cipher_tfm.tfms[i] = NULL;
2274 kfree(cc->cipher_tfm.tfms);
2275 cc->cipher_tfm.tfms = NULL;
2278 static void crypt_free_tfms(struct crypt_config *cc)
2280 if (crypt_integrity_aead(cc))
2281 crypt_free_tfms_aead(cc);
2283 crypt_free_tfms_skcipher(cc);
2286 static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
2291 cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
2294 if (!cc->cipher_tfm.tfms)
2297 for (i = 0; i < cc->tfms_count; i++) {
2298 cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
2300 if (IS_ERR(cc->cipher_tfm.tfms[i])) {
2301 err = PTR_ERR(cc->cipher_tfm.tfms[i]);
2302 crypt_free_tfms(cc);
2313 crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
2317 static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
2321 cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
2322 if (!cc->cipher_tfm.tfms)
2325 cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
2327 if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2328 err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
2329 crypt_free_tfms(cc);
2334 crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
2338 static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
2340 if (crypt_integrity_aead(cc))
2341 return crypt_alloc_tfms_aead(cc, ciphermode);
2343 return crypt_alloc_tfms_skcipher(cc, ciphermode);
2346 static unsigned crypt_subkey_size(struct crypt_config *cc)
2348 return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
2351 static unsigned crypt_authenckey_size(struct crypt_config *cc)
2353 return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
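A worked example of the subkey split at 2348, assuming an XTS setup with two independent 64-byte keys (keycount=2) and no extra key material:

	/*
	 * key_size = 128, key_extra_size = 0, tfms_count = 2
	 *   => crypt_subkey_size() = (128 - 0) >> ilog2(2) = 64,
	 * and crypt_setkey() (2395-2405) feeds cc->key + i * 64 to tfms[i].
	 */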
2359 	 * This function converts cc->key to this special format.
2378 static int crypt_setkey(struct crypt_config *cc)
2384 subkey_size = crypt_subkey_size(cc);
2386 if (crypt_integrity_hmac(cc)) {
2387 if (subkey_size < cc->key_mac_size)
2390 crypt_copy_authenckey(cc->authenc_key, cc->key,
2391 subkey_size - cc->key_mac_size,
2392 cc->key_mac_size);
2395 for (i = 0; i < cc->tfms_count; i++) {
2396 if (crypt_integrity_hmac(cc))
2397 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2398 cc->authenc_key, crypt_authenckey_size(cc));
2399 else if (crypt_integrity_aead(cc))
2400 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2401 cc->key + (i * subkey_size),
2404 r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
2405 cc->key + (i * subkey_size),
2411 if (crypt_integrity_hmac(cc))
2412 memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));
2427 static int set_key_user(struct crypt_config *cc, struct key *key)
2435 if (cc->key_size != ukp->datalen)
2438 memcpy(cc->key, ukp->data, cc->key_size);
2444 static int set_key_encrypted(struct crypt_config *cc, struct key *key)
2452 if (cc->key_size != ekp->decrypted_datalen)
2455 memcpy(cc->key, ekp->decrypted_data, cc->key_size);
2461 static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2467 int (*set_key)(struct crypt_config *cc, struct key *key);
2510 ret = set_key(cc, key);
2522 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2524 ret = crypt_setkey(cc);
2527 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2528 kfree_sensitive(cc->key_string);
2529 cc->key_string = new_key_string;
2561 static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2573 static int crypt_set_key(struct crypt_config *cc, char *key)
2579 if (!cc->key_size && strcmp(key, "-"))
2584 r = crypt_set_keyring_key(cc, key + 1);
2589 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2592 kfree_sensitive(cc->key_string);
2593 cc->key_string = NULL;
2596 if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
2599 r = crypt_setkey(cc);
2601 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2610 static int crypt_wipe_key(struct crypt_config *cc)
2614 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2615 get_random_bytes(&cc->key, cc->key_size);
2618 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2619 r = cc->iv_gen_ops->wipe(cc);
2624 kfree_sensitive(cc->key_string);
2625 cc->key_string = NULL;
2626 r = crypt_setkey(cc);
2627 memset(&cc->key, 0, cc->key_size * sizeof(u8));
2647 struct crypt_config *cc = pool_data;
2655 if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
2661 percpu_counter_add(&cc->n_allocated_pages, 1);
2668 struct crypt_config *cc = pool_data;
2671 percpu_counter_sub(&cc->n_allocated_pages, 1);
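Lines 2647-2671 belong to the mempool page callbacks that enforce a per-client page budget via cc->n_allocated_pages. A reconstruction as a sketch; the exact gfp-flag test in the unmatched lines is an assumption:

	static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
	{
		struct crypt_config *cc = pool_data;
		struct page *page;

		/* Refuse opportunistic allocations once the per-client budget is hit
		 * (assumed test: only when the caller passed __GFP_NORETRY). */
		if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >=
			     dm_crypt_pages_per_client) &&
		    likely(gfp_mask & __GFP_NORETRY))
			return NULL;

		page = alloc_page(gfp_mask);
		if (likely(page != NULL))
			percpu_counter_add(&cc->n_allocated_pages, 1);

		return page;
	}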
2676 struct crypt_config *cc = ti->private;
2680 if (!cc)
2683 if (cc->write_thread)
2684 kthread_stop(cc->write_thread);
2686 if (cc->io_queue)
2687 destroy_workqueue(cc->io_queue);
2688 if (cc->crypt_queue)
2689 destroy_workqueue(cc->crypt_queue);
2691 crypt_free_tfms(cc);
2693 bioset_exit(&cc->bs);
2695 mempool_exit(&cc->page_pool);
2696 mempool_exit(&cc->req_pool);
2697 mempool_exit(&cc->tag_pool);
2699 WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
2700 percpu_counter_destroy(&cc->n_allocated_pages);
2702 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
2703 cc->iv_gen_ops->dtr(cc);
2705 if (cc->dev)
2706 dm_put_device(ti, cc->dev);
2708 kfree_sensitive(cc->cipher_string);
2709 kfree_sensitive(cc->key_string);
2710 kfree_sensitive(cc->cipher_auth);
2711 kfree_sensitive(cc->authenc_key);
2713 mutex_destroy(&cc->bio_alloc_lock);
2716 kfree_sensitive(cc);
2727 struct crypt_config *cc = ti->private;
2729 if (crypt_integrity_aead(cc))
2730 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2732 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2734 if (cc->iv_size)
2736 cc->iv_size = max(cc->iv_size,
2745 cc->iv_gen_ops = NULL;
2747 cc->iv_gen_ops = &crypt_iv_plain_ops;
2749 cc->iv_gen_ops = &crypt_iv_plain64_ops;
2751 cc->iv_gen_ops = &crypt_iv_plain64be_ops;
2753 cc->iv_gen_ops = &crypt_iv_essiv_ops;
2755 cc->iv_gen_ops = &crypt_iv_benbi_ops;
2757 cc->iv_gen_ops = &crypt_iv_null_ops;
2759 cc->iv_gen_ops = &crypt_iv_eboiv_ops;
2761 cc->iv_gen_ops = &crypt_iv_elephant_ops;
2762 cc->key_parts = 2;
2763 cc->key_extra_size = cc->key_size / 2;
2764 if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
2766 set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
2768 cc->iv_gen_ops = &crypt_iv_lmk_ops;
2775 if (cc->key_size % cc->key_parts) {
2776 cc->key_parts++;
2777 cc->key_extra_size = cc->key_size / cc->key_parts;
2780 cc->iv_gen_ops = &crypt_iv_tcw_ops;
2781 cc->key_parts += 2; /* IV + whitening */
2782 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
2784 cc->iv_gen_ops = &crypt_iv_random_ops;
2786 cc->integrity_iv_size = cc->iv_size;
2800 static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
2824 cc->key_mac_size = crypto_ahash_digestsize(mac);
2827 cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
2828 if (!cc->authenc_key)
2837 struct crypt_config *cc = ti->private;
2841 cc->tfms_count = 1;
2865 if (crypt_integrity_aead(cc)) {
2866 ret = crypt_ctr_auth_cipher(cc, cipher_api);
2874 cc->tfms_count = 64;
2890 cc->key_parts = cc->tfms_count;
2893 ret = crypt_alloc_tfms(cc, cipher_api);
2899 if (crypt_integrity_aead(cc))
2900 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2902 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2910 struct crypt_config *cc = ti->private;
2916 if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
2930 cc->tfms_count = 1;
2931 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
2932 !is_power_of_2(cc->tfms_count)) {
2936 cc->key_parts = cc->tfms_count;
2978 ret = crypt_alloc_tfms(cc, cipher_api);
2994 struct crypt_config *cc = ti->private;
2998 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
2999 if (!cc->cipher_string) {
3017 ret = crypt_set_key(cc, key);
3024 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
3025 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
3033 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
3034 ret = cc->iv_gen_ops->init(cc);
3042 if (cc->key_string)
3043 memset(cc->key, 0, cc->key_size * sizeof(u8));
3050 struct crypt_config *cc = ti->private;
3079 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3082 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3084 set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3086 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3092 cc->on_disk_tag_size = val;
3095 set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
3101 cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
3102 if (!cc->cipher_auth)
3104 } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
3105 if (cc->sector_size < (1 << SECTOR_SHIFT) ||
3106 cc->sector_size > 4096 ||
3107 (cc->sector_size & (cc->sector_size - 1))) {
3111 if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
3115 cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
3117 set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
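A worked example of the sector_size option parsed at 3104-3117:

	/*
	 * sector_size:4096 => cc->sector_size = 4096 (power of two in 512..4096),
	 * cc->sector_shift = __ffs(4096) - SECTOR_SHIFT = 12 - 9 = 3, so one
	 * crypto operation covers 8 * 512-byte sectors. With iv_large_sectors
	 * set, dmreq->iv_sector is shifted down by sector_shift (1304, 1399),
	 * so the IV counts 4K blocks instead of 512-byte sectors.
	 */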
3132 struct crypt_config *cc = ti->private;
3133 sector_t sector = cc->start + dm_target_offset(ti, args->next_sector);
3135 args->start = cc->start;
3136 return blkdev_report_zones(cc->dev->bdev, sector, nr_zones,
3148 struct crypt_config *cc;
3168 cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
3169 if (!cc) {
3173 cc->key_size = key_size;
3174 cc->sector_size = (1 << SECTOR_SHIFT);
3175 cc->sector_shift = 0;
3177 ti->private = cc;
3184 ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
3199 if (crypt_integrity_aead(cc)) {
3200 cc->dmreq_start = sizeof(struct aead_request);
3201 cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
3202 align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
3204 cc->dmreq_start = sizeof(struct skcipher_request);
3205 cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
3206 align_mask = crypto_skcipher_alignmask(any_tfm(cc));
3208 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
3212 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
3225 iv_size_padding + cc->iv_size +
3226 cc->iv_size +
3230 ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
3236 cc->per_bio_data_size = ti->per_io_data_size =
3237 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
3240 ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
3246 ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
3252 mutex_init(&cc->bio_alloc_lock);
3256 (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
3260 cc->iv_offset = tmpll;
3262 ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
3273 cc->start = tmpll;
3280 if (bdev_is_zoned(cc->dev->bdev)) {
3281 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3282 set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
3285 if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
3286 ret = crypt_integrity_ctr(cc, ti);
3290 cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
3291 if (!cc->tag_pool_max_sectors)
3292 cc->tag_pool_max_sectors = 1;
3294 ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
3295 cc->tag_pool_max_sectors * cc->on_disk_tag_size);
3301 cc->tag_pool_max_sectors <<= cc->sector_shift;
3305 cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
3306 if (!cc->io_queue) {
3311 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3312 cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
3315 cc->crypt_queue = alloc_workqueue("kcryptd/%s",
3318 if (!cc->crypt_queue) {
3323 spin_lock_init(&cc->write_thread_lock);
3324 cc->write_tree = RB_ROOT;
3326 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
3327 if (IS_ERR(cc->write_thread)) {
3328 ret = PTR_ERR(cc->write_thread);
3329 cc->write_thread = NULL;
3333 wake_up_process(cc->write_thread);
3348 struct crypt_config *cc = ti->private;
3357 bio_set_dev(bio, cc->dev->bdev);
3359 bio->bi_iter.bi_sector = cc->start +
3368 (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
3375 if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
3378 if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
3381 io = dm_per_bio_data(bio, cc->per_bio_data_size);
3382 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
3384 if (cc->on_disk_tag_size) {
3385 unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
3390 if (bio_sectors(bio) > cc->tag_pool_max_sectors)
3391 dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
3392 io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
3397 if (crypt_integrity_aead(cc))
3419 struct crypt_config *cc = ti->private;
3429 DMEMIT("%s ", cc->cipher_string);
3431 if (cc->key_size > 0) {
3432 if (cc->key_string)
3433 DMEMIT(":%u:%s", cc->key_size, cc->key_string);
3435 for (i = 0; i < cc->key_size; i++) {
3436 DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
3437 hex2asc(cc->key[i] & 0xf));
3443 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
3444 cc->dev->name, (unsigned long long)cc->start);
3447 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3448 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3449 num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3450 num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3451 num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
3452 num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
3453 if (cc->on_disk_tag_size)
3459 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3461 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
3463 if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
3465 if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
3467 if (cc->on_disk_tag_size)
3468 DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
3469 if (cc->sector_size != (1 << SECTOR_SHIFT))
3470 DMEMIT(" sector_size:%d", cc->sector_size);
3471 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
3481 struct crypt_config *cc = ti->private;
3483 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3488 struct crypt_config *cc = ti->private;
3490 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
3500 struct crypt_config *cc = ti->private;
3502 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3512 struct crypt_config *cc = ti->private;
3519 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
3526 if (key_size < 0 || cc->key_size != key_size) {
3531 ret = crypt_set_key(cc, argv[2]);
3534 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
3535 ret = cc->iv_gen_ops->init(cc);
3537 if (cc->key_string)
3538 memset(cc->key, 0, cc->key_size * sizeof(u8));
3542 return crypt_wipe_key(cc);
3553 struct crypt_config *cc = ti->private;
3555 return fn(ti, cc->dev, cc->start, ti->len, data);
3560 struct crypt_config *cc = ti->private;
3571 max_t(unsigned, limits->logical_block_size, cc->sector_size);
3573 max_t(unsigned, limits->physical_block_size, cc->sector_size);
3574 limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);