Lines matching defs:dmreq (from the Linux kernel's drivers/md/dm-crypt.c)
108 struct dm_crypt_request *dmreq);
110 struct dm_crypt_request *dmreq);
319 struct dm_crypt_request *dmreq)
322 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
328 struct dm_crypt_request *dmreq)
331 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
337 struct dm_crypt_request *dmreq)
341 *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);
347 struct dm_crypt_request *dmreq)
354 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
395 struct dm_crypt_request *dmreq)
401 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
408 struct dm_crypt_request *dmreq)
484 struct dm_crypt_request *dmreq,
511 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
512 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
532 struct dm_crypt_request *dmreq)
538 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
539 sg = crypt_get_sg_data(cc, dmreq->sg_in);
541 r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
550 struct dm_crypt_request *dmreq)
556 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
559 sg = crypt_get_sg_data(cc, dmreq->sg_out);
561 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
641 struct dm_crypt_request *dmreq,
645 __le64 sector = cpu_to_le64(dmreq->iv_sector);
679 struct dm_crypt_request *dmreq)
683 __le64 sector = cpu_to_le64(dmreq->iv_sector);
688 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
689 sg = crypt_get_sg_data(cc, dmreq->sg_in);
691 r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
705 struct dm_crypt_request *dmreq)
711 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
715 sg = crypt_get_sg_data(cc, dmreq->sg_out);
717 r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
724 struct dm_crypt_request *dmreq)
748 struct dm_crypt_request *dmreq)
769 *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
953 static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
971 *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
989 sg = crypt_get_sg_data(cc, dmreq->sg_out);
994 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
995 sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
1001 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
1011 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
1027 struct dm_crypt_request *dmreq)
1031 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
1032 r = crypt_iv_elephant(cc, dmreq);
1037 return crypt_iv_eboiv_gen(cc, iv, dmreq);
1041 struct dm_crypt_request *dmreq)
1043 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
1044 return crypt_iv_elephant(cc, dmreq);
1244 static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
1246 return (void *)((char *)dmreq - cc->dmreq_start);
1250 struct dm_crypt_request *dmreq)
1253 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1256 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1261 struct dm_crypt_request *dmreq)
1263 return iv_of_dmreq(cc, dmreq) + cc->iv_size;
1267 struct dm_crypt_request *dmreq)
1269 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
1275 struct dm_crypt_request *dmreq)
1277 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
1284 struct dm_crypt_request *dmreq)
1286 struct convert_context *ctx = dmreq->ctx;
1289 return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
1294 struct dm_crypt_request *dmreq)
1296 return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
1306 struct dm_crypt_request *dmreq;
1317 dmreq = dmreq_of_req(cc, req);
1318 dmreq->iv_sector = ctx->cc_sector;
1320 dmreq->iv_sector >>= cc->sector_shift;
1321 dmreq->ctx = ctx;
1323 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1325 sector = org_sector_of_dmreq(cc, dmreq);
1328 iv = iv_of_dmreq(cc, dmreq);
1329 org_iv = org_iv_of_dmreq(cc, dmreq);
1330 tag = tag_from_dmreq(cc, dmreq);
1331 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1338 sg_init_table(dmreq->sg_in, 4);
1339 sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
1340 sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
1341 sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1342 sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
1344 sg_init_table(dmreq->sg_out, 4);
1345 sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
1346 sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
1347 sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1348 sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
1355 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1368 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1375 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1393 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1409 struct dm_crypt_request *dmreq;
1418 dmreq = dmreq_of_req(cc, req);
1419 dmreq->iv_sector = ctx->cc_sector;
1421 dmreq->iv_sector >>= cc->sector_shift;
1422 dmreq->ctx = ctx;
1424 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1426 iv = iv_of_dmreq(cc, dmreq);
1427 org_iv = org_iv_of_dmreq(cc, dmreq);
1428 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1430 sector = org_sector_of_dmreq(cc, dmreq);
1434 sg_in = &dmreq->sg_in[0];
1435 sg_out = &dmreq->sg_out[0];
1448 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1470 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
2230 struct dm_crypt_request *dmreq = data;
2231 struct convert_context *ctx = dmreq->ctx;
2246 error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
2249 sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
2262 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
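
The *_of_dmreq() helpers in the listing (source lines 1244-1296) carve per-request scratch space out of one allocation: the dm_crypt_request sits cc->dmreq_start bytes into the crypto request, and the IV, the saved original IV, and the original sector number follow it at fixed offsets. The standalone sketch below (not kernel code) only models that pointer arithmetic; struct dm_crypt_request_sketch, struct crypt_config_sketch and the sizes chosen in main() are illustrative assumptions rather than the kernel's definitions, and the tag/tag-IV slots (source lines 1275-1296) are left out because their offsets are truncated in the listing.

/*
 * Standalone sketch (NOT kernel code) of the per-request scratch layout
 * implied by the *_of_dmreq() helpers above:
 *
 *   [ crypto req ][ struct dm_crypt_request ][ align pad ][ iv ][ org_iv ][ org_sector ] ...
 *   ^ req_of_dmreq()                          iv_of_dmreq()  +iv_size      +2*iv_size
 *
 * The *_sketch types and the sizes in main() are assumptions for illustration;
 * the real offsets come from struct crypt_config (dmreq_start, iv_size) and
 * the crypto tfm's alignmask.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* round x up to a power-of-two boundary, like the kernel's ALIGN() */
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

struct dm_crypt_request_sketch {   /* stand-in for struct dm_crypt_request */
	uint64_t iv_sector;
	void *ctx;
};

struct crypt_config_sketch {       /* stand-in for the fields used here */
	unsigned dmreq_start;      /* bytes from request start to the dmreq */
	unsigned iv_size;          /* cipher IV size in bytes */
	unsigned align;            /* tfm alignmask + 1 */
};

/* IV scratch lives right after the dmreq, rounded up to the tfm alignment */
static uint8_t *iv_of_dmreq(struct crypt_config_sketch *cc,
			    struct dm_crypt_request_sketch *dmreq)
{
	return (uint8_t *)ALIGN_UP((uintptr_t)(dmreq + 1), cc->align);
}

/* copy of the generated IV; the listing feeds it to sg_in[1]/sg_out[1]
 * and to iv_gen_ops->post() */
static uint8_t *org_iv_of_dmreq(struct crypt_config_sketch *cc,
				struct dm_crypt_request_sketch *dmreq)
{
	return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}

/* original sector number, placed first in the AEAD scatterlists */
static uint64_t *org_sector_of_dmreq(struct crypt_config_sketch *cc,
				     struct dm_crypt_request_sketch *dmreq)
{
	return (uint64_t *)(iv_of_dmreq(cc, dmreq) + 2 * cc->iv_size);
}

int main(void)
{
	struct crypt_config_sketch cc = { .dmreq_start = 64, .iv_size = 16, .align = 16 };
	size_t blob_size = cc.dmreq_start + sizeof(struct dm_crypt_request_sketch) +
			   cc.align + 2 * cc.iv_size + sizeof(uint64_t);
	uint8_t *blob = malloc(blob_size);
	struct dm_crypt_request_sketch *dmreq =
		(struct dm_crypt_request_sketch *)(blob + cc.dmreq_start);

	printf("iv      at blob + %td\n", iv_of_dmreq(&cc, dmreq) - blob);
	printf("org_iv  at blob + %td\n", org_iv_of_dmreq(&cc, dmreq) - blob);
	printf("sector  at blob + %td\n",
	       (uint8_t *)org_sector_of_dmreq(&cc, dmreq) - blob);
	free(blob);
	return 0;
}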