Lines Matching defs:dmreq

101 struct dm_crypt_request *dmreq);
103 struct dm_crypt_request *dmreq);
312 struct dm_crypt_request *dmreq)
315 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
321 struct dm_crypt_request *dmreq)
324 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
330 struct dm_crypt_request *dmreq)
334 *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);
340 struct dm_crypt_request *dmreq)
347 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
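
The generators matched at 315, 324, 334, and 347 all write dmreq->iv_sector directly into the IV buffer (the plain/plain64/plain64be family). A minimal portable sketch of the plain64-style pattern at line 324, with a hypothetical helper name and assuming iv points at a buffer of at least 8 bytes:

    #include <stdint.h>
    #include <string.h>

    /* Sketch only, not the kernel function: zero the IV and store the
     * 64-bit sector number little-endian in its first 8 bytes,
     * mirroring line 324 above. Assumes iv_size >= 8. */
    static void plain64_iv_sketch(uint8_t *iv, unsigned iv_size, uint64_t sector)
    {
        memset(iv, 0, iv_size);
        for (int i = 0; i < 8; i++)
            iv[i] = (uint8_t)(sector >> (8 * i));
    }
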
387 struct dm_crypt_request *dmreq)
393 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
400 struct dm_crypt_request *dmreq)
476 struct dm_crypt_request *dmreq,
503 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
504 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
524 struct dm_crypt_request *dmreq)
530 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
531 sg = crypt_get_sg_data(cc, dmreq->sg_in);
533 r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
542 struct dm_crypt_request *dmreq)
548 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
551 sg = crypt_get_sg_data(cc, dmreq->sg_out);
553 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
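
Lines 503-504 encode the sector into the two 32-bit words that crypt_iv_lmk_one() folds into its hash state: the low 32 bits, then the next 24 bits with the top bit forced on. A standalone sketch of just that encoding (hypothetical helper name, not the kernel code):

    #include <stdint.h>

    /* Sketch of the encoding at lines 503-504: word 0 = low 32 bits of
     * the sector, word 1 = the next 24 bits with bit 31 forced on; the
     * caller stores both little-endian. */
    static void lmk_sector_words_sketch(uint64_t sector, uint32_t out[2])
    {
        out[0] = (uint32_t)(sector & 0xFFFFFFFF);
        out[1] = (uint32_t)(((sector >> 32) & 0x00FFFFFF) | 0x80000000);
    }
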
633 struct dm_crypt_request *dmreq,
637 __le64 sector = cpu_to_le64(dmreq->iv_sector);
671 struct dm_crypt_request *dmreq)
675 __le64 sector = cpu_to_le64(dmreq->iv_sector);
680 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
681 sg = crypt_get_sg_data(cc, dmreq->sg_in);
683 r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
697 struct dm_crypt_request *dmreq)
703 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
707 sg = crypt_get_sg_data(cc, dmreq->sg_out);
709 r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
716 struct dm_crypt_request *dmreq)
741 struct dm_crypt_request *dmreq)
754 *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
938 static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
956 *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
974 sg = crypt_get_sg_data(cc, dmreq->sg_out);
979 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
980 sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
986 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
996 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
1012 struct dm_crypt_request *dmreq)
1016 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
1017 r = crypt_iv_elephant(cc, dmreq);
1022 return crypt_iv_eboiv_gen(cc, iv, dmreq);
1026 struct dm_crypt_request *dmreq)
1028 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
1029 return crypt_iv_elephant(cc, dmreq);
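
Unlike the plain-style generators, eboiv (line 754) and elephant (line 956) seed their IV material with the sector's byte offset, dmreq->iv_sector * cc->sector_size, and the elephant hooks at 1012-1029 split the work by direction: crypt_iv_elephant() runs in the generator on writes and in the post hook on reads. A portable sketch of the seeding step only (hypothetical helper, assuming buf_size >= 8):

    #include <stdint.h>
    #include <string.h>

    /* Sketch, not the kernel code: store sector * sector_size
     * little-endian in the first 8 bytes of a zeroed buffer, as at
     * lines 754 and 956. */
    static void byte_offset_seed_sketch(uint8_t *buf, unsigned buf_size,
                                        uint64_t sector, unsigned sector_size)
    {
        uint64_t off = sector * (uint64_t)sector_size;

        memset(buf, 0, buf_size);
        for (int i = 0; i < 8; i++)
            buf[i] = (uint8_t)(off >> (8 * i));
    }
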
1230 static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
1232 return (void *)((char *)dmreq - cc->dmreq_start);
1236 struct dm_crypt_request *dmreq)
1239 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1242 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1247 struct dm_crypt_request *dmreq)
1249 return iv_of_dmreq(cc, dmreq) + cc->iv_size;
1253 struct dm_crypt_request *dmreq)
1255 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
1260 struct dm_crypt_request *dmreq)
1262 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
1268 struct dm_crypt_request *dmreq)
1270 struct convert_context *ctx = dmreq->ctx;
1273 return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
1278 struct dm_crypt_request *dmreq)
1280 return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
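
The helpers matched at 1230-1280 all compute fixed offsets into the same per-request allocation; only tag_from_dmreq() and iv_tag_from_dmreq() point outside it, into io->integrity_metadata. A sketch of the layout those expressions imply (comment only, not a literal kernel struct):

    /*
     * Per-request layout implied by the offset helpers above (sketch):
     *
     *   [ crypto request, cc->dmreq_start bytes ]  <- req_of_dmreq(): (char *)dmreq - cc->dmreq_start
     *   [ struct dm_crypt_request               ]  <- dmreq
     *   [ padding up to the tfm alignmask       ]
     *   [ iv         : cc->iv_size bytes        ]  <- iv_of_dmreq()
     *   [ org_iv     : cc->iv_size bytes        ]  <- org_iv_of_dmreq()
     *   [ org_sector : sizeof(uint64_t), __le64 ]  <- org_sector_of_dmreq()
     *   [ org_tag    : sizeof(unsigned int)     ]  <- org_tag_of_dmreq()
     *
     * tag_from_dmreq()    = &io->integrity_metadata[*org_tag_of_dmreq() * cc->on_disk_tag_size]
     * iv_tag_from_dmreq() = tag_from_dmreq() + cc->integrity_tag_size
     */
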
1290 struct dm_crypt_request *dmreq;
1301 dmreq = dmreq_of_req(cc, req);
1302 dmreq->iv_sector = ctx->cc_sector;
1304 dmreq->iv_sector >>= cc->sector_shift;
1305 dmreq->ctx = ctx;
1307 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1309 sector = org_sector_of_dmreq(cc, dmreq);
1312 iv = iv_of_dmreq(cc, dmreq);
1313 org_iv = org_iv_of_dmreq(cc, dmreq);
1314 tag = tag_from_dmreq(cc, dmreq);
1315 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1322 sg_init_table(dmreq->sg_in, 4);
1323 sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
1324 sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
1325 sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1326 sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
1328 sg_init_table(dmreq->sg_out, 4);
1329 sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
1330 sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
1331 sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1332 sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
1339 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1352 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1359 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1371 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
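
In the AEAD path (lines 1322-1332), sg_in and sg_out are four-entry scatterlists with identical shape, and aead_request_set_crypt() at 1352/1359 then runs the cipher over them. My reading (the set_ad call is not in this listing) is that the first two entries serve as the associated data. As a comment sketch:

    /*
     * Shape of dmreq->sg_in / dmreq->sg_out as built at lines 1322-1332:
     *
     *   sg[0]  sector number, sizeof(uint64_t)    \  authenticated only
     *   sg[1]  org_iv, cc->iv_size bytes          /  (associated data)
     *   sg[2]  one cc->sector_size data fragment     encrypted/decrypted
     *   sg[3]  tag, cc->integrity_tag_size bytes     authentication tag
     */
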
1387 struct dm_crypt_request *dmreq;
1396 dmreq = dmreq_of_req(cc, req);
1397 dmreq->iv_sector = ctx->cc_sector;
1399 dmreq->iv_sector >>= cc->sector_shift;
1400 dmreq->ctx = ctx;
1402 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1404 iv = iv_of_dmreq(cc, dmreq);
1405 org_iv = org_iv_of_dmreq(cc, dmreq);
1406 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1408 sector = org_sector_of_dmreq(cc, dmreq);
1412 sg_in = &dmreq->sg_in[0];
1413 sg_out = &dmreq->sg_out[0];
1426 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1448 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
2158 struct dm_crypt_request *dmreq = async_req->data;
2159 struct convert_context *ctx = dmreq->ctx;
2174 error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
2179 (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
2184 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
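
The completion path at 2158-2184 recovers dmreq from async_req->data, runs the optional post hook on the saved org_iv, and frees the request through req_of_dmreq(). A comment sketch of the pointer round-trip, assuming dmreq_of_req() is the mirror of line 1232 (its definition is not in this listing):

    /*
     * Assumed mirror of req_of_dmreq() (line 1232):
     *   dmreq_of_req(cc, req)   == (void *)((char *)req   + cc->dmreq_start)
     *   req_of_dmreq(cc, dmreq) == (void *)((char *)dmreq - cc->dmreq_start)
     * so req_of_dmreq(cc, dmreq_of_req(cc, req)) == req, which is how
     * line 2184 gets back the crypto request to free.
     */
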