Lines Matching refs:req
793 * @assoclen_dma: bus physical mapped address of req->assoclen
890 struct aead_request *req)
892 struct crypto_aead *aead = crypto_aead_reqtfm(req);
895 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
902 struct skcipher_request *req)
904 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
907 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
936 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
939 struct crypto_aead *aead = crypto_aead_reqtfm(req);
944 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
968 if (likely(req->src == req->dst)) {
969 src_len = req->assoclen + req->cryptlen +
972 src_nents = sg_nents_for_len(req->src, src_len);
980 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
988 src_len = req->assoclen + req->cryptlen;
991 src_nents = sg_nents_for_len(req->src, src_len);
999 dst_nents = sg_nents_for_len(req->dst, dst_len);
1008 mapped_src_nents = dma_map_sg(qidev, req->src,
1020 mapped_dst_nents = dma_map_sg(qidev, req->dst,
1025 dma_unmap_sg(qidev, req->src, src_nents,
1039 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
1053 else if ((req->src == req->dst) && (mapped_src_nents > 1))
1065 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1075 memcpy(iv, req->iv, ivsize);
1080 caam_unmap(qidev, req->src, req->dst, src_nents,
1090 edesc->drv_req.app_ctx = req;
1094 edesc->assoclen = cpu_to_caam32(req->assoclen);
1099 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1111 sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
1115 sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
1121 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1130 out_len = req->assoclen + req->cryptlen +
1132 in_len = 4 + ivsize + req->assoclen + req->cryptlen;
1137 if (req->dst == req->src) {
1139 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
1146 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
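The aead_edesc_alloc() matches above (file lines 936-1146) cluster around one recurring step: both req->src and req->dst are DMA-mapped with dma_map_sg(), and a failed destination mapping has to unwind the already-mapped source (line 1025). Below is a minimal sketch of that map-then-unwind pattern for the out-of-place case (req->src != req->dst); the helper name and its signature are illustrative, not the driver's own code:

/* Illustrative only -- needs <linux/dma-mapping.h> and <crypto/aead.h>.
 * Maps both scatterlists of an out-of-place AEAD request and undoes the
 * source mapping if the destination mapping fails.
 */
static int map_req_sgs(struct device *qidev, struct aead_request *req,
		       int src_nents, int dst_nents)
{
	int mapped_src, mapped_dst;

	mapped_src = dma_map_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!mapped_src))
		return -ENOMEM;

	mapped_dst = dma_map_sg(qidev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst)) {
		/* Unwind with the original nents, as at line 1025 above. */
		dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	return 0;
}

For the in-place case (req->src == req->dst, line 968) a single scatterlist is mapped instead, so there is nothing to unwind.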
1156 static inline int aead_crypt(struct aead_request *req, bool encrypt)
1159 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1167 edesc = aead_edesc_alloc(req, encrypt);
1176 aead_unmap(ctx->qidev, edesc, req);
1183 static int aead_encrypt(struct aead_request *req)
1185 return aead_crypt(req, true);
1188 static int aead_decrypt(struct aead_request *req)
1190 return aead_crypt(req, false);
1193 static int ipsec_gcm_encrypt(struct aead_request *req)
1195 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
1199 static int ipsec_gcm_decrypt(struct aead_request *req)
1201 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
1208 struct skcipher_request *req = drv_req->app_ctx;
1209 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1223 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1226 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1227 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1229 skcipher_unmap(qidev, edesc, req);
1232 * The crypto API expects us to set the IV (req->iv) to the last
1237 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1241 skcipher_request_complete(req, ecode);
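The skcipher_done() matches (file lines 1208-1241) end with the IV write-back that the truncated comment at line 1232 refers to: the crypto API expects the output IV to be left in req->iv (the last ciphertext block for CBC, the last counter for CTR) so a caller can chain consecutive requests. A caller-side sketch of what that contract permits; the function below is illustrative and not part of this driver:

/* Illustrative only -- encrypts two consecutive chunks with one request,
 * relying on the cipher writing the output IV back into 'iv' between calls.
 */
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int encrypt_two_chunks(struct crypto_skcipher *tfm,
			      struct scatterlist *chunk1,
			      struct scatterlist *chunk2,
			      unsigned int len, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct skcipher_request *req;
	int err;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);

	/* First chunk: 'iv' holds the caller's initial IV. */
	skcipher_request_set_crypt(req, chunk1, chunk1, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (err)
		goto out;

	/* Second chunk: 'iv' was updated by the first call, so the CBC
	 * chain continues as if both chunks were one contiguous buffer.
	 */
	skcipher_request_set_crypt(req, chunk2, chunk2, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	return err;
}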
1244 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1247 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1250 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1265 src_nents = sg_nents_for_len(req->src, req->cryptlen);
1268 req->cryptlen);
1272 if (unlikely(req->src != req->dst)) {
1273 dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1276 req->cryptlen);
1280 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1287 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
1291 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
1295 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1314 if (req->src != req->dst)
1324 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1333 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1341 memcpy(iv, req->iv, ivsize);
1346 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1356 edesc->drv_req.app_ctx = req;
1361 sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1363 if (req->src != req->dst)
1364 sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1373 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1382 ivsize + req->cryptlen, 0);
1384 if (req->src == req->dst)
1386 sizeof(*sg_table), req->cryptlen + ivsize,
1390 sizeof(*sg_table), req->cryptlen + ivsize,
1396 static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
1398 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1401 return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1404 static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
1407 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1417 if (!req->cryptlen && !ctx->fallback)
1420 if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
1422 struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1426 req->base.flags,
1427 req->base.complete,
1428 req->base.data);
1429 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
1430 req->dst, req->cryptlen, req->iv);
1440 edesc = skcipher_edesc_alloc(req, encrypt);
1448 skcipher_unmap(ctx->qidev, edesc, req);
1455 static int skcipher_encrypt(struct skcipher_request *req)
1457 return skcipher_crypt(req, true);
1460 static int skcipher_decrypt(struct skcipher_request *req)
1462 return skcipher_crypt(req, false);
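The skcipher_crypt() matches (file lines 1404-1448) also capture the fallback hand-off taken when the hardware cannot process the request, e.g. an XTS tweak whose upper half is non-zero on older device eras (xts_skcipher_ivsize(), line 1396): the original request's flags, completion, buffers and IV are copied onto a sub-request aimed at a software skcipher (lines 1422-1430). A minimal sketch of that generic hand-off pattern, assuming the fallback transform is passed in and the request context simply embeds the sub-request; the struct and function names here are illustrative:

/* Illustrative only -- needs <crypto/skcipher.h>; the driver's own
 * caam_skcipher_req_ctx differs in detail.
 */
struct fb_req_ctx {
	struct skcipher_request fallback_req;
};

static int forward_to_fallback(struct skcipher_request *req,
			       struct crypto_skcipher *fallback, bool encrypt)
{
	struct fb_req_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->fallback_req;

	skcipher_request_set_tfm(subreq, fallback);
	skcipher_request_set_callback(subreq, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);

	return encrypt ? crypto_skcipher_encrypt(subreq) :
			 crypto_skcipher_decrypt(subreq);
}

Reusing the original completion and context means the fallback completes the caller's request directly, with no extra callback in the driver; this assumes the transform's request size was made large enough for the embedded sub-request at init time (crypto_skcipher_set_reqsize()).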