Lines Matching refs:req
798 * @assoclen_dma: bus physical mapped address of req->assoclen
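The @assoclen_dma hit at line 798 documents a DMA handle for the 32-bit copy of req->assoclen that the driver hands to the accelerator (see the cpu_to_caam32(req->assoclen) hit at line 1099). A minimal sketch of how such a field is typically mapped with the standard DMA API; edesc and qidev are names taken from the listing, the exact call site is an assumption:

	/* Map the 4-byte, CAAM-endian copy of req->assoclen so the engine can
	 * read it as the first S/G entry (sketch, not the driver's verbatim code).
	 */
	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		/* unwind earlier mappings and bail out */
	}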
895 struct aead_request *req)
897 struct crypto_aead *aead = crypto_aead_reqtfm(req);
900 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
907 struct skcipher_request *req)
909 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
912 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
941 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
944 struct crypto_aead *aead = crypto_aead_reqtfm(req);
949 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
973 if (likely(req->src == req->dst)) {
974 src_len = req->assoclen + req->cryptlen +
977 src_nents = sg_nents_for_len(req->src, src_len);
985 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
993 src_len = req->assoclen + req->cryptlen;
996 src_nents = sg_nents_for_len(req->src, src_len);
1004 dst_nents = sg_nents_for_len(req->dst, dst_len);
1013 mapped_src_nents = dma_map_sg(qidev, req->src,
1025 mapped_dst_nents = dma_map_sg(qidev, req->dst,
1030 dma_unmap_sg(qidev, req->src, src_nents,
1044 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
1058 else if ((req->src == req->dst) && (mapped_src_nents > 1))
1070 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1080 memcpy(iv, req->iv, ivsize);
1085 caam_unmap(qidev, req->src, req->dst, src_nents,
1095 edesc->drv_req.app_ctx = req;
1099 edesc->assoclen = cpu_to_caam32(req->assoclen);
1104 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1116 sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
1120 sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
1126 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1135 out_len = req->assoclen + req->cryptlen +
1137 in_len = 4 + ivsize + req->assoclen + req->cryptlen;
1142 if (req->dst == req->src) {
1144 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
1151 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
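The aead_edesc_alloc hits above (lines 941 to 1151) all build one input S/G table whose layout the comment at line 1044 spells out: a 4-byte assoclen word, an optional IV, then the source entries, with the destination entries appended only for out-of-place requests. A small illustrative helper for that accounting, consistent with the in_len arithmetic visible at line 1137; this function does not exist in the driver and its name is hypothetical:

	/* Illustrative entry count for the table described at line 1044:
	 * [assoclen word][IV][req->src ...][req->dst ... if dst != src].
	 */
	static unsigned int aead_qm_sg_entries(bool in_place, bool has_iv,
					       int mapped_src_nents,
					       int mapped_dst_nents)
	{
		unsigned int ents = 1 + (has_iv ? 1 : 0) + mapped_src_nents;

		if (!in_place)
			ents += mapped_dst_nents;
		return ents;
	}

	/* Total job input length, as at line 1137:
	 * in_len = 4 (assoclen word) + ivsize + req->assoclen + req->cryptlen
	 */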
1161 static inline int aead_crypt(struct aead_request *req, bool encrypt)
1164 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1172 edesc = aead_edesc_alloc(req, encrypt);
1181 aead_unmap(ctx->qidev, edesc, req);
1188 static int aead_encrypt(struct aead_request *req)
1190 return aead_crypt(req, true);
1193 static int aead_decrypt(struct aead_request *req)
1195 return aead_crypt(req, false);
1198 static int ipsec_gcm_encrypt(struct aead_request *req)
1200 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
1204 static int ipsec_gcm_decrypt(struct aead_request *req)
1206 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
1219 struct skcipher_request *req = drv_req->app_ctx;
1220 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1234 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1237 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1238 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1240 skcipher_unmap(qidev, edesc, req);
1243 * The crypto API expects us to set the IV (req->iv) to the last
1248 memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);
1251 skcipher_request_complete(req, ecode);
1254 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1257 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1260 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1276 src_nents = sg_nents_for_len(req->src, req->cryptlen);
1279 req->cryptlen);
1283 if (unlikely(req->src != req->dst)) {
1284 dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1287 req->cryptlen);
1291 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1298 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
1302 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
1306 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1325 if (req->src != req->dst)
1339 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1348 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1356 edesc->drv_req.app_ctx = req;
1363 memcpy(iv, req->iv, ivsize);
1368 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1377 sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1379 if (req->src != req->dst)
1380 sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1389 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1398 ivsize + req->cryptlen, 0);
1400 if (req->src == req->dst)
1402 sizeof(*sg_table), req->cryptlen + ivsize,
1406 sizeof(*sg_table), req->cryptlen + ivsize,
1412 static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
1414 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1417 return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1420 static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
1423 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1433 if (!req->cryptlen && !ctx->fallback)
1436 if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
1438 struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1442 req->base.flags,
1443 req->base.complete,
1444 req->base.data);
1445 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
1446 req->dst, req->cryptlen, req->iv);
1456 edesc = skcipher_edesc_alloc(req, encrypt);
1464 skcipher_unmap(ctx->qidev, edesc, req);
1471 static int skcipher_encrypt(struct skcipher_request *req)
1473 return skcipher_crypt(req, true);
1476 static int skcipher_decrypt(struct skcipher_request *req)
1478 return skcipher_crypt(req, false);
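The skcipher_crypt hits at lines 1436 to 1446 show the XTS fallback path preparing a sub-request (rctx->fallback_req) that mirrors the caller's request. Because the listing only shows the lines that reference req, here is a hedged sketch of the complete pattern using the standard skcipher API; ctx->fallback and rctx->fallback_req come from the listing, the surrounding lines are assumptions:

	/* Sketch of the fallback dispatch implied by lines 1436-1446, not the
	 * driver's verbatim code: reuse the caller's buffers, flags and
	 * completion, then run the request on the software fallback tfm.
	 */
	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
			 crypto_skcipher_decrypt(&rctx->fallback_req);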