Lines Matching defs:edesc

894 struct aead_edesc *edesc,
900 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
901 edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
902 edesc->qm_sg_bytes);
903 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
906 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
912 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
913 edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
914 edesc->qm_sg_bytes);
920 struct aead_edesc *edesc;
931 edesc = container_of(drv_req, typeof(*edesc), drv_req);
932 aead_unmap(qidev, edesc, aead_req);
935 qi_cache_free(edesc);
953 struct aead_edesc *edesc;
966 /* allocate space for base edesc and hw desc commands, link tables */
967 edesc = qi_cache_alloc(flags);
968 if (unlikely(!edesc)) {
981 qi_cache_free(edesc);
989 qi_cache_free(edesc);
1000 qi_cache_free(edesc);
1008 qi_cache_free(edesc);
1017 qi_cache_free(edesc);
1032 qi_cache_free(edesc);
1064 sg_table = &edesc->sgt[0];
1072 qi_cache_free(edesc);
1087 qi_cache_free(edesc);
1092 edesc->src_nents = src_nents;
1093 edesc->dst_nents = dst_nents;
1094 edesc->iv_dma = iv_dma;
1095 edesc->drv_req.app_ctx = req;
1096 edesc->drv_req.cbk = aead_done;
1097 edesc->drv_req.drv_ctx = drv_ctx;
1099 edesc->assoclen = cpu_to_caam32(req->assoclen);
1100 edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
1102 if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
1106 qi_cache_free(edesc);
1110 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
1125 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1128 qi_cache_free(edesc);
1132 edesc->qm_sg_dma = qm_sg_dma;
1133 edesc->qm_sg_bytes = qm_sg_bytes;
1139 fd_sgt = &edesc->drv_req.fd_sgt[0];
1158 return edesc;
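
The allocation matches above (lines 953-1158) show a consistent unwinding discipline: every DMA mapping that fails releases whatever was mapped before it and hands the edesc back to the per-CPU QI cache, which is why qi_cache_free(edesc) appears on so many of the matched lines. The fragment below is an illustrative sketch of one such step, the assoclen mapping around lines 1099-1106; it mirrors what the matches show rather than being a verbatim copy of the driver:

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		/* undo the src/dst and IV mappings done earlier in the
		 * allocation path, then give the edesc back to the cache
		 */
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}
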
1163 struct aead_edesc *edesc;
1172 edesc = aead_edesc_alloc(req, encrypt);
1173 if (IS_ERR(edesc))
1174 return PTR_ERR(edesc);
1177 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1181 aead_unmap(ctx->qidev, edesc, req);
1182 qi_cache_free(edesc);
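
Taken together, the aead matches trace the full edesc lifecycle in what looks like the CAAM QI driver (caamalg_qi.c): aead_edesc_alloc() builds the extended descriptor out of a qi_cache allocation, aead_crypt() enqueues its drv_req via caam_qi_enqueue(), and the aead_done() callback (registered at line 1096) unmaps and frees it on completion. Below is a minimal sketch of the submit side, assuming only the helpers named in the matches; the function name is made up and details such as congestion handling are omitted:

	static int aead_crypt_sketch(struct aead_request *req, bool encrypt)
	{
		struct crypto_aead *aead = crypto_aead_reqtfm(req);
		struct caam_ctx *ctx = crypto_aead_ctx(aead);
		struct aead_edesc *edesc;
		int ret;

		/* allocate space for base edesc and hw desc commands, link tables */
		edesc = aead_edesc_alloc(req, encrypt);
		if (IS_ERR(edesc))
			return PTR_ERR(edesc);

		/* hand the request to the QI backend; aead_done() runs on completion */
		ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
		if (likely(!ret))
			return -EINPROGRESS;

		/* enqueue failed: undo the DMA mappings and recycle the edesc */
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
		return ret;
	}
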
1210 static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
1212 return PTR_ALIGN((u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1218 struct skcipher_edesc *edesc;
1228 edesc = container_of(drv_req, typeof(*edesc), drv_req);
1235 edesc->src_nents > 1 ? 100 : ivsize, 1);
1238 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1240 skcipher_unmap(qidev, edesc, req);
1248 memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);
1250 qi_cache_free(edesc);
1263 struct skcipher_edesc *edesc;
1344 /* allocate space for base edesc, link tables and IV */
1345 edesc = qi_cache_alloc(flags);
1346 if (unlikely(!edesc)) {
1353 edesc->src_nents = src_nents;
1354 edesc->dst_nents = dst_nents;
1355 edesc->qm_sg_bytes = qm_sg_bytes;
1356 edesc->drv_req.app_ctx = req;
1357 edesc->drv_req.cbk = skcipher_done;
1358 edesc->drv_req.drv_ctx = drv_ctx;
1361 sg_table = &edesc->sgt[0];
1362 iv = skcipher_edesc_iv(edesc);
1370 qi_cache_free(edesc);
1374 edesc->iv_dma = iv_dma;
1385 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1387 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1391 qi_cache_free(edesc);
1395 fd_sgt = &edesc->drv_req.fd_sgt[0];
1397 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
1401 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
1405 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1409 return edesc;
1422 struct skcipher_edesc *edesc;
1456 edesc = skcipher_edesc_alloc(req, encrypt);
1457 if (IS_ERR(edesc))
1458 return PTR_ERR(edesc);
1460 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1464 skcipher_unmap(ctx->qidev, edesc, req);
1465 qi_cache_free(edesc);
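
The skcipher matches add one twist: the IV lives inside the same qi_cache allocation as the edesc, right after the hardware S/G table. skcipher_edesc_iv() (line 1212) locates that spot, the allocation path DMA-maps it, skcipher_unmap() tears it down with DMA_BIDIRECTIONAL because the hardware both reads and writes it, and skcipher_done() copies the output IV back into req->iv (line 1248) so chained requests see the updated value. The pointer computation is sketched below; the alignment argument is truncated in the match above, and dma_get_cache_alignment() is an assumption based on the mainline driver:

	/* illustrative sketch: the IV sits just past the S/G entries,
	 * rounded up to a cache line so it can be DMA-mapped on its own
	 */
	static inline u8 *skcipher_edesc_iv_sketch(struct skcipher_edesc *edesc)
	{
		return PTR_ALIGN((u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
				 dma_get_cache_alignment());
	}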