Lines Matching defs:edesc (each entry below is prefixed with its line number in the source file)
889 struct aead_edesc *edesc,
895 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
896 edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
897 edesc->qm_sg_bytes);
898 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
901 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
907 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
908 edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
909 edesc->qm_sg_bytes);
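
The two unmap helpers above (lines 889-898 and 901-909) both funnel into caam_unmap(). Below is a condensed, illustrative stand-in for that teardown written against the standard DMA API; the parameter list mirrors the calls above, the in-place (src == dst) handling is simplified, and the _sketch name is invented here, not the driver's.

static void caam_unmap_sketch(struct device *dev, struct scatterlist *src,
                              struct scatterlist *dst, int src_nents,
                              int dst_nents, dma_addr_t iv_dma, int ivsize,
                              enum dma_data_direction iv_dir,
                              dma_addr_t qm_sg_dma, int qm_sg_bytes)
{
        if (dst != src) {
                if (src_nents)
                        dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
                if (dst_nents)
                        dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
        } else {
                dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
        }

        /* IV buffer mapped separately: DMA_TO_DEVICE for the AEAD caller,
         * DMA_BIDIRECTIONAL for the skcipher caller, per lines 896/908 */
        if (iv_dma)
                dma_unmap_single(dev, iv_dma, ivsize, iv_dir);

        /* hardware S/G table carried inside the edesc */
        if (qm_sg_bytes)
                dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
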
915 struct aead_edesc *edesc;
926 edesc = container_of(drv_req, typeof(*edesc), drv_req);
927 aead_unmap(qidev, edesc, aead_req);
930 qi_cache_free(edesc);
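
The callback wired up at line 1091 (drv_req.cbk = aead_done) runs when the response frame is dequeued; lines 915-930 show it recovering the edesc, unmapping it and returning it to the cache. A simplified version of that completion path follows, assuming the driver context (struct caam_ctx) exposes the qidev pointer used for the mappings; real decoding of the CAAM status word is reduced to a blunt -EIO here.

static void aead_done_sketch(struct caam_drv_req *drv_req, u32 status)
{
        struct aead_request *aead_req = drv_req->app_ctx;
        struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);   /* ctx->qidev assumed */
        struct aead_edesc *edesc;

        /* recover the enclosing edesc from its embedded drv_req */
        edesc = container_of(drv_req, typeof(*edesc), drv_req);

        aead_unmap(ctx->qidev, edesc, aead_req);
        qi_cache_free(edesc);

        /* the real callback decodes the status word instead of this shortcut */
        aead_request_complete(aead_req, status ? -EIO : 0);
}
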
948 struct aead_edesc *edesc;
961 /* allocate space for base edesc and hw desc commands, link tables */
962 edesc = qi_cache_alloc(GFP_DMA | flags);
963 if (unlikely(!edesc)) {
976 qi_cache_free(edesc);
984 qi_cache_free(edesc);
995 qi_cache_free(edesc);
1003 qi_cache_free(edesc);
1012 qi_cache_free(edesc);
1027 qi_cache_free(edesc);
1059 sg_table = &edesc->sgt[0];
1067 qi_cache_free(edesc);
1082 qi_cache_free(edesc);
1087 edesc->src_nents = src_nents;
1088 edesc->dst_nents = dst_nents;
1089 edesc->iv_dma = iv_dma;
1090 edesc->drv_req.app_ctx = req;
1091 edesc->drv_req.cbk = aead_done;
1092 edesc->drv_req.drv_ctx = drv_ctx;
1094 edesc->assoclen = cpu_to_caam32(req->assoclen);
1095 edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
1097 if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
1101 qi_cache_free(edesc);
1105 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
1120 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1123 qi_cache_free(edesc);
1127 edesc->qm_sg_dma = qm_sg_dma;
1128 edesc->qm_sg_bytes = qm_sg_bytes;
1134 fd_sgt = &edesc->drv_req.fd_sgt[0];
1153 return edesc;
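
The allocation routine above (lines 948-1153) accounts for most of the qi_cache_free() hits in this listing: each DMA-mapping failure must unwind whatever was mapped before it and then return the edesc to the cache. A heavily condensed skeleton of that ordering follows; it skips the src/dst scatterlist mapping and S/G entry counting entirely, assumes a caam_ctx with a qidev member as used at line 1172, and is not the actual function.

/* Condensed skeleton of the alloc-then-map ordering; not the real function. */
static struct aead_edesc *aead_edesc_alloc_sketch(struct caam_ctx *ctx,
                                                  struct aead_request *req,
                                                  struct caam_drv_ctx *drv_ctx)
{
        struct device *qidev = ctx->qidev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct aead_edesc *edesc;
        struct qm_sg_entry *sg_table;
        int qm_sg_bytes = sizeof(*sg_table);    /* real code sizes the table */
        dma_addr_t qm_sg_dma;

        /* base edesc, hw S/G table and assoclen share one cache object */
        edesc = qi_cache_alloc(GFP_DMA | flags);
        if (unlikely(!edesc))
                return ERR_PTR(-ENOMEM);

        sg_table = &edesc->sgt[0];

        edesc->drv_req.app_ctx = req;
        edesc->drv_req.cbk = aead_done;
        edesc->drv_req.drv_ctx = drv_ctx;

        /* assoclen is handed to the accelerator as a 4-byte S/G entry */
        edesc->assoclen = cpu_to_caam32(req->assoclen);
        edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
                                             DMA_TO_DEVICE);
        if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
                qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
        }
        dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);

        /* the S/G table itself must be visible to the device too */
        qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes,
                                   DMA_TO_DEVICE);
        if (dma_mapping_error(qidev, qm_sg_dma)) {
                /* unwind in reverse order before freeing the edesc */
                dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
                qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
        }

        edesc->qm_sg_dma = qm_sg_dma;
        edesc->qm_sg_bytes = qm_sg_bytes;

        return edesc;
}
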
1158 struct aead_edesc *edesc;
1167 edesc = aead_edesc_alloc(req, encrypt);
1168 if (IS_ERR_OR_NULL(edesc))
1169 return PTR_ERR(edesc);
1172 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1176 aead_unmap(ctx->qidev, edesc, req);
1177 qi_cache_free(edesc);
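
Submission itself (lines 1158-1177) is short: the edesc embeds its own caam_drv_req, so one caam_qi_enqueue() call hands the request to the hardware. A sketch of that pattern is below, assuming caam_qi_enqueue() returns 0 once the frame is queued (which is how the callers above treat it); the IS_ERR_OR_NULL() check at line 1168 is tightened to IS_ERR() on the assumption that the allocator only reports failure through ERR_PTR() values.

/* Sketch of the submit pattern shared by the AEAD and skcipher entry points. */
static int aead_crypt_sketch(struct aead_request *req, bool encrypt)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);   /* ctx->qidev assumed */
        struct aead_edesc *edesc;
        int ret;

        edesc = aead_edesc_alloc(req, encrypt);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
        if (!ret)
                return -EINPROGRESS;    /* aead_done() now owns the edesc */

        /* enqueue failed: the callback will never fire, so clean up here */
        aead_unmap(ctx->qidev, edesc, req);
        qi_cache_free(edesc);
        return ret;
}
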
1207 struct skcipher_edesc *edesc;
1217 edesc = container_of(drv_req, typeof(*edesc), drv_req);
1224 edesc->src_nents > 1 ? 100 : ivsize, 1);
1227 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1229 skcipher_unmap(qidev, edesc, req);
1237 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1240 qi_cache_free(edesc);
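
The skcipher completion (lines 1207-1240) differs from the AEAD one in the detail at line 1237: the output IV is stashed right after the hardware S/G table inside the same edesc allocation, and is copied back into req->iv so a chained (e.g. CBC) request can continue from it. A simplified version, with the same -EIO shortcut for status decoding as before and the debug dumps at lines 1224/1227 omitted:

static void skcipher_done_sketch(struct caam_drv_req *drv_req, u32 status)
{
        struct skcipher_request *req = drv_req->app_ctx;
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); /* ctx->qidev assumed */
        int ivsize = crypto_skcipher_ivsize(tfm);
        struct skcipher_edesc *edesc;

        edesc = container_of(drv_req, typeof(*edesc), drv_req);

        skcipher_unmap(ctx->qidev, edesc, req);

        /* the next IV lives just past the qm_sg entries in the edesc */
        memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);

        qi_cache_free(edesc);
        skcipher_request_complete(req, status ? -EIO : 0);
}
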
1253 struct skcipher_edesc *edesc;
1329 /* allocate space for base edesc, link tables and IV */
1330 edesc = qi_cache_alloc(GFP_DMA | flags);
1331 if (unlikely(!edesc)) {
1339 sg_table = &edesc->sgt[0];
1348 qi_cache_free(edesc);
1352 edesc->src_nents = src_nents;
1353 edesc->dst_nents = dst_nents;
1354 edesc->iv_dma = iv_dma;
1355 edesc->qm_sg_bytes = qm_sg_bytes;
1356 edesc->drv_req.app_ctx = req;
1357 edesc->drv_req.cbk = skcipher_done;
1358 edesc->drv_req.drv_ctx = drv_ctx;
1369 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1371 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1375 qi_cache_free(edesc);
1379 fd_sgt = &edesc->drv_req.fd_sgt[0];
1381 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
1385 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
1389 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1393 return edesc;
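
The tail of the skcipher allocator (lines 1379-1391) fills the compound frame's S/G pair: fd_sgt[1] describes the input (IV plus source data, routed through the mapped table) and fd_sgt[0] the output, pointing back into the same table at either the in-place offset or the separate-destination offset. The helper below restates just that step; the in_len/out_len parameters stand in for the driver's exact length arithmetic and the function name is invented for illustration.

static void skcipher_fill_fd_sgt_sketch(struct skcipher_edesc *edesc,
                                        int dst_sg_idx, bool in_place,
                                        u32 in_len, u32 out_len)
{
        struct qm_sg_entry *fd_sgt = &edesc->drv_req.fd_sgt[0];

        /* input side: one "external" entry pointing at the mapped S/G table */
        dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, in_len, 0);

        if (in_place)
                /* output reuses the same table, starting past the IV entry */
                dma_to_qm_sg_one_ext(&fd_sgt[0],
                                     edesc->qm_sg_dma + sizeof(*fd_sgt),
                                     out_len, 0);
        else
                /* output starts at the destination's part of the table */
                dma_to_qm_sg_one_ext(&fd_sgt[0],
                                     edesc->qm_sg_dma +
                                     dst_sg_idx * sizeof(*fd_sgt),
                                     out_len, 0);
}
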
1406 struct skcipher_edesc *edesc;
1440 edesc = skcipher_edesc_alloc(req, encrypt);
1441 if (IS_ERR(edesc))
1442 return PTR_ERR(edesc);
1444 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1448 skcipher_unmap(ctx->qidev, edesc, req);
1449 qi_cache_free(edesc);
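
skcipher_crypt() (lines 1406-1449) follows the same submit-or-unwind pattern already sketched for aead_crypt(). Nothing outside this file touches an edesc directly; callers reach these entry points through the generic crypto API. For context, a minimal hypothetical caller using the standard kernel skcipher interface is sketched below; "cbc(aes)" is an arbitrary choice, and whether this driver actually services it depends on which implementations are registered and their priorities.

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Encrypt len bytes (a multiple of the block size) in place in buf. */
static int submit_one_request(u8 *buf, unsigned int len, u8 *iv,
                              const u8 *key, unsigned int keylen)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int ret;

        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_skcipher_setkey(tfm, key, keylen);
        if (ret)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg, buf, len);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);

        /* an async backend returns -EINPROGRESS here; crypto_wait_req()
         * sleeps until the driver's completion callback fires */
        ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return ret;
}
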