Lines Matching defs:edesc
361 struct aead_edesc *edesc;
369 /* allocate space for base edesc, link tables and IV */
370 edesc = qi_cache_zalloc(GFP_DMA | flags);
371 if (unlikely(!edesc)) {
384 qi_cache_free(edesc);
392 qi_cache_free(edesc);
401 qi_cache_free(edesc);
415 qi_cache_free(edesc);
429 qi_cache_free(edesc);
437 qi_cache_free(edesc);
467 sg_table = &edesc->sgt[0];
475 qi_cache_free(edesc);
490 qi_cache_free(edesc);
495 edesc->src_nents = src_nents;
496 edesc->dst_nents = dst_nents;
497 edesc->iv_dma = iv_dma;
505 edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
507 edesc->assoclen = cpu_to_caam32(req->assoclen);
508 edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
510 if (dma_mapping_error(dev, edesc->assoclen_dma)) {
514 qi_cache_free(edesc);
518 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
533 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
536 qi_cache_free(edesc);
540 edesc->qm_sg_dma = qm_sg_dma;
541 edesc->qm_sg_bytes = qm_sg_bytes;
582 return edesc;
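
The aead_edesc_alloc() lines above show the recurring allocate-then-unwind shape: the extended descriptor comes from qi_cache_zalloc(), every later mapping step (dma_map_single of the 4-byte assoclen, the S/G table, and so on) has its own error branch checked with dma_mapping_error(), and each branch undoes what was already set up before ending in qi_cache_free(edesc). Below is a minimal, self-contained userspace sketch of that shape only; all names (fake_edesc, fake_cache_zalloc, fake_map_u32, ...) are hypothetical stand-ins, not the driver's API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_edesc {
        uint32_t assoclen;      /* small field the device reads via "DMA" */
        uint64_t assoclen_dma;  /* pretend bus address, 0 means mapping failed */
};

static struct fake_edesc *fake_cache_zalloc(void)
{
        return calloc(1, sizeof(struct fake_edesc));
}

static void fake_cache_free(struct fake_edesc *edesc)
{
        free(edesc);
}

/* pretend dma_map_single(): returns 0 ("mapping error") when asked to fail */
static uint64_t fake_map_u32(const uint32_t *cpu_addr, int force_fail)
{
        return force_fail ? 0 : (uint64_t)(uintptr_t)cpu_addr;
}

static struct fake_edesc *fake_edesc_alloc(uint32_t assoclen, int force_fail)
{
        struct fake_edesc *edesc = fake_cache_zalloc();

        if (!edesc)
                return NULL;

        edesc->assoclen = assoclen;
        edesc->assoclen_dma = fake_map_u32(&edesc->assoclen, force_fail);
        if (!edesc->assoclen_dma) {
                /* unwind: nothing else is mapped yet, so just free the edesc */
                fake_cache_free(edesc);
                return NULL;
        }
        return edesc;
}

int main(void)
{
        struct fake_edesc *ok = fake_edesc_alloc(16, 0);
        struct fake_edesc *bad = fake_edesc_alloc(16, 1);

        printf("good path: %p, failed-mapping path: %p\n", (void *)ok, (void *)bad);
        fake_cache_free(ok);
        return 0;
}
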
1118 struct skcipher_edesc *edesc;
1189 /* allocate space for base edesc, link tables and IV */
1190 edesc = qi_cache_zalloc(GFP_DMA | flags);
1191 if (unlikely(!edesc)) {
1199 sg_table = &edesc->sgt[0];
1208 qi_cache_free(edesc);
1212 edesc->src_nents = src_nents;
1213 edesc->dst_nents = dst_nents;
1214 edesc->iv_dma = iv_dma;
1215 edesc->qm_sg_bytes = qm_sg_bytes;
1226 edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1228 if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1232 qi_cache_free(edesc);
1242 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1247 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1250 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1253 return edesc;
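
skcipher_edesc_alloc() maps the whole Qman S/G table once (dma_map_single of qm_sg_bytes) and then points both frame-list entries into that single mapping, with the output entry offset into the table (dst_sg_idx * sizeof(*sg_table) in one of the branches above). A hedged sketch of that offset arithmetic, using made-up types roughly the size of a dpaa2_sg_entry, follows.

#include <stdint.h>
#include <stdio.h>

/* hypothetical 16-byte S/G entry, stand-in for the hardware's table entry */
struct fake_sg_entry {
        uint64_t addr;
        uint32_t len;
        uint32_t bpid_flags;
};

int main(void)
{
        struct fake_sg_entry sg_table[8] = { { 0 } };
        /* pretend this is the bus address returned by one dma_map_single() */
        uint64_t qm_sg_dma = (uint64_t)(uintptr_t)sg_table;
        unsigned int dst_sg_idx = 3;    /* where the destination entries start */

        uint64_t in_addr  = qm_sg_dma;
        uint64_t out_addr = qm_sg_dma + dst_sg_idx * sizeof(*sg_table);

        printf("input  frame-list entry -> %#llx\n", (unsigned long long)in_addr);
        printf("output frame-list entry -> %#llx (+%zu bytes)\n",
               (unsigned long long)out_addr, dst_sg_idx * sizeof(*sg_table));
        return 0;
}
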
1256 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1262 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1263 edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
1264 edesc->qm_sg_bytes);
1265 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1268 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1274 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1275 edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
1276 edesc->qm_sg_bytes);
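
aead_unmap() and skcipher_unmap() mirror the mappings made at allocation time with the same lengths and directions: the AEAD IV is DMA_TO_DEVICE, the 4-byte assoclen is DMA_TO_DEVICE, and the skcipher IV is DMA_BIDIRECTIONAL, consistent with the completion callbacks reading the next IV back. One way to keep the two halves from drifting apart is to record length and direction at map time and replay them at unmap time; a purely illustrative sketch with invented names:

#include <stddef.h>
#include <stdio.h>

enum fake_dir { FAKE_TO_DEVICE, FAKE_FROM_DEVICE, FAKE_BIDIRECTIONAL };

/* record what was mapped so teardown replays exactly the same parameters */
struct fake_mapping {
        size_t len;
        enum fake_dir dir;
};

static void fake_map(struct fake_mapping *m, size_t len, enum fake_dir dir)
{
        m->len = len;
        m->dir = dir;
}

static void fake_unmap(const struct fake_mapping *m)
{
        printf("unmap %zu bytes, dir %d\n", m->len, m->dir);
}

int main(void)
{
        struct fake_mapping aead_iv, skcipher_iv, assoclen;

        fake_map(&aead_iv, 16, FAKE_TO_DEVICE);         /* device only reads the AEAD IV */
        fake_map(&skcipher_iv, 16, FAKE_BIDIRECTIONAL); /* next IV is read back afterwards */
        fake_map(&assoclen, 4, FAKE_TO_DEVICE);         /* the 4-byte assoclen word */

        fake_unmap(&assoclen);
        fake_unmap(&skcipher_iv);
        fake_unmap(&aead_iv);
        return 0;
}
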
1285 struct aead_edesc *edesc = req_ctx->edesc;
1295 aead_unmap(ctx->dev, edesc, req);
1296 qi_cache_free(edesc);
1306 struct aead_edesc *edesc = req_ctx->edesc;
1316 aead_unmap(ctx->dev, edesc, req);
1317 qi_cache_free(edesc);
1323 struct aead_edesc *edesc;
1330 edesc = aead_edesc_alloc(req, true);
1331 if (IS_ERR(edesc))
1332 return PTR_ERR(edesc);
1338 caam_req->edesc = edesc;
1342 aead_unmap(ctx->dev, edesc, req);
1343 qi_cache_free(edesc);
1351 struct aead_edesc *edesc;
1358 edesc = aead_edesc_alloc(req, false);
1359 if (IS_ERR(edesc))
1360 return PTR_ERR(edesc);
1366 caam_req->edesc = edesc;
1370 aead_unmap(ctx->dev, edesc, req);
1371 qi_cache_free(edesc);
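
aead_encrypt() and aead_decrypt() follow the same submit-or-clean-up flow: allocate the edesc, stash it in caam_req->edesc, enqueue the request, and keep the descriptor alive only when the enqueue reports it as in flight (presumably the usual -EINPROGRESS convention); any other return value means the caller unmaps and frees on the spot, while the in-flight case defers the free to the completion callback. A small userspace sketch of that ownership hand-off, with invented names, is below.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_INPROGRESS 115     /* stand-in for EINPROGRESS */

struct fake_edesc { int dummy; };

static void fake_done(struct fake_edesc *edesc)
{
        /* completion callback: it owns the descriptor from here on */
        free(edesc);
}

static int fake_enqueue(struct fake_edesc *edesc, int succeed_async)
{
        if (!succeed_async)
                return -EIO;
        fake_done(edesc);       /* pretend the hardware finished instantly */
        return -FAKE_INPROGRESS;
}

static int fake_encrypt(int succeed_async)
{
        struct fake_edesc *edesc = calloc(1, sizeof(*edesc));
        int ret;

        if (!edesc)
                return -ENOMEM;

        ret = fake_enqueue(edesc, succeed_async);
        if (ret != -FAKE_INPROGRESS) {
                /* synchronous failure: unmap/free right here, nobody else will */
                free(edesc);
        }
        return ret;
}

int main(void)
{
        printf("accepted submit -> %d\n", fake_encrypt(1));
        printf("rejected submit -> %d\n", fake_encrypt(0));
        return 0;
}
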
1394 struct skcipher_edesc *edesc = req_ctx->edesc;
1405 edesc->src_nents > 1 ? 100 : ivsize, 1);
1408 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1410 skcipher_unmap(ctx->dev, edesc, req);
1418 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1421 qi_cache_free(edesc);
1432 struct skcipher_edesc *edesc = req_ctx->edesc;
1443 edesc->src_nents > 1 ? 100 : ivsize, 1);
1446 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1448 skcipher_unmap(ctx->dev, edesc, req);
1456 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1459 qi_cache_free(edesc);
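
The skcipher completion callbacks copy the next IV out of the same qi_cache allocation that holds the descriptor: the IV sits immediately after the S/G entries, at (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes. Here is a self-contained sketch of that layout and copy; the struct and sizes are hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_sg_entry { uint64_t addr; uint32_t len; uint32_t flags; };

struct fake_skcipher_edesc {
        int qm_sg_bytes;
        struct fake_sg_entry sgt[];     /* S/G table, then the IV, in one buffer */
};

int main(void)
{
        enum { NENTS = 4, IVSIZE = 16 };
        size_t total = sizeof(struct fake_skcipher_edesc) +
                       NENTS * sizeof(struct fake_sg_entry) + IVSIZE;
        struct fake_skcipher_edesc *edesc = calloc(1, total);
        unsigned char next_iv[IVSIZE];

        if (!edesc)
                return 1;
        edesc->qm_sg_bytes = NENTS * sizeof(struct fake_sg_entry);

        /* pretend the accelerator wrote the chaining IV right after the table */
        memset((unsigned char *)&edesc->sgt[0] + edesc->qm_sg_bytes, 0xab, IVSIZE);

        /* the completion handler reads it back from that same offset */
        memcpy(next_iv, (unsigned char *)&edesc->sgt[0] + edesc->qm_sg_bytes, IVSIZE);
        printf("next IV starts with %02x %02x\n", next_iv[0], next_iv[1]);

        free(edesc);
        return 0;
}
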
1473 struct skcipher_edesc *edesc;
1502 edesc = skcipher_edesc_alloc(req);
1503 if (IS_ERR(edesc))
1504 return PTR_ERR(edesc);
1510 caam_req->edesc = edesc;
1514 skcipher_unmap(ctx->dev, edesc, req);
1515 qi_cache_free(edesc);
1523 struct skcipher_edesc *edesc;
1552 edesc = skcipher_edesc_alloc(req);
1553 if (IS_ERR(edesc))
1554 return PTR_ERR(edesc);
1560 caam_req->edesc = edesc;
1564 skcipher_unmap(ctx->dev, edesc, req);
1565 qi_cache_free(edesc);
3354 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3359 if (edesc->src_nents)
3360 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3362 if (edesc->qm_sg_bytes)
3363 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3374 struct ahash_edesc *edesc,
3383 ahash_unmap(dev, edesc, req);
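
ahash_unmap() only tears down what was actually set up: the source scatterlist is unmapped only if src_nents is non-zero and the Qman S/G table only if qm_sg_bytes is non-zero. Because qi_cache_zalloc() zero-fills the descriptor, the same helper appears usable both on the completion path and on error paths where the edesc is only partially built. A hypothetical sketch of that zero-means-skip teardown:

#include <stdio.h>

/* made-up descriptor: zero fields mean "never mapped", as after a zalloc */
struct fake_ahash_edesc {
        int src_nents;          /* number of mapped source S/G entries */
        int qm_sg_bytes;        /* size of the mapped Qman S/G table */
};

static void fake_ahash_unmap(const struct fake_ahash_edesc *edesc)
{
        if (edesc->src_nents)
                printf("unmap %d source entries\n", edesc->src_nents);

        if (edesc->qm_sg_bytes)
                printf("unmap %d-byte S/G table\n", edesc->qm_sg_bytes);
}

int main(void)
{
        struct fake_ahash_edesc fresh = { 0 };  /* error path: nothing mapped yet */
        struct fake_ahash_edesc done = { .src_nents = 3, .qm_sg_bytes = 64 };

        fake_ahash_unmap(&fresh);       /* prints nothing, a safe no-op */
        fake_ahash_unmap(&done);        /* completion path: undo both mappings */
        return 0;
}
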
3392 struct ahash_edesc *edesc = state->caam_req.edesc;
3402 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3404 qi_cache_free(edesc);
3419 struct ahash_edesc *edesc = state->caam_req.edesc;
3428 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3429 qi_cache_free(edesc);
3457 struct ahash_edesc *edesc = state->caam_req.edesc;
3467 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3469 qi_cache_free(edesc);
3484 struct ahash_edesc *edesc = state->caam_req.edesc;
3493 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3494 qi_cache_free(edesc);
3531 struct ahash_edesc *edesc;
3558 /* allocate space for base edesc and link tables */
3559 edesc = qi_cache_zalloc(GFP_DMA | flags);
3560 if (!edesc) {
3566 edesc->src_nents = src_nents;
3570 sg_table = &edesc->sgt[0];
3589 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3591 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3596 edesc->qm_sg_bytes = qm_sg_bytes;
3601 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3611 req_ctx->edesc = edesc;
3630 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3631 qi_cache_free(edesc);
3648 struct ahash_edesc *edesc;
3652 /* allocate space for base edesc and link tables */
3653 edesc = qi_cache_zalloc(GFP_DMA | flags);
3654 if (!edesc)
3658 sg_table = &edesc->sgt[0];
3671 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3673 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3678 edesc->qm_sg_bytes = qm_sg_bytes;
3683 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3693 req_ctx->edesc = edesc;
3701 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3702 qi_cache_free(edesc);
3720 struct ahash_edesc *edesc;
3741 /* allocate space for base edesc and link tables */
3742 edesc = qi_cache_zalloc(GFP_DMA | flags);
3743 if (!edesc) {
3748 edesc->src_nents = src_nents;
3752 sg_table = &edesc->sgt[0];
3765 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3767 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3772 edesc->qm_sg_bytes = qm_sg_bytes;
3777 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3787 req_ctx->edesc = edesc;
3795 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3796 qi_cache_free(edesc);
3812 struct ahash_edesc *edesc;
3834 /* allocate space for base edesc and link tables */
3835 edesc = qi_cache_zalloc(GFP_DMA | flags);
3836 if (!edesc) {
3841 edesc->src_nents = src_nents;
3846 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3850 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3852 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3856 edesc->qm_sg_bytes = qm_sg_bytes;
3858 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3883 req_ctx->edesc = edesc;
3890 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3891 qi_cache_free(edesc);
3908 struct ahash_edesc *edesc;
3911 /* allocate space for base edesc and link tables */
3912 edesc = qi_cache_zalloc(GFP_DMA | flags);
3913 if (!edesc)
3955 req_ctx->edesc = edesc;
3963 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3964 qi_cache_free(edesc);
3983 struct ahash_edesc *edesc;
4010 /* allocate space for base edesc and link tables */
4011 edesc = qi_cache_zalloc(GFP_DMA | flags);
4012 if (!edesc) {
4018 edesc->src_nents = src_nents;
4021 sg_table = &edesc->sgt[0];
4029 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4031 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4036 edesc->qm_sg_bytes = qm_sg_bytes;
4051 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4061 req_ctx->edesc = edesc;
4084 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4085 qi_cache_free(edesc);
4102 struct ahash_edesc *edesc;
4123 /* allocate space for base edesc and link tables */
4124 edesc = qi_cache_zalloc(GFP_DMA | flags);
4125 if (!edesc) {
4130 edesc->src_nents = src_nents;
4132 sg_table = &edesc->sgt[0];
4140 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4142 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4147 edesc->qm_sg_bytes = qm_sg_bytes;
4162 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4172 req_ctx->edesc = edesc;
4180 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4181 qi_cache_free(edesc);
4200 struct ahash_edesc *edesc;
4228 /* allocate space for base edesc and link tables */
4229 edesc = qi_cache_zalloc(GFP_DMA | flags);
4230 if (!edesc) {
4236 edesc->src_nents = src_nents;
4237 sg_table = &edesc->sgt[0];
4249 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4252 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4257 edesc->qm_sg_bytes = qm_sg_bytes;
4259 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4283 req_ctx->edesc = edesc;
4309 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4310 qi_cache_free(edesc);