Lines Matching defs:edesc

366 struct aead_edesc *edesc;
374 /* allocate space for base edesc, link tables and IV */
375 edesc = qi_cache_zalloc(flags);
376 if (unlikely(!edesc)) {
389 qi_cache_free(edesc);
397 qi_cache_free(edesc);
406 qi_cache_free(edesc);
420 qi_cache_free(edesc);
434 qi_cache_free(edesc);
442 qi_cache_free(edesc);
472 sg_table = &edesc->sgt[0];
480 qi_cache_free(edesc);
495 qi_cache_free(edesc);
500 edesc->src_nents = src_nents;
501 edesc->dst_nents = dst_nents;
502 edesc->iv_dma = iv_dma;
510 edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
512 edesc->assoclen = cpu_to_caam32(req->assoclen);
513 edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
515 if (dma_mapping_error(dev, edesc->assoclen_dma)) {
519 qi_cache_free(edesc);
523 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
538 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
541 qi_cache_free(edesc);
545 edesc->qm_sg_dma = qm_sg_dma;
546 edesc->qm_sg_bytes = qm_sg_bytes;
587 return edesc;
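
The block of hits above (source lines 366-587) is the AEAD extended-descriptor allocation path: the edesc is zero-allocated from the QI cache, every DMA resource it acquires (IV, the 4-byte assoclen word, the S/G table) is recorded in its fields, and every failure between the allocation at 375 and the return at 587 releases the descriptor with qi_cache_free() before bailing out. Below is a minimal user-space sketch of that allocate/record/early-free shape, with calloc()/free() standing in for qi_cache_zalloc()/qi_cache_free() and a fake mapping helper in place of dma_map_single(); all names in the sketch are illustrative, not the driver's API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_edesc {
	int src_nents;			/* recorded for the later unmap */
	int dst_nents;
	uint64_t iv_dma;
	uint64_t qm_sg_dma;
	unsigned int qm_sg_bytes;
};

/* Stand-in for dma_map_single(); 0 would model a mapping error. */
static uint64_t fake_map(const void *p, size_t len)
{
	(void)len;
	return (uint64_t)(uintptr_t)p;
}

static struct fake_edesc *fake_edesc_alloc(void *iv, void *sg_table,
					   unsigned int qm_sg_bytes)
{
	/* allocate space for the base edesc (qi_cache_zalloc() in the driver) */
	struct fake_edesc *edesc = calloc(1, sizeof(*edesc));

	if (!edesc)
		return NULL;

	edesc->iv_dma = fake_map(iv, 16);
	if (!edesc->iv_dma) {
		free(edesc);		/* qi_cache_free(edesc) on error */
		return NULL;
	}

	edesc->qm_sg_dma = fake_map(sg_table, qm_sg_bytes);
	if (!edesc->qm_sg_dma) {
		/* the driver would also undo the earlier IV mapping here */
		free(edesc);
		return NULL;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	return edesc;
}

int main(void)
{
	char iv[16], sg[64];
	struct fake_edesc *edesc = fake_edesc_alloc(iv, sg, sizeof(sg));

	printf("fake edesc %s\n", edesc ? "built" : "failed");
	free(edesc);
	return 0;
}
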
1123 struct skcipher_edesc *edesc;
1194 /* allocate space for base edesc, link tables and IV */
1195 edesc = qi_cache_zalloc(flags);
1196 if (unlikely(!edesc)) {
1204 sg_table = &edesc->sgt[0];
1213 qi_cache_free(edesc);
1217 edesc->src_nents = src_nents;
1218 edesc->dst_nents = dst_nents;
1219 edesc->iv_dma = iv_dma;
1220 edesc->qm_sg_bytes = qm_sg_bytes;
1231 edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1233 if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1237 qi_cache_free(edesc);
1247 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1252 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1255 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1258 return edesc;
1261 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1267 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1268 edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
1269 edesc->qm_sg_bytes);
1270 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1273 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1279 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1280 edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
1281 edesc->qm_sg_bytes);
1290 struct aead_edesc *edesc = req_ctx->edesc;
1300 aead_unmap(ctx->dev, edesc, req);
1301 qi_cache_free(edesc);
1311 struct aead_edesc *edesc = req_ctx->edesc;
1321 aead_unmap(ctx->dev, edesc, req);
1322 qi_cache_free(edesc);
1328 struct aead_edesc *edesc;
1335 edesc = aead_edesc_alloc(req, true);
1336 if (IS_ERR(edesc))
1337 return PTR_ERR(edesc);
1343 caam_req->edesc = edesc;
1347 aead_unmap(ctx->dev, edesc, req);
1348 qi_cache_free(edesc);
1356 struct aead_edesc *edesc;
1363 edesc = aead_edesc_alloc(req, false);
1364 if (IS_ERR(edesc))
1365 return PTR_ERR(edesc);
1371 caam_req->edesc = edesc;
1375 aead_unmap(ctx->dev, edesc, req);
1376 qi_cache_free(edesc);
1399 struct skcipher_edesc *edesc = req_ctx->edesc;
1410 edesc->src_nents > 1 ? 100 : ivsize, 1);
1413 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1415 skcipher_unmap(ctx->dev, edesc, req);
1423 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1426 qi_cache_free(edesc);
1437 struct skcipher_edesc *edesc = req_ctx->edesc;
1448 edesc->src_nents > 1 ? 100 : ivsize, 1);
1451 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1453 skcipher_unmap(ctx->dev, edesc, req);
1461 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1464 qi_cache_free(edesc);
1478 struct skcipher_edesc *edesc;
1507 edesc = skcipher_edesc_alloc(req);
1508 if (IS_ERR(edesc))
1509 return PTR_ERR(edesc);
1515 caam_req->edesc = edesc;
1519 skcipher_unmap(ctx->dev, edesc, req);
1520 qi_cache_free(edesc);
1528 struct skcipher_edesc *edesc;
1557 edesc = skcipher_edesc_alloc(req);
1558 if (IS_ERR(edesc))
1559 return PTR_ERR(edesc);
1565 caam_req->edesc = edesc;
1569 skcipher_unmap(ctx->dev, edesc, req);
1570 qi_cache_free(edesc);
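
The encrypt/decrypt entry points in the hits around 1328-1376 and 1478-1570 show the ownership rule for the descriptor: after allocation it is parked in caam_req->edesc, and exactly one party frees it, either the submit path itself when the enqueue fails (unmap followed by qi_cache_free right there), or the completion handler at 1290/1311/1399/1437 once the hardware is done with the job. A hedged C sketch of that single-owner handoff follows, with a plain function pointer standing in for the hardware completion; the names are illustrative only.

#include <stdio.h>
#include <stdlib.h>

struct fake_req {
	void *edesc;				/* caam_req->edesc in the driver */
	void (*done)(struct fake_req *req);	/* completion callback */
};

static void fake_done(struct fake_req *req)
{
	/* the callback unmaps and frees the descriptor it was handed */
	free(req->edesc);
	req->edesc = NULL;
}

/* Stand-in for the enqueue call; set 'fail' to model a rejected submit. */
static int fake_enqueue(struct fake_req *req, int fail)
{
	if (fail)
		return -1;
	req->done(req);				/* "hardware" completes */
	return 0;
}

static int fake_encrypt(int fail)
{
	struct fake_req req = { .done = fake_done };

	req.edesc = calloc(1, 64);		/* edesc_alloc() */
	if (!req.edesc)
		return -1;

	if (fake_enqueue(&req, fail)) {
		/* submit failed: the submitter unmaps and frees */
		free(req.edesc);
		return -1;
	}
	return 0;				/* callback owns cleanup */
}

int main(void)
{
	printf("ok path: %d, busy path: %d\n", fake_encrypt(0), fake_encrypt(1));
	return 0;
}
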
3367 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3372 if (edesc->src_nents)
3373 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3375 if (edesc->qm_sg_bytes)
3376 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3387 struct ahash_edesc *edesc,
3396 ahash_unmap(dev, edesc, req);
3405 struct ahash_edesc *edesc = state->caam_req.edesc;
3415 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3417 qi_cache_free(edesc);
3432 struct ahash_edesc *edesc = state->caam_req.edesc;
3441 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3442 qi_cache_free(edesc);
3470 struct ahash_edesc *edesc = state->caam_req.edesc;
3480 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3482 qi_cache_free(edesc);
3497 struct ahash_edesc *edesc = state->caam_req.edesc;
3506 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3507 qi_cache_free(edesc);
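
ahash_unmap() at 3367, ahash_unmap_ctx() at 3387, and the four completion handlers that follow all lean on the fields the allocation side filled in: src_nents and qm_sg_bytes double as "was this resource acquired" flags, so one teardown helper serves every ahash variant no matter which mappings a particular operation actually set up. A small sketch of that guarded-teardown idea, using illustrative names rather than the driver API:

#include <stdio.h>
#include <stdlib.h>

struct fake_hash_edesc {
	int src_nents;			/* 0 if the source was never mapped */
	unsigned int qm_sg_bytes;	/* 0 if no S/G table was mapped */
};

/* One helper tears down only what the allocation path recorded. */
static void fake_hash_unmap(const struct fake_hash_edesc *edesc)
{
	if (edesc->src_nents)
		printf("unmap %d source entries\n", edesc->src_nents);

	if (edesc->qm_sg_bytes)
		printf("unmap %u-byte S/G table\n", edesc->qm_sg_bytes);
}

int main(void)
{
	struct fake_hash_edesc *edesc = calloc(1, sizeof(*edesc));

	if (!edesc)
		return 1;

	edesc->qm_sg_bytes = 128;	/* this variant mapped only a table */
	fake_hash_unmap(edesc);
	free(edesc);			/* qi_cache_free(edesc) in the driver */
	return 0;
}
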
3544 struct ahash_edesc *edesc;
3571 /* allocate space for base edesc and link tables */
3572 edesc = qi_cache_zalloc(flags);
3573 if (!edesc) {
3579 edesc->src_nents = src_nents;
3583 sg_table = &edesc->sgt[0];
3602 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3604 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3609 edesc->qm_sg_bytes = qm_sg_bytes;
3614 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3624 req_ctx->edesc = edesc;
3643 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3644 qi_cache_free(edesc);
3661 struct ahash_edesc *edesc;
3665 /* allocate space for base edesc and link tables */
3666 edesc = qi_cache_zalloc(flags);
3667 if (!edesc)
3671 sg_table = &edesc->sgt[0];
3684 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3686 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3691 edesc->qm_sg_bytes = qm_sg_bytes;
3696 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3706 req_ctx->edesc = edesc;
3714 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3715 qi_cache_free(edesc);
3733 struct ahash_edesc *edesc;
3754 /* allocate space for base edesc and link tables */
3755 edesc = qi_cache_zalloc(flags);
3756 if (!edesc) {
3761 edesc->src_nents = src_nents;
3765 sg_table = &edesc->sgt[0];
3778 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3780 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3785 edesc->qm_sg_bytes = qm_sg_bytes;
3790 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3800 req_ctx->edesc = edesc;
3808 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3809 qi_cache_free(edesc);
3825 struct ahash_edesc *edesc;
3847 /* allocate space for base edesc and link tables */
3848 edesc = qi_cache_zalloc(flags);
3849 if (!edesc) {
3854 edesc->src_nents = src_nents;
3859 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3863 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3865 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3869 edesc->qm_sg_bytes = qm_sg_bytes;
3871 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3896 req_ctx->edesc = edesc;
3903 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3904 qi_cache_free(edesc);
3921 struct ahash_edesc *edesc;
3924 /* allocate space for base edesc and link tables */
3925 edesc = qi_cache_zalloc(flags);
3926 if (!edesc)
3968 req_ctx->edesc = edesc;
3976 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3977 qi_cache_free(edesc);
3996 struct ahash_edesc *edesc;
4023 /* allocate space for base edesc and link tables */
4024 edesc = qi_cache_zalloc(flags);
4025 if (!edesc) {
4031 edesc->src_nents = src_nents;
4034 sg_table = &edesc->sgt[0];
4042 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4044 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4049 edesc->qm_sg_bytes = qm_sg_bytes;
4064 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4074 req_ctx->edesc = edesc;
4097 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4098 qi_cache_free(edesc);
4115 struct ahash_edesc *edesc;
4136 /* allocate space for base edesc and link tables */
4137 edesc = qi_cache_zalloc(flags);
4138 if (!edesc) {
4143 edesc->src_nents = src_nents;
4145 sg_table = &edesc->sgt[0];
4153 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4155 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4160 edesc->qm_sg_bytes = qm_sg_bytes;
4175 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4185 req_ctx->edesc = edesc;
4193 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4194 qi_cache_free(edesc);
4213 struct ahash_edesc *edesc;
4241 /* allocate space for base edesc and link tables */
4242 edesc = qi_cache_zalloc(flags);
4243 if (!edesc) {
4249 edesc->src_nents = src_nents;
4250 sg_table = &edesc->sgt[0];
4262 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4265 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4270 edesc->qm_sg_bytes = qm_sg_bytes;
4272 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4296 req_ctx->edesc = edesc;
4322 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4323 qi_cache_free(edesc);
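
Every ahash hit from 3544 onward repeats one shape: zero-allocate the edesc from the QI cache, record src_nents, map the in-line S/G table into qm_sg_dma, note qm_sg_bytes, point the frame list at it with dpaa2_fl_set_addr(), stash the descriptor in req_ctx->edesc, and on any failure unmap whatever was set up and qi_cache_free() the descriptor. The generic C sketch below shows one common way to express that shape with a single-exit unwind; the goto style and all names here are an illustration, not a claim about how the driver itself is written.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_hash_edesc {
	int src_nents;
	uint64_t qm_sg_dma;
	unsigned int qm_sg_bytes;
};

/* Stand-in for dma_map_single(); 0 would model a mapping error. */
static uint64_t fake_map(const void *p)
{
	return (uint64_t)(uintptr_t)p;
}

static int fake_ahash_op(void **out_edesc, void *sg_table,
			 unsigned int qm_sg_bytes, int src_nents)
{
	struct fake_hash_edesc *edesc;
	int ret = -1;

	/* allocate space for base edesc and link tables */
	edesc = calloc(1, sizeof(*edesc));
	if (!edesc)
		return -1;

	edesc->src_nents = src_nents;

	edesc->qm_sg_dma = fake_map(sg_table);
	if (!edesc->qm_sg_dma)
		goto unmap;
	edesc->qm_sg_bytes = qm_sg_bytes;

	*out_edesc = edesc;		/* req_ctx->edesc = edesc */
	return 0;

unmap:
	/* the driver would undo the mappings made so far here */
	free(edesc);			/* qi_cache_free(edesc) */
	return ret;
}

int main(void)
{
	void *edesc = NULL;
	char sg[64];

	printf("op: %d\n", fake_ahash_op(&edesc, sg, sizeof(sg), 1));
	free(edesc);
	return 0;
}
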