Lines Matching defs:edesc (every definition and use of the extended-descriptor pointer in the CAAM crypto driver, caamalg.c)
124 struct skcipher_edesc *edesc;
129 struct aead_edesc *edesc;
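The two matches above (file lines 124 and 129) are the edesc pointers stashed in the driver's per-request context structs, so the completion callback can recover the descriptor after the hardware finishes. Pieced together from the fields the later matches touch, the extended descriptor looks roughly like this (a reconstruction from this listing, not a verbatim copy of the source; skcipher_edesc additionally carries the iv_dma seen at lines 966 and 1745):

    /* Rough shape, inferred from the matches below: */
    struct aead_edesc {
            int src_nents;                  /* sg entries in req->src */
            int dst_nents;                  /* sg entries in req->dst */
            int mapped_src_nents;           /* entries actually DMA-mapped */
            int mapped_dst_nents;
            int sec4_sg_bytes;              /* size of the S/G link table */
            bool bklog;                     /* queued via crypto-engine backlog */
            dma_addr_t sec4_sg_dma;         /* bus address of the link table */
            struct sec4_sg_entry *sec4_sg;  /* link table, tail-allocated */
            u32 hw_desc[];                  /* CAAM job descriptor, tail-allocated */
    };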
950 struct aead_edesc *edesc,
954 edesc->src_nents, edesc->dst_nents, 0, 0,
955 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
958 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
965 edesc->src_nents, edesc->dst_nents,
966 edesc->iv_dma, ivsize,
967 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
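aead_unmap (lines 950-955) and skcipher_unmap (958-967) differ only in the IV arguments: the AEAD side passes 0, 0 where the skcipher side passes edesc->iv_dma and ivsize. A sketch of the common teardown both wrappers feed into, assuming a helper of this shape (the name and DMA direction flags are assumptions; the argument order is taken from the matches):

    static void caam_unmap(struct device *dev, struct scatterlist *src,
                           struct scatterlist *dst, int src_nents,
                           int dst_nents, dma_addr_t iv_dma, int ivsize,
                           dma_addr_t sec4_sg_dma, int sec4_sg_bytes)
    {
            if (dst != src) {
                    if (src_nents)
                            dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
                    if (dst_nents)
                            dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
            } else {
                    dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
            }

            if (iv_dma)
                    dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
            if (sec4_sg_bytes)
                    dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
                                     DMA_TO_DEVICE);
    }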
976 struct aead_edesc *edesc;
982 edesc = rctx->edesc;
983 has_bklog = edesc->bklog;
988 aead_unmap(jrdev, edesc, req);
990 kfree(edesc);
1006 struct skcipher_edesc *edesc;
1016 edesc = rctx->edesc;
1017 has_bklog = edesc->bklog;
1021 skcipher_unmap(jrdev, edesc, req);
1029 memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
1039 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1041 kfree(edesc);
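Lines 976-1041 are the two job-ring completion callbacks. The shared pattern: recover the edesc from the request context, remember edesc->bklog before freeing, unmap, and, for skcipher, copy the output IV back to req->iv from just behind the link table (the memcpy at 1029). A skeleton with error decoding and the final completion call elided (the context-struct name is an assumption):

    static void skcipher_crypt_done(struct device *jrdev, u32 *desc,
                                    u32 err, void *context)
    {
            struct skcipher_request *req = context;
            struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
            struct skcipher_edesc *edesc = rctx->edesc;
            int ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
            bool has_bklog = edesc->bklog;  /* read before kfree() */

            skcipher_unmap(jrdev, edesc, req);

            /* next IV was parked right behind the S/G table (line 1029) */
            memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
                   ivsize);

            kfree(edesc);

            /* ... complete directly, or via the crypto engine if has_bklog ... */
    }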
1057 struct aead_edesc *edesc,
1063 u32 *desc = edesc->hw_desc;
1077 src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) :
1081 src_dma = edesc->sec4_sg_dma;
1082 sec4_sg_index += edesc->mapped_src_nents;
1093 if (!edesc->mapped_dst_nents) {
1096 } else if (edesc->mapped_dst_nents == 1) {
1100 dst_dma = edesc->sec4_sg_dma +
1118 struct aead_edesc *edesc,
1124 u32 *desc = edesc->hw_desc;
1128 init_aead_job(req, edesc, all_contig, encrypt);
1148 struct aead_edesc *edesc, bool all_contig,
1154 u32 *desc = edesc->hw_desc;
1157 init_aead_job(req, edesc, all_contig, encrypt);
1182 struct aead_edesc *edesc,
1194 u32 *desc = edesc->hw_desc;
1212 init_aead_job(req, edesc, all_contig, encrypt);
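Lines 1057-1212 show that init_gcm_job (1118), init_chachapoly_job (1148) and init_authenc_job (1182) are thin wrappers that each call init_aead_job to fill edesc->hw_desc. The key decision in init_aead_job is whether to point the engine at the data directly or at the link table; the source-side choice (lines 1077-1082), paraphrased:

    if (all_contig) {
            /* one mapped segment: point the job at it directly */
            src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) : 0;
            in_options = 0;
    } else {
            /* scattered input: hand the engine the sec4 link table */
            src_dma = edesc->sec4_sg_dma;
            sec4_sg_index += edesc->mapped_src_nents;
            in_options = LDST_SGF;
    }

The destination side (lines 1093-1100) makes the same three-way choice: no mapped entries, a single entry addressed directly, or an offset into the same link table.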
1234 struct skcipher_edesc *edesc,
1241 u32 *desc = edesc->hw_desc;
1250 (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
1254 edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1262 if (ivsize || edesc->mapped_src_nents > 1) {
1263 src_dma = edesc->sec4_sg_dma;
1264 sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
1275 } else if (!ivsize && edesc->mapped_dst_nents == 1) {
1278 dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
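init_skcipher_job (1234-1278) applies the same direct-vs-link-table choice, except that the presence of an IV forces the link-table path on the input side, with the IV as the table's first entry; the output side likewise bypasses the table only when there is no IV and a single mapped destination entry (line 1275). The input-side choice (lines 1262-1264; the else branch is an assumption):

    if (ivsize || edesc->mapped_src_nents > 1) {
            src_dma = edesc->sec4_sg_dma;
            sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
            in_options = LDST_SGF;
    } else {
            src_dma = sg_dma_address(req->src);
            in_options = 0;
    }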
1301 struct aead_edesc *edesc;
1382 /* allocate space for base edesc and hw desc commands, link tables */
1383 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
1385 if (!edesc) {
1391 edesc->src_nents = src_nents;
1392 edesc->dst_nents = dst_nents;
1393 edesc->mapped_src_nents = mapped_src_nents;
1394 edesc->mapped_dst_nents = mapped_dst_nents;
1395 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1398 rctx->edesc = edesc;
1405 edesc->sec4_sg + sec4_sg_index, 0);
1410 edesc->sec4_sg + sec4_sg_index, 0);
1414 return edesc;
1416 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1418 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1420 aead_unmap(jrdev, edesc, req);
1421 kfree(edesc);
1425 edesc->sec4_sg_bytes = sec4_sg_bytes;
1427 return edesc;
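aead_edesc_alloc (1301-1427) puts everything in one kzalloc, with the hw descriptor and the link table tail-allocated behind the struct, exactly as the pointer arithmetic at 1395 spells out:

    /*
     * One allocation, three regions (lines 1383 and 1395):
     *
     * +-------------------+---------------------+----------------+
     * | struct aead_edesc | hw_desc[desc_bytes] | sec4_sg table  |
     * +-------------------+---------------------+----------------+
     */
    edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, flags);
    edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
                     desc_bytes;

Note the ordering at 1416-1425: edesc->sec4_sg_bytes is set only after dma_map_single() succeeds, so when the mapping fails, the aead_unmap() on the error path sees sec4_sg_bytes == 0 (from kzalloc) and does not try to unmap a table that was never mapped.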
1434 struct aead_edesc *edesc = rctx->edesc;
1435 u32 *desc = edesc->hw_desc;
1450 aead_unmap(jrdev, edesc, req);
1451 kfree(rctx->edesc);
1459 struct aead_edesc *edesc;
1466 edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
1468 if (IS_ERR(edesc))
1469 return PTR_ERR(edesc);
1471 desc = edesc->hw_desc;
1473 init_chachapoly_job(req, edesc, all_contig, encrypt);
1493 struct aead_edesc *edesc;
1500 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
1502 if (IS_ERR(edesc))
1503 return PTR_ERR(edesc);
1506 init_authenc_job(req, edesc, all_contig, encrypt);
1509 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1510 desc_bytes(edesc->hw_desc), 1);
1530 u32 *desc = rctx->edesc->hw_desc;
1533 rctx->edesc->bklog = true;
1538 aead_unmap(ctx->jrdev, rctx->edesc, req);
1539 kfree(rctx->edesc);
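aead_do_one_req (1530-1539) is the crypto-engine worker path: it flags the edesc as backlogged before submitting, so the completion callback finalizes the request through the engine instead of completing it directly. The core, reassembled from the matches (caam_jr_enqueue() is the driver's job-ring submit; the surrounding body is a sketch):

    rctx->edesc->bklog = true;

    ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req);
    if (ret != -EINPROGRESS) {
            /* submission failed: tear down here, the callback never runs */
            aead_unmap(ctx->jrdev, rctx->edesc, req);
            kfree(rctx->edesc);
    }

skcipher_do_one_req (lines 1759-1768, further down) is the mirror image on the skcipher side.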
1549 struct aead_edesc *edesc;
1556 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig,
1558 if (IS_ERR(edesc))
1559 return PTR_ERR(edesc);
1562 init_gcm_job(req, edesc, all_contig, encrypt);
1565 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1566 desc_bytes(edesc->hw_desc), 1);
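chachapoly_crypt (1459), aead_crypt (1493) and gcm_crypt (1549) all follow the same three steps: allocate the edesc, build the job descriptor in place, submit. A sketch of the shape, using gcm as the example (aead_enqueue is a hypothetical name for the submit helper whose body appears at lines 1434-1451):

    static int gcm_crypt(struct aead_request *req, bool encrypt)
    {
            struct aead_edesc *edesc;
            bool all_contig;

            /* allocate extended descriptor */
            edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig,
                                     encrypt);
            if (IS_ERR(edesc))
                    return PTR_ERR(edesc);

            /* build the job descriptor in place, in edesc->hw_desc */
            init_gcm_job(req, edesc, all_contig, encrypt);

            /* submit; on error this unmaps and frees (lines 1450-1451) */
            return aead_enqueue(req);
    }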
1604 struct skcipher_edesc *edesc;
1680 * allocate space for base edesc and hw desc commands, link tables, IV
1682 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
1684 if (!edesc) {
1691 edesc->src_nents = src_nents;
1692 edesc->dst_nents = dst_nents;
1693 edesc->mapped_src_nents = mapped_src_nents;
1694 edesc->mapped_dst_nents = mapped_dst_nents;
1695 edesc->sec4_sg_bytes = sec4_sg_bytes;
1696 edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
1698 rctx->edesc = edesc;
1702 iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
1710 kfree(edesc);
1714 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1717 sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
1721 sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
1725 dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
1729 sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
1733 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1736 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1740 kfree(edesc);
1745 edesc->iv_dma = iv_dma;
1748 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1751 return edesc;
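skcipher_edesc_alloc (1604-1751) extends the single-allocation layout with a fourth tail region for the IV (lines 1682, 1696 and 1702), so the IV is DMA-mapped alongside everything else and can be read back by the completion callback (the memcpy at 1029):

    /* | struct skcipher_edesc | hw_desc | sec4_sg table | iv | */
    edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
                    flags);
    edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
                                              desc_bytes);
    iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;

The IV then becomes the first link-table entry (dma_to_sec4_sg_one at 1714), followed by the source entries (1717), which matches the sec4_sg_index = mapped_src_nents + !!ivsize computation in init_skcipher_job.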
1759 u32 *desc = rctx->edesc->hw_desc;
1762 rctx->edesc->bklog = true;
1767 skcipher_unmap(ctx->jrdev, rctx->edesc, req);
1768 kfree(rctx->edesc);
1786 struct skcipher_edesc *edesc;
1820 edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1821 if (IS_ERR(edesc))
1822 return PTR_ERR(edesc);
1825 init_skcipher_job(req, edesc, encrypt);
1828 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1829 desc_bytes(edesc->hw_desc), 1);
1831 desc = edesc->hw_desc;
1844 skcipher_unmap(jrdev, edesc, req);
1845 kfree(edesc);
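Read top to bottom, the matches trace one complete lifecycle: a single allocation creates the edesc (struct, hw descriptor, link table, and for skcipher the IV); init_*_job writes the job descriptor into edesc->hw_desc; the request is submitted to the job ring, with bklog marking the crypto-engine path; and the edesc is freed exactly once, either on an error path (inside the allocator, or unmap plus kfree right after a failed submission, as at 1450-1451 and 1844-1845) or in the completion callback (988-990 and 1021-1041). That single-owner handoff through the request context is what keeps the bare edesc pointer safe to pass around.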