Lines Matching defs:edesc
132 struct skcipher_edesc *edesc;
137 struct aead_edesc *edesc;
959 struct aead_edesc *edesc,
963 edesc->src_nents, edesc->dst_nents, 0, 0,
964 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
967 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
974 edesc->src_nents, edesc->dst_nents,
975 edesc->iv_dma, ivsize,
976 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
985 struct aead_edesc *edesc;
991 edesc = rctx->edesc;
992 has_bklog = edesc->bklog;
997 aead_unmap(jrdev, edesc, req);
999 kfree(edesc);
1011 static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
1014 return PTR_ALIGN((u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
1022 struct skcipher_edesc *edesc;
1032 edesc = rctx->edesc;
1033 has_bklog = edesc->bklog;
1037 skcipher_unmap(jrdev, edesc, req);
1045 memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);
1054 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1056 kfree(edesc);
1072 struct aead_edesc *edesc,
1078 u32 *desc = edesc->hw_desc;
1092 src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) :
1096 src_dma = edesc->sec4_sg_dma;
1097 sec4_sg_index += edesc->mapped_src_nents;
1108 if (!edesc->mapped_dst_nents) {
1111 } else if (edesc->mapped_dst_nents == 1) {
1115 dst_dma = edesc->sec4_sg_dma +
1133 struct aead_edesc *edesc,
1139 u32 *desc = edesc->hw_desc;
1143 init_aead_job(req, edesc, all_contig, encrypt);
1163 struct aead_edesc *edesc, bool all_contig,
1169 u32 *desc = edesc->hw_desc;
1172 init_aead_job(req, edesc, all_contig, encrypt);
1197 struct aead_edesc *edesc,
1210 u32 *desc = edesc->hw_desc;
1228 init_aead_job(req, edesc, all_contig, encrypt);
1250 struct skcipher_edesc *edesc,
1257 u32 *desc = edesc->hw_desc;
1266 (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
1270 edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1278 if (ivsize || edesc->mapped_src_nents > 1) {
1279 src_dma = edesc->sec4_sg_dma;
1280 sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
1291 } else if (!ivsize && edesc->mapped_dst_nents == 1) {
1294 dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
1317 struct aead_edesc *edesc;
1398 /* allocate space for base edesc and hw desc commands, link tables */
1399 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, flags);
1400 if (!edesc) {
1406 edesc->src_nents = src_nents;
1407 edesc->dst_nents = dst_nents;
1408 edesc->mapped_src_nents = mapped_src_nents;
1409 edesc->mapped_dst_nents = mapped_dst_nents;
1410 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1413 rctx->edesc = edesc;
1420 edesc->sec4_sg + sec4_sg_index, 0);
1425 edesc->sec4_sg + sec4_sg_index, 0);
1429 return edesc;
1431 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1433 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1435 aead_unmap(jrdev, edesc, req);
1436 kfree(edesc);
1440 edesc->sec4_sg_bytes = sec4_sg_bytes;
1442 return edesc;
1449 struct aead_edesc *edesc = rctx->edesc;
1450 u32 *desc = edesc->hw_desc;
1465 aead_unmap(jrdev, edesc, req);
1466 kfree(rctx->edesc);
1474 struct aead_edesc *edesc;
1481 edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
1483 if (IS_ERR(edesc))
1484 return PTR_ERR(edesc);
1486 desc = edesc->hw_desc;
1488 init_chachapoly_job(req, edesc, all_contig, encrypt);
1508 struct aead_edesc *edesc;
1515 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
1517 if (IS_ERR(edesc))
1518 return PTR_ERR(edesc);
1521 init_authenc_job(req, edesc, all_contig, encrypt);
1524 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1525 desc_bytes(edesc->hw_desc), 1);
1545 u32 *desc = rctx->edesc->hw_desc;
1548 rctx->edesc->bklog = true;
1556 aead_unmap(ctx->jrdev, rctx->edesc, req);
1557 kfree(rctx->edesc);
1567 struct aead_edesc *edesc;
1574 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig,
1576 if (IS_ERR(edesc))
1577 return PTR_ERR(edesc);
1580 init_gcm_job(req, edesc, all_contig, encrypt);
1583 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1584 desc_bytes(edesc->hw_desc), 1);
1622 struct skcipher_edesc *edesc;
1699 * allocate space for base edesc and hw desc commands, link tables, IV
1701 aligned_size = sizeof(*edesc) + desc_bytes + sec4_sg_bytes;
1706 edesc = kzalloc(aligned_size, flags);
1707 if (!edesc) {
1714 edesc->src_nents = src_nents;
1715 edesc->dst_nents = dst_nents;
1716 edesc->mapped_src_nents = mapped_src_nents;
1717 edesc->mapped_dst_nents = mapped_dst_nents;
1718 edesc->sec4_sg_bytes = sec4_sg_bytes;
1719 edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
1721 rctx->edesc = edesc;
1725 iv = skcipher_edesc_iv(edesc);
1733 kfree(edesc);
1737 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1740 sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
1744 sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
1748 dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
1752 sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
1756 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1759 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1763 kfree(edesc);
1768 edesc->iv_dma = iv_dma;
1771 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1774 return edesc;
1782 u32 *desc = rctx->edesc->hw_desc;
1785 rctx->edesc->bklog = true;
1793 skcipher_unmap(ctx->jrdev, rctx->edesc, req);
1794 kfree(rctx->edesc);
1812 struct skcipher_edesc *edesc;
1846 edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1847 if (IS_ERR(edesc))
1848 return PTR_ERR(edesc);
1851 init_skcipher_job(req, edesc, encrypt);
1854 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1855 desc_bytes(edesc->hw_desc), 1);
1857 desc = edesc->hw_desc;
1870 skcipher_unmap(jrdev, edesc, req);
1871 kfree(edesc);
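
The fragments above come from what appears to be the CAAM job-ring driver (drivers/crypto/caam/caamalg.c in the Linux kernel) and trace the life cycle of an extended descriptor ("edesc"): aead_edesc_alloc() / skcipher_edesc_alloc() allocate it together with the hardware job descriptor and the sec4 scatter/gather link table (1398-1442, 1699-1774), the init_*_job() helpers fill hw_desc (1072-1294), and the completion handlers around 985-1056 unmap the DMA resources and kfree() the edesc. The sketch below reconstructs the two structures purely from the fields referenced in this listing; member order, exact types, and comments are assumptions, not the driver's actual definitions.

    /* Reconstructed sketch only -- inferred from the listing, not copied from the driver. */
    struct aead_edesc {
    	int src_nents;                 /* segments in req->src before DMA mapping */
    	int dst_nents;                 /* segments in req->dst before DMA mapping */
    	int mapped_src_nents;          /* segments after DMA mapping */
    	int mapped_dst_nents;
    	int sec4_sg_bytes;             /* size of the S/G link table */
    	bool bklog;                    /* whether the request was backlogged (1548, 1785) */
    	dma_addr_t sec4_sg_dma;        /* bus address of the link table */
    	struct sec4_sg_entry *sec4_sg; /* link table, carved out of the same allocation */
    	u32 hw_desc[];                 /* CAAM job descriptor, built by init_*_job() */
    };

    struct skcipher_edesc {
    	/* same bookkeeping as aead_edesc, plus a DMA-mapped IV (iv_dma, 1768) */
    	int src_nents;
    	int dst_nents;
    	int mapped_src_nents;
    	int mapped_dst_nents;
    	dma_addr_t iv_dma;
    	int sec4_sg_bytes;
    	bool bklog;
    	dma_addr_t sec4_sg_dma;
    	struct sec4_sg_entry *sec4_sg;
    	u32 hw_desc[];
    };

The trailing flexible hw_desc[] member is why the allocations size the buffer starting from sizeof(*edesc) + desc_bytes + sec4_sg_bytes (1399, 1701), why the link table is located by pointer arithmetic past the descriptor (1410, 1719), and why skcipher_edesc_iv() (1011-1014) returns an aligned pointer just past the link table: descriptor, link table, and (for skciphers) the IV all live in one kzalloc'd block, so a single kfree(edesc) in the completion path releases everything.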