Lines matching defs:edesc in drivers/crypto/caam/caampkc.c (struct rsa_edesc, the extended descriptor of the NXP CAAM RSA driver)
43 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
48 dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
49 dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);
51 if (edesc->sec4_sg_bytes)
52 dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
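
The matches at file lines 43-52 cover nearly the whole I/O teardown helper. A minimal sketch of how they fit together, assuming the usual caampkc.c context (the akcipher_request_ctx() lookup and the DMA_TO_DEVICE direction of the link-table unmap are inferred, not shown by the matches):

    static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
                             struct akcipher_request *req)
    {
            struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

            /* Undo the streaming DMA mappings made at submit time. */
            dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
            dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents,
                         DMA_TO_DEVICE);

            /* The sec4 link table is only mapped when S/G was needed. */
            if (edesc->sec4_sg_bytes)
                    dma_unmap_single(dev, edesc->sec4_sg_dma,
                                     edesc->sec4_sg_bytes, DMA_TO_DEVICE);
    }
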
56 static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
62 struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
68 static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
74 struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
80 static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
86 struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
97 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
103 struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
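
The four helpers at file lines 56-103 follow one pattern: pick the matching variant out of the edesc->pdb union, then dma_unmap_single() each key component the Protocol Data Block references. A sketch of the public-key case (the pdb->n_dma/pdb->e_dma field names and the key-size lookups are assumptions about the elided lines):

    static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
                              struct akcipher_request *req)
    {
            struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
            struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
            struct caam_rsa_key *key = &ctx->key;
            struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

            /* Release the DMA mappings of the public-key components. */
            dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
            dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
    }

The f2/f3 variants additionally unmap the CRT key material and scratch buffers, which is why each private-key form needs its own helper.
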
122 struct rsa_edesc *edesc;
129 edesc = req_ctx->edesc;
130 has_bklog = edesc->bklog;
132 rsa_pub_unmap(dev, edesc, req);
133 rsa_io_unmap(dev, edesc, req);
134 kfree(edesc);
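
File lines 122-134 are from the job-ring completion callback for public-key jobs: fetch the edesc back from the request context, remember whether the request went through the crypto-engine backlog, tear everything down, then complete on the right path. A hedged sketch (the caam_jr_strstatus() error decode and the two completion calls are reconstructions of the elided lines):

    static void rsa_pub_done(struct device *dev, u32 *desc, u32 err,
                             void *context)
    {
            struct akcipher_request *req = context;
            struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
            struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
            struct rsa_edesc *edesc;
            bool has_bklog;
            int ecode = 0;

            if (err)
                    ecode = caam_jr_strstatus(dev, err);

            edesc = req_ctx->edesc;
            has_bklog = edesc->bklog;       /* latch before the free below */

            rsa_pub_unmap(dev, edesc, req);
            rsa_io_unmap(dev, edesc, req);
            kfree(edesc);

            /* Backlogged requests must be finalized through crypto-engine. */
            if (!has_bklog)
                    akcipher_request_complete(req, ecode);
            else
                    crypto_finalize_akcipher_request(jrp->engine, req, ecode);
    }
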
155 struct rsa_edesc *edesc;
162 edesc = req_ctx->edesc;
163 has_bklog = edesc->bklog;
167 rsa_priv_f1_unmap(dev, edesc, req);
170 rsa_priv_f2_unmap(dev, edesc, req);
173 rsa_priv_f3_unmap(dev, edesc, req);
176 rsa_io_unmap(dev, edesc, req);
177 kfree(edesc);
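
File lines 155-177 are the private-key counterpart. The three unmap calls at lines 167/170/173, three lines apart, suggest a switch on the private-key form; a sketch of that dispatch, assuming the FORM1/FORM2/FORM3 forms from caampkc.h and omitting the error-decode prologue shown for rsa_pub_done above:

    static void rsa_priv_done(struct device *dev, u32 *desc, u32 err,
                              void *context)
    {
            struct akcipher_request *req = context;
            struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
            struct caam_rsa_ctx *ctx =
                    akcipher_tfm_ctx(crypto_akcipher_reqtfm(req));
            struct caam_rsa_key *key = &ctx->key;
            struct rsa_edesc *edesc = req_ctx->edesc;
            bool has_bklog = edesc->bklog;

            /* Each form mapped different key material, so the matching
             * unmap helper must run before the edesc is freed. */
            switch (key->priv_form) {
            case FORM1:
                    rsa_priv_f1_unmap(dev, edesc, req);
                    break;
            case FORM2:
                    rsa_priv_f2_unmap(dev, edesc, req);
                    break;
            case FORM3:
                    rsa_priv_f3_unmap(dev, edesc, req);
            }

            rsa_io_unmap(dev, edesc, req);
            kfree(edesc);
            /* Completion then splits on has_bklog as in rsa_pub_done(). */
    }
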
250 struct rsa_edesc *edesc;
313 /* allocate space for base edesc, hw desc commands and link tables */
314 edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
316 if (!edesc)
319 edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
321 dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
326 edesc->sec4_sg + !!diff_size, 0);
330 edesc->sec4_sg + sec4_sg_index, 0);
333 edesc->src_nents = src_nents;
334 edesc->dst_nents = dst_nents;
336 req_ctx->edesc = edesc;
339 return edesc;
341 edesc->mapped_src_nents = mapped_src_nents;
342 edesc->mapped_dst_nents = mapped_dst_nents;
344 edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
346 if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
351 edesc->sec4_sg_bytes = sec4_sg_bytes;
354 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
355 edesc->sec4_sg_bytes, 1);
357 return edesc;
360 kfree(edesc);
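
File lines 250-360 show the heart of the allocator: one kzalloc carries the edesc header, the hardware descriptor (desclen bytes) and the sec4 S/G link table, and only the table gets its own DMA mapping. A condensed fragment mirroring the order of the matched lines; desclen, sec4_sg_bytes, flags, diff_size, the *_nents locals and the early-return guard come from elided context and are assumptions:

    /* Single allocation: [struct rsa_edesc][hw descriptor][sec4 link table] */
    edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
                    GFP_DMA | flags);
    if (!edesc)
            return ERR_PTR(-ENOMEM);  /* real code unmaps sglists first */

    /* The link table starts right after the hw descriptor commands. */
    edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

    /* Optional zero-padding entry precedes the source entries. */
    if (diff_size)
            dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size, 0);

    if (src_nents > 1)
            sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
                               edesc->sec4_sg + !!diff_size, 0);
    if (dst_nents > 1)
            sg_to_sec4_sg_last(req->dst, req->dst_len,
                               edesc->sec4_sg + sec4_sg_index, 0);

    edesc->src_nents = src_nents;
    edesc->dst_nents = dst_nents;
    req_ctx->edesc = edesc;

    if (!sec4_sg_bytes)
            return edesc;           /* contiguous I/O: no link table needed */

    edesc->mapped_src_nents = mapped_src_nents;
    edesc->mapped_dst_nents = mapped_dst_nents;

    /* Only the table itself needs its own to-device mapping. */
    edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
                                        sec4_sg_bytes, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
            dev_err(dev, "unable to map S/G table\n");
            goto sec4_sg_fail;      /* frees edesc, unmaps sglists (assumed) */
    }
    edesc->sec4_sg_bytes = sec4_sg_bytes;

    return edesc;
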
377 u32 *desc = req_ctx->edesc->hw_desc;
380 req_ctx->edesc->bklog = true;
385 rsa_pub_unmap(jrdev, req_ctx->edesc, req);
386 rsa_io_unmap(jrdev, req_ctx->edesc, req);
387 kfree(req_ctx->edesc);
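
File lines 377-387 sit in the crypto-engine worker path: a request that was backlogged gets its edesc flagged before the hardware enqueue, so the completion callback later knows to finalize through the engine, and the mappings are unwound if the enqueue itself fails. A sketch under those assumptions (the akcipher_op_done callback field and the caam_jr_enqueue() call are inferred from context):

    static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
    {
            struct akcipher_request *req =
                    container_of(areq, struct akcipher_request, base);
            struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
            struct caam_rsa_ctx *ctx =
                    akcipher_tfm_ctx(crypto_akcipher_reqtfm(req));
            struct device *jrdev = ctx->dev;
            u32 *desc = req_ctx->edesc->hw_desc;
            int ret;

            /* Mark as engine-backlogged before the job can complete. */
            req_ctx->edesc->bklog = true;

            ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

            if (ret != -EINPROGRESS) {
                    /* Enqueue failed: unwind the mappings and free. */
                    rsa_pub_unmap(jrdev, req_ctx->edesc, req);
                    rsa_io_unmap(jrdev, req_ctx->edesc, req);
                    kfree(req_ctx->edesc);
            } else {
                    ret = 0;
            }

            return ret;
    }
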
396 struct rsa_edesc *edesc)
403 struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
419 if (edesc->mapped_src_nents > 1) {
421 pdb->f_dma = edesc->sec4_sg_dma;
422 sec4_sg_index += edesc->mapped_src_nents;
427 if (edesc->mapped_dst_nents > 1) {
429 pdb->g_dma = edesc->sec4_sg_dma +
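
File lines 396-429 fill the public-key Protocol Data Block. The interesting part is the I/O wiring: with more than one mapped segment the PDB pointer goes to the DMA-mapped sec4 link table (dst entries start past the src entries), otherwise straight at the lone segment. A fragment sketching that tail; the RSA_PDB_SGF_* flags and the sg_dma_address() fallbacks are assumptions about the elided lines:

    struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
    int sec4_sg_index = 0;

    if (edesc->mapped_src_nents > 1) {
            pdb->sgf |= RSA_PDB_SGF_F;              /* input f via S/G table */
            pdb->f_dma = edesc->sec4_sg_dma;
            sec4_sg_index += edesc->mapped_src_nents;
    } else {
            pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
    }

    if (edesc->mapped_dst_nents > 1) {
            pdb->sgf |= RSA_PDB_SGF_G;              /* output g via S/G table */
            pdb->g_dma = edesc->sec4_sg_dma +
                         sec4_sg_index * sizeof(struct sec4_sg_entry);
    } else {
            pdb->g_dma = sg_dma_address(req->dst);
    }
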
442 struct rsa_edesc *edesc)
448 struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
464 if (edesc->mapped_src_nents > 1) {
466 pdb->g_dma = edesc->sec4_sg_dma;
467 sec4_sg_index += edesc->mapped_src_nents;
475 if (edesc->mapped_dst_nents > 1) {
477 pdb->f_dma = edesc->sec4_sg_dma +
489 struct rsa_edesc *edesc)
495 struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
530 if (edesc->mapped_src_nents > 1) {
532 pdb->g_dma = edesc->sec4_sg_dma;
533 sec4_sg_index += edesc->mapped_src_nents;
540 if (edesc->mapped_dst_nents > 1) {
542 pdb->f_dma = edesc->sec4_sg_dma +
566 struct rsa_edesc *edesc)
572 struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
619 if (edesc->mapped_src_nents > 1) {
621 pdb->g_dma = edesc->sec4_sg_dma;
622 sec4_sg_index += edesc->mapped_src_nents;
629 if (edesc->mapped_dst_nents > 1) {
631 pdb->f_dma = edesc->sec4_sg_dma +
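
The three private-form PDB setters (file lines 442-631) repeat the same tail with the roles swapped relative to the public case: g is the ciphertext input and f the plaintext output. The repetition could be captured by a helper; rsa_priv_set_io() below is purely illustrative, not in the driver, and the RSA_PRIV_PDB_SGF_* flag names are assumptions:

    /* Hypothetical helper; the driver open-codes this in each fN setter. */
    static void rsa_priv_set_io(struct akcipher_request *req,
                                struct rsa_edesc *edesc,
                                dma_addr_t *g_dma, dma_addr_t *f_dma, u32 *sgf)
    {
            struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
            int sec4_sg_index = 0;

            if (edesc->mapped_src_nents > 1) {
                    *sgf |= RSA_PRIV_PDB_SGF_G;     /* assumed flag name */
                    *g_dma = edesc->sec4_sg_dma;
                    sec4_sg_index += edesc->mapped_src_nents;
            } else {
                    *g_dma = sg_dma_address(req_ctx->fixup_src);
            }

            if (edesc->mapped_dst_nents > 1) {
                    *sgf |= RSA_PRIV_PDB_SGF_F;     /* assumed flag name */
                    *f_dma = edesc->sec4_sg_dma +
                             sec4_sg_index * sizeof(struct sec4_sg_entry);
            } else {
                    *f_dma = sg_dma_address(req->dst);
            }
    }
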
668 struct rsa_edesc *edesc = req_ctx->edesc;
669 u32 *desc = edesc->hw_desc;
687 rsa_priv_f1_unmap(jrdev, edesc, req);
690 rsa_priv_f2_unmap(jrdev, edesc, req);
693 rsa_priv_f3_unmap(jrdev, edesc, req);
696 rsa_pub_unmap(jrdev, edesc, req);
698 rsa_io_unmap(jrdev, edesc, req);
699 kfree(edesc);
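
File lines 668-699 come from the shared submission helper (call it akcipher_enqueue_req; the name is not shown by the matches). The three form-specific unmap calls plus the rsa_pub_unmap fallback at line 696 again imply a switch on key->priv_form, used here to unwind when submission is rejected. A sketch of that path; the MAY_BACKLOG split between crypto-engine and direct job-ring enqueue is an assumption about the elided lines:

    if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
            ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
                                                             req);
    else
            ret = caam_jr_enqueue(jrdev, desc, cbk, req);

    if (ret != -EINPROGRESS && ret != -EBUSY) {
            /* Submission failed: pick the unmap matching the key form. */
            switch (key->priv_form) {
            case FORM1:
                    rsa_priv_f1_unmap(jrdev, edesc, req);
                    break;
            case FORM2:
                    rsa_priv_f2_unmap(jrdev, edesc, req);
                    break;
            case FORM3:
                    rsa_priv_f3_unmap(jrdev, edesc, req);
                    break;
            default:
                    rsa_pub_unmap(jrdev, edesc, req);
            }
            rsa_io_unmap(jrdev, edesc, req);
            kfree(edesc);
    }
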
711 struct rsa_edesc *edesc;
724 edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
725 if (IS_ERR(edesc))
726 return PTR_ERR(edesc);
729 ret = set_rsa_pub_pdb(req, edesc);
734 init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
739 rsa_io_unmap(jrdev, edesc, req);
740 kfree(edesc);
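
File lines 711-740 show the encrypt entry point end to end: allocate the edesc sized for the public-key descriptor, fill the PDB, build the hardware descriptor in place, submit. A sketch, assuming submission goes through the shared helper sketched above and completion through rsa_pub_done:

    static int caam_rsa_enc(struct akcipher_request *req)
    {
            struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
            struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
            struct device *jrdev = ctx->dev;
            struct rsa_edesc *edesc;
            int ret;

            /* Allocate the extended descriptor for a public-key job. */
            edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
            if (IS_ERR(edesc))
                    return PTR_ERR(edesc);

            /* Fill the encrypt Protocol Data Block (maps the key). */
            ret = set_rsa_pub_pdb(req, edesc);
            if (ret)
                    goto init_fail;

            /* Build the hardware job descriptor in place. */
            init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

            return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

    init_fail:
            rsa_io_unmap(jrdev, edesc, req);
            kfree(edesc);
            return ret;
    }
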
749 struct rsa_edesc *edesc;
753 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
754 if (IS_ERR(edesc))
755 return PTR_ERR(edesc);
758 ret = set_rsa_priv_f1_pdb(req, edesc);
763 init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
768 rsa_io_unmap(jrdev, edesc, req);
769 kfree(edesc);
778 struct rsa_edesc *edesc;
782 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
783 if (IS_ERR(edesc))
784 return PTR_ERR(edesc);
787 ret = set_rsa_priv_f2_pdb(req, edesc);
792 init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
797 rsa_io_unmap(jrdev, edesc, req);
798 kfree(edesc);
807 struct rsa_edesc *edesc;
811 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
812 if (IS_ERR(edesc))
813 return PTR_ERR(edesc);
816 ret = set_rsa_priv_f3_pdb(req, edesc);
821 init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
826 rsa_io_unmap(jrdev, edesc, req);
827 kfree(edesc);
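
The three decrypt entry points (file lines 749-827) are the same function three times over; only the descriptor length, the PDB setter, the descriptor initializer and (assumed) the rsa_priv_done completion callback change with the key form. The form-1 instance, sketched under the same assumptions as caam_rsa_enc above:

    static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
    {
            struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
            struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
            struct device *jrdev = ctx->dev;
            struct rsa_edesc *edesc;
            int ret;

            edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
            if (IS_ERR(edesc))
                    return PTR_ERR(edesc);

            /* Form-specific trio: PDB setter, descriptor init, callback. */
            ret = set_rsa_priv_f1_pdb(req, edesc);
            if (ret)
                    goto init_fail;

            init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

            return akcipher_enqueue_req(jrdev, rsa_priv_done, req);

    init_fail:
            rsa_io_unmap(jrdev, edesc, req);
            kfree(edesc);
            return ret;
    }

A caam_rsa_dec() wrapper presumably dispatches on key->priv_form to one of these three; it would not reference edesc itself and so does not appear in this listing.
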