Lines Matching defs:edesc

124 struct ahash_edesc *edesc;
552 struct ahash_edesc *edesc,
557 if (edesc->src_nents)
558 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
560 if (edesc->sec4_sg_bytes)
561 dma_unmap_single(dev, edesc->sec4_sg_dma,
562 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
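The matches at 552-562 are the core of ahash_unmap(), which releases the DMA state an extended descriptor holds: the source scatterlist mapping, if one was made, and the single mapping covering the sec4 S/G table. These fragments track struct ahash_edesc through what looks like the Linux CAAM ahash driver (drivers/crypto/caam/caamhash.c); the sketches interspersed below reconstruct the surrounding helpers under that assumption, so treat any name not present in the matches as inferred.

static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	/* Undo the dma_map_sg() done on the source scatterlist, if any */
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	/* Undo the single mapping of the hardware S/G table, if one exists */
	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}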
572 struct ahash_edesc *edesc,
581 ahash_unmap(dev, edesc, req, dst_len);
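Line 581 shows ahash_unmap_ctx() ending by delegating to ahash_unmap(); the _ctx variant evidently also releases the running hash context first. A sketch under that reading; the state->ctx_dma bookkeeping is an assumption suggested by the dir parameter, not something the matches show:

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   enum dma_data_direction dir)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	/* Assumed: unmap the per-request running-context buffer first */
	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len,
				 dir);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req, dst_len);
}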
589 struct ahash_edesc *edesc;
599 edesc = state->edesc;
600 has_bklog = edesc->bklog;
605 ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
607 kfree(edesc);
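Lines 589-607 are the job-ring completion callback: it recovers the edesc cached in the request state, samples the bklog flag before freeing, unmaps, and releases the descriptor. A condensed sketch (result copy-out and debug prints elided; the finalization split is an assumption drawn from the bklog flag):

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	struct ahash_edesc *edesc = state->edesc;
	bool has_bklog = edesc->bklog;	/* sample before kfree() below */
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);

	/* Backlogged requests must be finalized through crypto-engine */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

The second callback at 640-656 has the same shape but unmaps ctx->ctx_len instead of the digest size, i.e. it completes an intermediate update rather than a final hash.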
640 struct ahash_edesc *edesc;
650 edesc = state->edesc;
651 has_bklog = edesc->bklog;
655 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
656 kfree(edesc);
709 struct ahash_edesc *edesc;
711 edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
712 if (!edesc)
715 state->edesc = edesc;
717 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
720 return edesc;
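Lines 709-720 cover the whole allocator. struct_size() sizes the trailing sec4_sg[] flexible array in the same allocation as the fixed part, the pointer is cached in the request state for the completion and backlog paths, and the hardware job descriptor is initialized to invoke the pre-built shared descriptor. Sketch; the gfp derivation and header flags are assumptions from common CAAM conventions:

static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;	/* assumed */
	struct ahash_edesc *edesc;

	/* One allocation: fixed part plus sg_num trailing S/G entries */
	edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
	if (!edesc)
		return NULL;

	/* Cached so the completion callback can find and free it */
	state->edesc = edesc;

	/* Job descriptor deferring to the shared descriptor (flags assumed) */
	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}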
724 struct ahash_edesc *edesc,
733 struct sec4_sg_entry *sg = edesc->sec4_sg;
745 edesc->sec4_sg_bytes = sgsize;
746 edesc->sec4_sg_dma = src_dma;
753 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
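Lines 724-753 belong to ahash_edesc_add_src(), which either points the SEQ IN PTR command straight at a single DMA segment or builds and maps a sec4 S/G table (745-746) and sets the LDST_SGF option instead. A sketch under that reading; pad_sg_nents() and the single-segment fast path are assumptions. The same helper serves the finup, digest, and first-update paths below (1059, 1120, 1367, 1456):

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		/* Convert the scatterlist into hardware S/G entries */
		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma))
			return -ENOMEM;

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		/* Single contiguous segment: no S/G table needed */
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);
	return 0;
}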
765 u32 *desc = state->edesc->hw_desc;
768 state->edesc->bklog = true;
776 ahash_unmap(jrdev, state->edesc, req, 0);
777 kfree(state->edesc);
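Lines 765-777 are the crypto-engine do_one_request hook: the edesc is flagged as backlogged before the job-ring enqueue, and if the enqueue fails outright the hook unwinds its own mappings. Sketch; the -ENOSPC retry check is an assumption based on crypto-engine's retry support:

static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = state->edesc->hw_desc;
	int ret;

	/* Completion must now go through crypto_finalize_hash_request() */
	state->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;	/* crypto-engine requeues the request */

	if (ret != -EINPROGRESS) {
		ahash_unmap(jrdev, state->edesc, req, 0);
		kfree(state->edesc);
	} else {
		ret = 0;
	}

	return ret;
}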
793 struct ahash_edesc *edesc = state->edesc;
794 u32 *desc = edesc->hw_desc;
811 ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
812 kfree(edesc);
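Lines 793-812 show the submit helper shared by every request type: backloggable requests are handed to crypto-engine, the rest go straight to the job ring, and any synchronous failure unwinds the mappings and frees the edesc on the spot (811-812). Sketch; the MAY_BACKLOG test is an assumption consistent with the bklog flag above:

static int ahash_enqueue_req(struct device *jrdev,
			     void (*cbk)(struct device *jrdev, u32 *desc,
					 u32 err, void *context),
			     struct ahash_request *req,
			     int dst_len, enum dma_data_direction dir)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	state->ahash_op_done = cbk;

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	/* Anything other than "queued" means we own the cleanup */
	if (ret != -EINPROGRESS && ret != -EBUSY) {
		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
		kfree(edesc);
	}

	return ret;
}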
832 struct ahash_edesc *edesc;
875 * allocate space for base edesc and hw desc commands,
878 edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
880 if (!edesc) {
885 edesc->src_nents = src_nents;
886 edesc->sec4_sg_bytes = sec4_sg_bytes;
889 edesc->sec4_sg, DMA_BIDIRECTIONAL);
893 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
899 edesc->sec4_sg + sec4_sg_src_index,
902 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
905 desc = edesc->hw_desc;
907 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
910 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
916 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
939 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
940 kfree(edesc);
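Lines 832-940 evidently come from ahash_update_ctx(): the running context lands in sec4_sg[0] (889), buffered partial-block bytes in sec4_sg[1] (893), and fresh source data after that (899); when there is no fresh data the preceding entry is simply flagged as last (902). All failures funnel into the single unwind at 939-940. The distinctive middle of the function, condensed (index and length bookkeeping simplified, helper signatures inferred from the matches):

	/* Condensed from ahash_update_ctx(); error unwinding shown once */
	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	/* Fresh source data, with the final entry flagged for the CAAM */
	sg_to_sec4_sg_last(req->src, to_hash,
			   edesc->sec4_sg + sec4_sg_src_index, 0);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(edesc->hw_desc, edesc->sec4_sg_dma,
			  ctx->ctx_len + to_hash, LDST_SGF);

	return ahash_enqueue_req(jrdev, ahash_done_bi, req,
				 ctx->ctx_len, DMA_BIDIRECTIONAL);

unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;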
954 struct ahash_edesc *edesc;
960 /* allocate space for base edesc and hw desc commands, link tables */
961 edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
963 if (!edesc)
966 desc = edesc->hw_desc;
968 edesc->sec4_sg_bytes = sec4_sg_bytes;
971 edesc->sec4_sg, DMA_BIDIRECTIONAL);
975 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
979 sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
981 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
983 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
989 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
1000 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
1001 kfree(edesc);
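Lines 954-1001 are the finalization path: a fixed four-entry table suffices because the inputs are only the running context and at most one buffered block, and sg_to_sec4_set_last() at 979 terminates the chain after whichever entry is actually populated last. Condensed sketch (callback name inferred):

	/* Condensed from ahash_final_ctx(); four S/G entries always suffice */
	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	/* sec4_sg[0]: running context; sec4_sg[1]: buffered tail bytes */
	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	/* Flag the true last entry: the buffer if non-empty, else the ctx */
	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);

unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;

The finup variant at 1016-1075 is the same idea but appends the full source through ahash_edesc_add_src() (1059) instead of terminating the chain at the buffer.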
1016 struct ahash_edesc *edesc;
1038 /* allocate space for base edesc and hw desc commands, link tables */
1039 edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
1041 if (!edesc) {
1046 desc = edesc->hw_desc;
1048 edesc->src_nents = src_nents;
1051 edesc->sec4_sg, DMA_BIDIRECTIONAL);
1055 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
1059 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
1074 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
1075 kfree(edesc);
1088 struct ahash_edesc *edesc;
1110 /* allocate space for base edesc and hw desc commands, link tables */
1111 edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
1113 if (!edesc) {
1118 edesc->src_nents = src_nents;
1120 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1123 ahash_unmap(jrdev, edesc, req, digestsize);
1124 kfree(edesc);
1128 desc = edesc->hw_desc;
1132 ahash_unmap(jrdev, edesc, req, digestsize);
1133 kfree(edesc);
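Lines 1088-1133 are the one-shot digest path: with no context or buffer to stitch in, a table is needed only when the source has more than one mapped segment, hence the mapped_nents > 1 ? mapped_nents : 0 sizing at 1111, and both failure sites (1123-1124, 1132-1133) free everything locally. Condensed sketch (the output-mapping helper is inferred):

	/* Condensed from ahash_digest(); table only if src is fragmented */
	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	/* Direct pointer or S/G table, decided inside the helper */
	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	/* Map the buffer that will receive the digest (helper assumed) */
	ret = map_seq_out_ptr_ctx(jrdev, state, desc, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
				 DMA_FROM_DEVICE);

The no-source finalization at 1156-1190 is simpler still: sg_num is 0 (1160), so no trailing S/G entries are allocated at all.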
1156 struct ahash_edesc *edesc;
1159 /* allocate space for base edesc and hw desc commands, link tables */
1160 edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
1162 if (!edesc)
1165 desc = edesc->hw_desc;
1189 ahash_unmap(jrdev, edesc, req, digestsize);
1190 kfree(edesc);
1207 struct ahash_edesc *edesc;
1250 * allocate space for base edesc and hw desc commands,
1253 edesc = ahash_edesc_alloc(req, pad_nents,
1256 if (!edesc) {
1261 edesc->src_nents = src_nents;
1262 edesc->sec4_sg_bytes = sec4_sg_bytes;
1264 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1268 sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
1270 desc = edesc->hw_desc;
1272 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1275 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1281 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1310 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1311 kfree(edesc);
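Lines 1207-1311 are the context-less update: the table starts with the buffered bytes at sec4_sg[0] (1264) and the new source immediately after (1268), and the job's output becomes the freshly created running context, which is why the unwind at 1310 uses DMA_TO_DEVICE. The distinctive lines, condensed (output-mapping helper and callback inferred):

	/* Condensed from ahash_update_no_ctx(); error unwinding shown once */
	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap_ctx;

	/* New source data follows the buffer; the last entry gets flagged */
	sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

	/* Output: the running context consumed by the next update */
	ret = map_seq_out_ptr_ctx(jrdev, state, desc, ctx->ctx_len);
	if (ret)
		goto unmap_ctx;

	return ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
				 ctx->ctx_len, DMA_TO_DEVICE);

unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;

The finup variant at 1326-1386 follows the same layout but hands the source to ahash_edesc_add_src() (1367) and emits the digest directly.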
1326 struct ahash_edesc *edesc;
1350 /* allocate space for base edesc and hw desc commands, link tables */
1351 edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
1353 if (!edesc) {
1358 desc = edesc->hw_desc;
1360 edesc->src_nents = src_nents;
1361 edesc->sec4_sg_bytes = sec4_sg_bytes;
1363 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1367 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
1385 ahash_unmap(jrdev, edesc, req, digestsize);
1386 kfree(edesc);
1405 struct ahash_edesc *edesc;
1442 * allocate space for base edesc and hw desc commands,
1445 edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
1449 if (!edesc) {
1454 edesc->src_nents = src_nents;
1456 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1461 desc = edesc->hw_desc;
1493 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1494 kfree(edesc);
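Lines 1405-1494 close the set with the first-update path: like the digest path there is no context or buffer yet, so the source feeds SEQ IN PTR either directly or through a table built by ahash_edesc_add_src() (1456), and the output context is mapped for the update that follows, matching the DMA_TO_DEVICE unwind at 1493. Condensed sketch (shared-descriptor field and callback names inferred):

	/* Condensed from ahash_update_first() */
	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_update_first,
				  ctx->sh_desc_update_first_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  to_hash);
	if (ret)
		goto unmap_ctx;

	desc = edesc->hw_desc;

	/* Output: the running context consumed by the next update */
	ret = map_seq_out_ptr_ctx(jrdev, state, desc, ctx->ctx_len);
	if (ret)
		goto unmap_ctx;

	return ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
				 ctx->ctx_len, DMA_TO_DEVICE);

unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;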