Lines Matching defs:edesc
119 struct ahash_edesc *edesc;
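The hit at line 119 is the edesc pointer parked in the per-request state; every other match below reads or writes its fields. As a reading aid, here is a minimal sketch of the extended-descriptor struct reconstructed purely from the fields the matched lines touch; field order, exact types, and the descriptor-size constant are assumptions, not the driver's actual layout:

    /* Sketch only: field names taken from the matches below, layout assumed. */
    #define MAX_HW_DESC_WORDS 16            /* placeholder size, not the driver's */

    struct ahash_edesc {
            dma_addr_t sec4_sg_dma;         /* DMA address of the S/G table (line 552) */
            int src_nents;                  /* mapped req->src entries (line 549) */
            int sec4_sg_bytes;              /* S/G table size in bytes (line 553) */
            bool bklog;                     /* submitted via backlog (line 764) */
            u32 hw_desc[MAX_HW_DESC_WORDS]; /* CAAM job descriptor (line 713) */
            struct sec4_sg_entry sec4_sg[]; /* inline S/G table, sized at
                                             * allocation time (line 705) */
    };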
543 struct ahash_edesc *edesc,
548 if (edesc->src_nents)
549 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
551 if (edesc->sec4_sg_bytes)
552 dma_unmap_single(dev, edesc->sec4_sg_dma,
553 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
563 struct ahash_edesc *edesc,
572 ahash_unmap(dev, edesc, req, dst_len);
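Lines 543-553 and 563-572 come from the two DMA teardown helpers; the _ctx variant wraps the plain one, as the call at line 572 shows. A sketch of how the matched fragments fit together, with both signatures inferred from the call sites in this listing (dst_len is visibly threaded through, so it presumably sizes an output-side unmap this search did not capture):

    /* Sketch: release the DMA resources an edesc accumulated. */
    static void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
                            struct ahash_request *req, int dst_len)
    {
            if (edesc->src_nents)                       /* lines 548-549 */
                    dma_unmap_sg(dev, req->src, edesc->src_nents,
                                 DMA_TO_DEVICE);

            if (edesc->sec4_sg_bytes)                   /* lines 551-553 */
                    dma_unmap_single(dev, edesc->sec4_sg_dma,
                                     edesc->sec4_sg_bytes, DMA_TO_DEVICE);
    }

    static void ahash_unmap_ctx(struct device *dev, struct ahash_edesc *edesc,
                                struct ahash_request *req, int dst_len)
    {
            /* assumed: unmap the running hash context here, then delegate */
            ahash_unmap(dev, edesc, req, dst_len);      /* line 572 */
    }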
580 struct ahash_edesc *edesc;
590 edesc = state->edesc;
591 has_bklog = edesc->bklog;
596 ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
598 kfree(edesc);
631 struct ahash_edesc *edesc;
641 edesc = state->edesc;
642 has_bklog = edesc->bklog;
646 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
647 kfree(edesc);
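Lines 580-598 and 631-647 are the two job-completion callbacks, one finishing with a final digest (unmap length digestsize, line 596) and one with an intermediate context (ctx->ctx_len, line 646). Both follow the same teardown: recover the edesc from the request state, note whether it was backlogged before freeing it, unmap, free. A sketch of that shared shape; the helper name, its factoring, and what happens after kfree are mine, not the driver's:

    /* Sketch: teardown common to both completion callbacks. */
    static bool ahash_done_common(struct device *jrdev,
                                  struct ahash_request *req,
                                  int dst_len, enum dma_data_direction dir)
    {
            struct caam_hash_state *state = ahash_request_ctx(req);
            struct ahash_edesc *edesc = state->edesc;   /* lines 590/641 */
            bool has_bklog = edesc->bklog;              /* lines 591/642:
                                                         * read before free */

            /* dst_len is digestsize (596) or ctx->ctx_len (646) */
            ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
            kfree(edesc);                               /* lines 598/647 */

            /* callers presumably complete the request next, routing
             * through the backlog path when has_bklog is set */
            return has_bklog;
    }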
702 struct ahash_edesc *edesc;
705 edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
706 if (!edesc) {
711 state->edesc = edesc;
713 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
716 return edesc;
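Lines 702-716 are the allocator: the edesc header, the hardware job descriptor, and a variable-length S/G table all come from one kzalloc, and the job descriptor is immediately seeded with a pointer to the operation's shared descriptor. A sketch; the parameter list, the flags derivation, and the header flags on line 713's unmatched continuation are assumptions:

    /* Sketch of the single-allocation scheme visible at lines 702-716. */
    static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
                                                 int sg_num, u32 *sh_desc,
                                                 dma_addr_t sh_desc_dma)
    {
            struct caam_hash_state *state = ahash_request_ctx(req);
            /* assumed: atomic vs. sleeping allocation per request flags */
            gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                          GFP_KERNEL : GFP_ATOMIC;
            size_t sg_size = sg_num * sizeof(struct sec4_sg_entry);
            struct ahash_edesc *edesc;

            /* line 705: header + trailing S/G table in one GFP_DMA alloc */
            edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
            if (!edesc)
                    return NULL;

            /* line 711: park it so the completion callback can find it */
            state->edesc = edesc;

            /* line 713: start the job descriptor from the shared descriptor;
             * the header flags are an assumption (continuation not matched) */
            init_job_desc_shared(edesc->hw_desc, sh_desc_dma,
                                 desc_len(sh_desc),
                                 HDR_SHARE_DEFER | HDR_REVERSE);

            return edesc;
    }

The callers at lines 871, 954, 1032, 1104, 1153, 1246, 1344, and 1438 differ only in how many S/G entries they reserve and which shared descriptor they hand in.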
720 struct ahash_edesc *edesc,
729 struct sec4_sg_entry *sg = edesc->sec4_sg;
741 edesc->sec4_sg_bytes = sgsize;
742 edesc->sec4_sg_dma = src_dma;
749 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
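Lines 720-749 belong to the helper that wires req->src into the job descriptor. The matches show the two outcomes: either the data is described by a sec4 S/G table (built at edesc->sec4_sg and recorded at lines 741-742) or by a single DMA address, and either way line 749 appends a SEQ IN pointer covering first_bytes of previously gathered input plus to_hash new bytes. A sketch; the branch condition, the single-segment fast path, and the error handling are assumptions:

    /* Sketch: attach source data to the descriptor, via S/G table or
     * direct pointer. Only the table branch is directly evidenced.
     */
    static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
                                   struct ahash_edesc *edesc,
                                   struct ahash_request *req, int nents,
                                   unsigned int first_sg,
                                   unsigned int first_bytes, size_t to_hash)
    {
            dma_addr_t src_dma;
            u32 options;

            if (nents > 1 || first_sg) {                /* assumed condition */
                    struct sec4_sg_entry *sg = edesc->sec4_sg;  /* line 729 */
                    unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

                    sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

                    src_dma = dma_map_single(ctx->jrdev, sg, sgsize,
                                             DMA_TO_DEVICE);
                    if (dma_mapping_error(ctx->jrdev, src_dma))
                            return -ENOMEM;

                    edesc->sec4_sg_bytes = sgsize;      /* line 741 */
                    edesc->sec4_sg_dma = src_dma;       /* line 742 */
                    options = LDST_SGF;                 /* scatter/gather flag */
            } else {
                    src_dma = sg_dma_address(req->src);
                    options = 0;
            }

            /* line 749: sequence-in covers old bytes plus the new chunk */
            append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
                              options);
            return 0;
    }

The finup and digest paths below (lines 1052, 1113, 1360, 1449) all feed through this helper; first_sg/first_bytes let a caller reserve leading table slots for the running context and buffered bytes.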
761 u32 *desc = state->edesc->hw_desc;
764 state->edesc->bklog = true;
769 ahash_unmap(jrdev, state->edesc, req, 0);
770 kfree(state->edesc);
786 struct ahash_edesc *edesc = state->edesc;
787 u32 *desc = edesc->hw_desc;
804 ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
805 kfree(edesc);
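Lines 761-770 and 786-805 cover submission. The first fragment marks the edesc as backlogged (line 764) before enqueueing from the crypto-engine worker and frees it when the enqueue fails (lines 769-770); the second shows the direct-submission wrapper applying the same failure idiom with the _ctx unmap (lines 804-805). A sketch of the first; the enqueue call, its error convention, the callback field, and jrdev's origin are assumptions:

    /* Sketch: crypto-engine submission of an already-prepared edesc. */
    static int ahash_do_one_req_sketch(struct device *jrdev,
                                       struct ahash_request *req)
    {
            struct caam_hash_state *state = ahash_request_ctx(req);
            u32 *desc = state->edesc->hw_desc;          /* line 761 */
            int ret;

            state->edesc->bklog = true;                 /* line 764: completion
                                                         * must route back via
                                                         * the engine */

            /* assumed job-ring call; -EINPROGRESS means accepted, and
             * state->ahash_op_done is an assumed callback field */
            ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);
            if (ret != -EINPROGRESS) {
                    ahash_unmap(jrdev, state->edesc, req, 0);  /* line 769 */
                    kfree(state->edesc);                       /* line 770 */
            } else {
                    ret = 0;
            }
            return ret;
    }

Note the ownership rule this implies: once the job ring accepts the descriptor, the edesc belongs to the completion callback (lines 598/647); every earlier failure leaves it with the submitter, which is why each error path below pairs an unmap with a kfree.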
825 struct ahash_edesc *edesc;
868 * allocate space for base edesc and hw desc commands,
871 edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
873 if (!edesc) {
878 edesc->src_nents = src_nents;
879 edesc->sec4_sg_bytes = sec4_sg_bytes;
882 edesc->sec4_sg, DMA_BIDIRECTIONAL);
886 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
892 edesc->sec4_sg + sec4_sg_src_index,
895 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
898 desc = edesc->hw_desc;
900 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
903 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
909 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
932 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
933 kfree(edesc);
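Lines 825-933 trace the .update path, the fullest edesc life cycle in the listing: allocate with room for a padded S/G table (pad_nents, line 871), lay the table out as [running context | buffered bytes | new source data], map it, point the descriptor's input at it, and unwind on any failure. A condensed sketch of just the edesc handling; the surrounding length arithmetic and submission are elided, the branch around lines 892-896 is assumed, and the context-mapping helper's name is an assumption since only its argument tail is matched (line 882):

    /* Condensed sketch of the update path (lines 868-933). */
    edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
                              ctx->sh_desc_update_dma);     /* lines 871-872 */
    if (!edesc)
            return -ENOMEM;     /* line 873: alloc failure bails out early */

    edesc->src_nents = src_nents;             /* line 878: for ahash_unmap */
    edesc->sec4_sg_bytes = sec4_sg_bytes;     /* line 879 */

    /* entry 0: running context (line 882), entry 1: buffered bytes (886) */
    ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                             edesc->sec4_sg, DMA_BIDIRECTIONAL);
    ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);

    if (mapped_nents)           /* assumed condition around lines 892-896 */
            sg_to_sec4_sg_last(req->src, src_len,
                               edesc->sec4_sg + sec4_sg_src_index, 0);
    else
            sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

    desc = edesc->hw_desc;                                  /* line 898 */
    edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                        sec4_sg_bytes,
                                        DMA_TO_DEVICE);     /* lines 900-902 */
    if (dma_mapping_error(jrdev, edesc->sec4_sg_dma))       /* line 903 */
            goto unmap_ctx;

    append_seq_in_ptr(desc, edesc->sec4_sg_dma,
                      ctx->ctx_len + to_hash, LDST_SGF);    /* lines 909-910,
                                                             * tail assumed */
    /* ... enqueue; then on any failure: */
 unmap_ctx:
    ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
                    DMA_BIDIRECTIONAL);                     /* line 932 */
    kfree(edesc);                                           /* line 933 */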
947 struct ahash_edesc *edesc;
953 /* allocate space for base edesc and hw desc commands, link tables */
954 edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
956 if (!edesc)
959 desc = edesc->hw_desc;
961 edesc->sec4_sg_bytes = sec4_sg_bytes;
964 edesc->sec4_sg, DMA_BIDIRECTIONAL);
968 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
972 sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
974 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
976 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
982 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
993 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
994 kfree(edesc);
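Lines 947-994 are the .final path: no new source bytes arrive, so the table reserves a fixed four entries (line 954) but only holds the running context and, when present, the buffered tail, with the end-of-table mark set on whichever entry is last (line 972). The distinctive fragment, lightly annotated; the enclosing code and the continuation arguments are assumptions:

    /* Sketch of the .final S/G layout (lines 954-983). */
    edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
                              ctx->sh_desc_fin_dma);        /* lines 954-955 */

    /* entry 0 = running ctx (line 964 tail), entry 1 = buffer (line 968);
     * mark the last populated entry as final */
    sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0)); /* line 972 */

    edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                        sec4_sg_bytes,
                                        DMA_TO_DEVICE);     /* lines 974-975 */

    /* total input = context plus buffered tail, delivered via the table */
    append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
                      LDST_SGF);                            /* lines 982-983,
                                                             * flag assumed */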
1009 struct ahash_edesc *edesc;
1031 /* allocate space for base edesc and hw desc commands, link tables */
1032 edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
1034 if (!edesc) {
1039 desc = edesc->hw_desc;
1041 edesc->src_nents = src_nents;
1044 edesc->sec4_sg, DMA_BIDIRECTIONAL);
1048 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
1052 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
1067 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
1068 kfree(edesc);
1081 struct ahash_edesc *edesc;
1103 /* allocate space for base edesc and hw desc commands, link tables */
1104 edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
1106 if (!edesc) {
1111 edesc->src_nents = src_nents;
1113 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1116 ahash_unmap(jrdev, edesc, req, digestsize);
1117 kfree(edesc);
1121 desc = edesc->hw_desc;
1125 ahash_unmap(jrdev, edesc, req, digestsize);
1126 kfree(edesc);
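Lines 1081-1126 are the one-shot .digest path, the smallest complete life cycle: the S/G table is only reserved when the source has more than one mapped segment (line 1104), and every failure exits through the unmap + kfree pair. A sketch; the submission and the surrounding segment counting are assumptions or elided:

    /* Sketch of the one-shot digest path (lines 1103-1126). */
    edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
                              ctx->sh_desc_digest,
                              ctx->sh_desc_digest_dma);   /* lines 1104-1105 */
    if (!edesc)
            return -ENOMEM;     /* line 1106: bail out on alloc failure */

    edesc->src_nents = src_nents;                         /* line 1111 */

    ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
                              req->nbytes);               /* lines 1113-1114 */
    if (ret) {
            ahash_unmap(jrdev, edesc, req, digestsize);   /* line 1116 */
            kfree(edesc);                                 /* line 1117 */
            return ret;
    }

    desc = edesc->hw_desc;                                /* line 1121 */
    /* map the output and enqueue; the same unmap + kfree pair
     * (lines 1125-1126) runs if the output mapping fails */

The remaining entry points (final_no_ctx at line 1149, update_no_ctx at 1200, finup_no_ctx at 1319, update_first at 1398) repeat this same allocate / populate / map / enqueue-or-unwind shape with different S/G layouts.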
1149 struct ahash_edesc *edesc;
1152 /* allocate space for base edesc and hw desc commands, link tables */
1153 edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
1155 if (!edesc)
1158 desc = edesc->hw_desc;
1182 ahash_unmap(jrdev, edesc, req, digestsize);
1183 kfree(edesc);
1200 struct ahash_edesc *edesc;
1243 * allocate space for base edesc and hw desc commands,
1246 edesc = ahash_edesc_alloc(req, pad_nents,
1249 if (!edesc) {
1254 edesc->src_nents = src_nents;
1255 edesc->sec4_sg_bytes = sec4_sg_bytes;
1257 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1261 sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
1263 desc = edesc->hw_desc;
1265 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1268 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1274 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1303 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1304 kfree(edesc);
1319 struct ahash_edesc *edesc;
1343 /* allocate space for base edesc and hw desc commands, link tables */
1344 edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
1346 if (!edesc) {
1351 desc = edesc->hw_desc;
1353 edesc->src_nents = src_nents;
1354 edesc->sec4_sg_bytes = sec4_sg_bytes;
1356 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1360 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
1378 ahash_unmap(jrdev, edesc, req, digestsize);
1379 kfree(edesc);
1398 struct ahash_edesc *edesc;
1435 * allocate space for base edesc and hw desc commands,
1438 edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
1442 if (!edesc) {
1447 edesc->src_nents = src_nents;
1449 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1454 desc = edesc->hw_desc;
1486 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1487 kfree(edesc);