Lines matching refs: qidev
69 struct device *qidev;
854 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
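
The first two hits are the context-side plumbing: qidev is a device pointer cached in the per-tfm driver context, and it is what caam_drv_ctx_init() receives when a QI driver context is built for a shared descriptor. A minimal sketch of that relationship; the struct layout and the error-handling tail around the listed lines are assumptions:

    struct caam_ctx {
            /* ... other context fields elided ... */
            struct device *qidev;   /* CAAM QI backend device, used for DMA + enqueue */
    };

    /* build a QI driver context for this tfm's shared descriptor */
    drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
    if (IS_ERR(drv_ctx))        /* assumed ERR_PTR-style failure reporting */
            return drv_ctx;
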
914 struct device *qidev;
921 qidev = caam_ctx->qidev;
924 ecode = caam_jr_strstatus(qidev, status);
927 aead_unmap(qidev, edesc, aead_req);
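
The 914-927 cluster is the AEAD completion path: a non-zero CAAM status word is translated to an errno by caam_jr_strstatus(), then the extended descriptor is unmapped and the request completed. A hedged reconstruction around the listed lines; the drv_req/edesc plumbing and the cache-free helper are assumptions, not shown in the hits:

    static void aead_done(struct caam_drv_req *drv_req, u32 status)
    {
            struct aead_request *aead_req = drv_req->app_ctx;   /* assumed field */
            struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
            struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
            struct device *qidev = caam_ctx->qidev;
            struct aead_edesc *edesc;
            int ecode = 0;

            if (unlikely(status))
                    ecode = caam_jr_strstatus(qidev, status);   /* status -> errno */

            edesc = container_of(drv_req, typeof(*edesc), drv_req);
            aead_unmap(qidev, edesc, aead_req);                 /* undo all DMA maps */

            aead_request_complete(aead_req, ecode);
            qi_cache_free(edesc);                               /* assumed allocator */
    }
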
943 struct device *qidev = ctx->qidev;
964 dev_err(qidev, "could not allocate extended descriptor\n");
974 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
980 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
983 dev_err(qidev, "unable to map source\n");
993 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1001 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1008 mapped_src_nents = dma_map_sg(qidev, req->src,
1011 dev_err(qidev, "unable to map source\n");
1020 mapped_dst_nents = dma_map_sg(qidev, req->dst,
1024 dev_err(qidev, "unable to map destination\n");
1025 dma_unmap_sg(qidev, req->src, src_nents,
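
The 980-1025 hits show the scatterlist-mapping discipline: dma_map_sg() signals failure by returning 0 (checked directly, not via dma_mapping_error()), and a failure on a later mapping must dma_unmap_sg() whatever was mapped before it. A sketch of the pattern around lines 1008-1025; the qi_cache_free()/ERR_PTR unwind details are assumptions:

    mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
    if (unlikely(!mapped_src_nents)) {          /* dma_map_sg() returns 0 on error */
            dev_err(qidev, "unable to map source\n");
            qi_cache_free(edesc);
            return ERR_PTR(-ENOMEM);
    }

    mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, DMA_FROM_DEVICE);
    if (unlikely(!mapped_dst_nents)) {
            dev_err(qidev, "unable to map destination\n");
            /* unwind the earlier mapping before bailing out */
            dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
            qi_cache_free(edesc);
            return ERR_PTR(-ENOMEM);
    }
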
1063 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1065 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1077 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
1078 if (dma_mapping_error(qidev, iv_dma)) {
1079 dev_err(qidev, "unable to map IV\n");
1080 caam_unmap(qidev, req->src, req->dst, src_nents,
1095 edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
1097 if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
1098 dev_err(qidev, "unable to map assoclen\n");
1099 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1117 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
1118 if (dma_mapping_error(qidev, qm_sg_dma)) {
1119 dev_err(qidev, "unable to map S/G table\n");
1120 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1121 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
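
The 1077-1121 hits extend the same discipline to single-buffer mappings (IV, assoclen, the QI S/G table): each dma_map_single() is checked with dma_mapping_error(), and each failure path releases everything mapped before it, in reverse order, via explicit dma_unmap_single() calls plus the caam_unmap() helper. A sketch of the deepest unwind, around lines 1117-1121; the caam_unmap() tail arguments are assumptions:

    /* map the just-built QI S/G table; on failure, unwind in reverse order */
    qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
    if (dma_mapping_error(qidev, qm_sg_dma)) {
            dev_err(qidev, "unable to map S/G table\n");
            dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
            caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
                       iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);   /* tail args assumed */
            qi_cache_free(edesc);
            return ERR_PTR(-ENOMEM);
    }
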
1172 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1176 aead_unmap(ctx->qidev, edesc, req);
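
Hits 1172-1176 are the submission step: caam_qi_enqueue() returning 0 means the request is in flight and completion will arrive asynchronously, so the caller reports -EINPROGRESS; any other return means the request never reached hardware, so the caller unmaps and frees immediately. A sketch consistent with the listed lines:

    ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
    if (!ret) {
            ret = -EINPROGRESS;                 /* completion arrives via aead_done() */
    } else {
            aead_unmap(ctx->qidev, edesc, req); /* request never reached hardware */
            qi_cache_free(edesc);
    }
    return ret;
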
1211 struct device *qidev = caam_ctx->qidev;
1215 dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
1220 ecode = caam_jr_strstatus(qidev, status);
1229 skcipher_unmap(qidev, edesc, req);
1249 struct device *qidev = ctx->qidev;
1267 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1275 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1280 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1283 dev_err(qidev, "unable to map source\n");
1287 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
1290 dev_err(qidev, "unable to map destination\n");
1291 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
1295 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1298 dev_err(qidev, "unable to map source\n");
1322 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1324 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1332 dev_err(qidev, "could not allocate extended descriptor\n");
1333 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1343 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
1344 if (dma_mapping_error(qidev, iv_dma)) {
1345 dev_err(qidev, "unable to map IV\n");
1346 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
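
Note the direction change at hit 1343: the skcipher path maps the IV DMA_BIDIRECTIONAL rather than DMA_TO_DEVICE (compare hit 1077), because chaining modes such as CBC must hand the updated IV back to the caller after the operation. Otherwise the cluster mirrors the AEAD unwind pattern. A sketch; the caam_unmap() tail arguments are assumptions:

    /* skcipher IV is both input and output (e.g. CBC chaining), hence BIDIRECTIONAL */
    iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(qidev, iv_dma)) {
            dev_err(qidev, "unable to map IV\n");
            caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
                       0, DMA_NONE, 0, 0);      /* tail args assumed */
            qi_cache_free(edesc);
            return ERR_PTR(-ENOMEM);
    }
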
1369 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1371 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1372 dev_err(qidev, "unable to map S/G table\n");
1373 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1444 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1448 skcipher_unmap(ctx->qidev, edesc, req);
2480 ctx->qidev = dev;
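
The final hit closes the loop: the shared tfm-init path stores the backend device once, and every qidev use above reads that cached pointer. A minimal sketch, assuming this sits in a common init helper:

    /*
     * Cache the QI backend device; all later DMA mapping and
     * enqueue calls on this tfm go through ctx->qidev.
     */
    ctx->qidev = dev;
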