Lines matching refs: qidev (every use of the cached CAAM device pointer: declaration, DMA mapping, error reporting, enqueue, and initialization)

74 struct device *qidev;
859 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
919 struct device *qidev;
926 qidev = caam_ctx->qidev;
929 ecode = caam_jr_strstatus(qidev, status);
932 aead_unmap(qidev, edesc, aead_req);
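
Read together, lines 919-932 are the AEAD completion callback: translate the CAAM status word into an errno, unmap the request's DMA resources, free the extended descriptor, and complete the request. A minimal sketch of that shape, assuming this listing comes from the kernel's CAAM queue-interface driver (drivers/crypto/caam/caamalg_qi.c); the real aead_done also special-cases ICV-check failures, and the skcipher callback at lines 1222-1240 below follows the same pattern:

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	struct device *qidev = caam_ctx->qidev;
	struct aead_edesc *edesc;
	int ecode = 0;

	/* translate the hardware status word into an errno */
	if (unlikely(status))
		ecode = caam_jr_strstatus(qidev, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);	/* release all DMA mappings */
	qi_cache_free(edesc);			/* simplified ordering */

	aead_request_complete(aead_req, ecode);
}
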
948 struct device *qidev = ctx->qidev;
969 dev_err(qidev, "could not allocate extended descriptor\n");
979 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
985 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
988 dev_err(qidev, "unable to map source\n");
998 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1006 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1013 mapped_src_nents = dma_map_sg(qidev, req->src,
1016 dev_err(qidev, "unable to map source\n");
1025 mapped_dst_nents = dma_map_sg(qidev, req->dst,
1029 dev_err(qidev, "unable to map destination\n");
1030 dma_unmap_sg(qidev, req->src, src_nents,
1068 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1070 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1082 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
1083 if (dma_mapping_error(qidev, iv_dma)) {
1084 dev_err(qidev, "unable to map IV\n");
1085 caam_unmap(qidev, req->src, req->dst, src_nents,
1100 edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
1102 if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
1103 dev_err(qidev, "unable to map assoclen\n");
1104 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1122 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
1123 if (dma_mapping_error(qidev, qm_sg_dma)) {
1124 dev_err(qidev, "unable to map S/G table\n");
1125 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1126 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
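
The block from lines 948 through 1126 is the AEAD descriptor-allocation path, and it repeats one idiom throughout: every dma_map_sg()/dma_map_single() is checked immediately, and each failure branch unwinds exactly the mappings taken so far (line 1030 drops the source mapping when the destination fails; line 1125 drops the assoclen mapping before the bulk caam_unmap). A generic sketch of that unwind ladder; my_map_request and its parameters are illustrative, not from the driver:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int my_map_request(struct device *qidev, struct scatterlist *src,
			  int src_nents, void *iv, int ivsize,
			  dma_addr_t *iv_dma)
{
	int mapped_src_nents = dma_map_sg(qidev, src, src_nents,
					  DMA_TO_DEVICE);

	if (unlikely(!mapped_src_nents)) {
		dev_err(qidev, "unable to map source\n");
		return -ENOMEM;
	}

	*iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, *iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		/* unwind only the source mapping taken above */
		dma_unmap_sg(qidev, src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	return 0;
}
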
1177 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1181 aead_unmap(ctx->qidev, edesc, req);
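
Lines 1177-1181 show the submit pattern (mirrored for skcipher at lines 1460-1464): caam_qi_enqueue() returns 0 once the frame is on the QI frame queue, so the caller reports -EINPROGRESS and lets the completion callback finish the job; any nonzero return means the request never left software, so the caller unwinds its own mappings. The call site is roughly:

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		/* never enqueued: unwind here, not in the callback */
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
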
1222 struct device *qidev = caam_ctx->qidev;
1226 dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
1231 ecode = caam_jr_strstatus(qidev, status);
1240 skcipher_unmap(qidev, edesc, req);
1259 struct device *qidev = ctx->qidev;
1278 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1286 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1291 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1294 dev_err(qidev, "unable to map source\n");
1298 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
1301 dev_err(qidev, "unable to map destination\n");
1302 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
1306 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1309 dev_err(qidev, "unable to map source\n");
1337 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1339 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1347 dev_err(qidev, "could not allocate extended descriptor\n");
1348 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1365 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
1366 if (dma_mapping_error(qidev, iv_dma)) {
1367 dev_err(qidev, "unable to map IV\n");
1368 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
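
Note the direction on line 1365: the skcipher path maps the IV DMA_BIDIRECTIONAL, whereas the AEAD path on line 1082 uses DMA_TO_DEVICE. For chaining modes the hardware writes the next IV back into the same buffer, and the completion path copies it out to req->iv. A sketch of that copy-back, with a hypothetical helper name (my_copy_back_iv is not a function in the driver):

#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <crypto/skcipher.h>

static void my_copy_back_iv(struct device *qidev, u8 *iv, dma_addr_t iv_dma,
			    int ivsize, struct skcipher_request *req)
{
	/* unmap first so the device's IV update is visible to the CPU */
	dma_unmap_single(qidev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
	memcpy(req->iv, iv, ivsize);
}
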
1385 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1387 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1388 dev_err(qidev, "unable to map S/G table\n");
1389 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1460 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1464 skcipher_unmap(ctx->qidev, edesc, req);
2496 ctx->qidev = dev;
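
Line 2496 is the assignment every use above resolves to: the per-tfm context caches the CAAM device pointer once at init, so the request paths never have to look it up. In outline, with names other than qidev illustrative:

struct my_caam_ctx {
	struct device *qidev;	/* device used for DMA mapping and dev_err() */
	/* ... keys, shared descriptors, etc. ... */
};

static void my_init_common(struct my_caam_ctx *ctx, struct device *dev)
{
	ctx->qidev = dev;	/* line 2496 */
}
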