Lines matching refs:req
(cross-reference listing for the symbol 'req'; the identifiers place the file as the HiSilicon SEC2 crypto driver, sec_crypto.c; the number leading each match is that line's number in the source file)

118 static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
120 if (req->c_req.encrypt)
128 static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
130 if (req->c_req.encrypt)
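
The two matches above show only the direction test. A minimal sketch of the likely shape, assuming the context keeps per-direction cyclic counters and splits its queue pool into an encrypt half and a decrypt half (enc_qcyclic, dec_qcyclic and hlf_q_num are assumed names, not confirmed by this listing):

/* Sketch only: enc_qcyclic, dec_qcyclic, hlf_q_num are assumed fields. */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)		/* encrypt: round-robin the low half */
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
		       ctx->hlf_q_num;

	/* decrypt: round-robin the high half */
	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
	       ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}
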
136 static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
144 dev_err(req->ctx->dev, "alloc req id fail!\n");
148 req->qp_ctx = qp_ctx;
149 qp_ctx->req_list[req_id] = req;
154 static void sec_free_req_id(struct sec_req *req)
156 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
157 int req_id = req->req_id;
160 dev_err(req->ctx->dev, "free request id invalid!\n");
165 req->qp_ctx = NULL;
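
Lines 136-165 acquire and release a per-queue request slot, and line 149 shows the slot doubling as the lookup table for the completion path. A hedged sketch, assuming an IDR under a bottom-half spinlock (req_idr, req_lock and the QP_DEPTH bound are assumed names):

/* Sketch: req_idr, req_lock and QP_DEPTH are assumptions. */
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	spin_lock_bh(&qp_ctx->req_lock);
	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, QP_DEPTH,
				  GFP_ATOMIC);
	spin_unlock_bh(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(req->ctx->dev, "alloc req id fail!\n");
		return req_id;
	}

	/* Bind the request to its slot so the IRQ path can find it. */
	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= QP_DEPTH)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	spin_lock_bh(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	spin_unlock_bh(&qp_ctx->req_lock);
}
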
200 static int sec_cb_status_check(struct sec_req *req,
203 struct sec_ctx *ctx = req->ctx;
205 if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
207 req->err_type, status->done);
237 struct sec_req *req;
243 req = qp_ctx->req_list[status.tag];
246 req = (void *)(uintptr_t)status.tag;
255 if (unlikely(!req)) {
261 req->err_type = status.err_type;
262 ctx = req->ctx;
263 err = sec_cb_status_check(req, &status);
269 ctx->req_op->buf_unmap(ctx, req);
271 ctx->req_op->callback(ctx, req, err);
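
Lines 200-271 are the completion side. Note the two tag conventions at lines 243 and 246: v2 BDs return a small tag used as a req_list index, while v3 BDs return the request pointer itself. A condensed sketch of the dispatch; sec_parse_bd()/sec_parse_bd3(), struct bd_status, type_supported and SEC_BD_TYPE2 are assumed from the fragments:

/* Sketch: the parse helpers and bd_status layout are assumptions. */
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct bd_status status;
	struct sec_ctx *ctx;
	struct sec_req *req;
	int err;

	if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2) {
		sec_parse_bd(qp_ctx, &status, resp);
		req = qp_ctx->req_list[status.tag];	/* tag == req_id */
	} else {
		sec_parse_bd3(qp_ctx, &status, resp);
		req = (void *)(uintptr_t)status.tag;	/* tag == pointer */
	}
	if (unlikely(!req))
		return;

	req->err_type = status.err_type;
	ctx = req->ctx;
	err = sec_cb_status_check(req, &status);

	ctx->req_op->buf_unmap(ctx, req);
	ctx->req_op->callback(ctx, req, err);
}
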
274 static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
276 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
281 !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
285 ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
288 list_add_tail(&req->backlog_head, &qp_ctx->backlog);
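
Lines 274-288: a full queue only fails fast when the caller did not set CRYPTO_TFM_REQ_MAY_BACKLOG; otherwise the request is still sent and parked on qp_ctx->backlog so its completion can be signalled later. Sketch, with fake_req_limit and qp_status.used as assumed names for the saturation check:

static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	/* Queue saturated and caller refuses backlog: fail fast. */
	if (ctx->fake_req_limit <= atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	spin_lock_bh(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	if (ctx->fake_req_limit <= atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !ret) {
		/* Sent, but saturated: park it; the callback path later
		 * completes backlogged requests with -EINPROGRESS. */
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		spin_unlock_bh(&qp_ctx->req_lock);
		return -EBUSY;
	}
	spin_unlock_bh(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	return ret ? ret : -EINPROGRESS;
}
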
890 static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
893 struct sec_aead_req *a_req = &req->aead_req;
895 struct sec_cipher_req *c_req = &req->c_req;
896 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
899 int req_id = req->req_id;
922 req->in_dma = qp_ctx->res[req_id].pbuf_dma;
923 c_req->c_out_dma = req->in_dma;
928 static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
931 struct aead_request *aead_req = req->aead_req.aead_req;
932 struct sec_cipher_req *c_req = &req->c_req;
933 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
935 int req_id = req->req_id;
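
Lines 890-935 implement the small-request "pbuf" path: the source scatterlist is copied into a per-slot DMA bounce buffer, and line 923 shows input and output sharing that one buffer. A sketch of the map side (the unmap side copies the result back out to dst); qp_ctx->res[req_id].pbuf is assumed to be the CPU address paired with the pbuf_dma seen at line 922:

static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			       struct scatterlist *src)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;
	int copy_size, pbuf_length;

	/* AEAD must bounce AAD + payload; plain ciphers just the payload. */
	copy_size = ctx->alg_type == SEC_AEAD ?
		    aead_req->cryptlen + aead_req->assoclen : c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
					qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size)) {
		dev_err(ctx->dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}

	/* In and out share the single bounce buffer (lines 922-923). */
	req->in_dma = qp_ctx->res[req_id].pbuf_dma;
	c_req->c_out_dma = req->in_dma;

	return 0;
}
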
948 static int sec_aead_mac_init(struct sec_aead_req *req)
950 struct aead_request *aead_req = req->aead_req;
953 u8 *mac_out = req->out_mac;
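
Lines 948-953 prepare the MAC buffer for AEAD verification by lifting the tag out of the source scatterlist. A plausible sketch using sg_pcopy_to_buffer(), assuming the tag occupies the last authsize bytes of assoclen + cryptlen:

static int sec_aead_mac_init(struct sec_aead_req *req)
{
	struct aead_request *aead_req = req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct scatterlist *sgl = aead_req->src;
	u8 *mac_out = req->out_mac;
	size_t copy_size;
	off_t skip_size;

	/* Copy the incoming MAC (tail of src) for the engine to check. */
	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
				       authsize, skip_size);
	if (unlikely(copy_size != authsize))
		return -EINVAL;

	return 0;
}
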
968 static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
971 struct sec_cipher_req *c_req = &req->c_req;
972 struct sec_aead_req *a_req = &req->aead_req;
973 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
974 struct sec_alg_res *res = &qp_ctx->res[req->req_id];
978 if (req->use_pbuf) {
988 ret = sec_cipher_pbuf_map(ctx, req, src);
1001 req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
1003 req->req_id,
1004 &req->in_dma);
1005 if (IS_ERR(req->in)) {
1007 return PTR_ERR(req->in);
1019 c_req->c_out = req->in;
1020 c_req->c_out_dma = req->in_dma;
1024 req->req_id,
1029 hisi_acc_sg_buf_unmap(dev, src, req->in);
1037 static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
1040 struct sec_cipher_req *c_req = &req->c_req;
1043 if (req->use_pbuf) {
1044 sec_cipher_pbuf_unmap(ctx, req, dst);
1047 hisi_acc_sg_buf_unmap(dev, src, req->in);
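
sec_cipher_map()/sec_cipher_unmap() (lines 968-1047) choose between the two DMA strategies: req->use_pbuf routes small requests through the bounce buffer above, everything else is mapped into hardware SGLs, and src == dst (in-place operation) makes the output simply alias the input (lines 1019-1020). A condensed sketch of that branch structure; c_in_pool/c_out_pool are assumed pool names and error prints are trimmed:

static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;

	if (req->use_pbuf)
		return sec_cipher_pbuf_map(ctx, req, src);

	req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src, qp_ctx->c_in_pool,
						req->req_id, &req->in_dma);
	if (IS_ERR(req->in))
		return PTR_ERR(req->in);

	if (dst == src) {
		/* In-place: the output hardware SGL aliases the input. */
		c_req->c_out = req->in;
		c_req->c_out_dma = req->in_dma;
		return 0;
	}

	c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
						     qp_ctx->c_out_pool,
						     req->req_id,
						     &c_req->c_out_dma);
	if (IS_ERR(c_req->c_out)) {
		hisi_acc_sg_buf_unmap(dev, src, req->in);
		return PTR_ERR(c_req->c_out);
	}

	return 0;
}
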
1053 static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
1055 struct skcipher_request *sq = req->c_req.sk_req;
1057 return sec_cipher_map(ctx, req, sq->src, sq->dst);
1060 static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
1062 struct skcipher_request *sq = req->c_req.sk_req;
1064 sec_cipher_unmap(ctx, req, sq->src, sq->dst);
1226 static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
1228 struct aead_request *aq = req->aead_req.aead_req;
1230 return sec_cipher_map(ctx, req, aq->src, aq->dst);
1233 static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
1235 struct aead_request *aq = req->aead_req.aead_req;
1237 sec_cipher_unmap(ctx, req, aq->src, aq->dst);
1240 static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
1244 ret = ctx->req_op->buf_map(ctx, req);
1248 ctx->req_op->do_transfer(ctx, req);
1250 ret = ctx->req_op->bd_fill(ctx, req);
1257 ctx->req_op->buf_unmap(ctx, req);
1261 static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
1263 ctx->req_op->buf_unmap(ctx, req);
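
Lines 1240-1263 drive the per-request pipeline through the ctx->req_op vtable: map buffers, run the transfer step (IV copy), then build the BD, unwinding the mapping if BD fill fails. Reassembled from the fragments, the shape is:

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);		/* DMA-map src/dst */
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);		/* e.g. copy IV in */

	ret = ctx->req_op->bd_fill(ctx, req);		/* build hardware BD */
	if (unlikely(ret))
		goto unmap_req_buf;

	return 0;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);
	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}
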
1266 static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
1268 struct skcipher_request *sk_req = req->c_req.sk_req;
1269 struct sec_cipher_req *c_req = &req->c_req;
1274 static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1277 struct sec_cipher_req *c_req = &req->c_req;
1278 struct sec_sqe *sec_sqe = &req->sec_sqe;
1287 sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
1304 if (req->use_pbuf) {
1314 if (req->in_dma != c_req->c_out_dma)
1320 sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
1325 static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
1327 struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
1329 struct sec_cipher_req *c_req = &req->c_req;
1337 sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
1355 if (req->use_pbuf) {
1364 if (req->in_dma != c_req->c_out_dma)
1371 sec_sqe3->tag = cpu_to_le64((unsigned long)req);
1387 static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
1389 struct aead_request *aead_req = req->aead_req.aead_req;
1390 struct skcipher_request *sk_req = req->c_req.sk_req;
1391 u32 iv_size = req->ctx->c_ctx.ivsize;
1397 if (req->c_req.encrypt)
1410 if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
1414 dev_err(req->ctx->dev, "copy output iv error!\n");
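
sec_update_iv() (lines 1387-1414) maintains IV chaining. For CBC the next IV is the final ciphertext block, so encryption reads it from dst after completion while decryption must capture it from src before an in-place operation overwrites it (see the call ordering in sec_process, line 1783). A hedged sketch of the CBC arm:

static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	/* Encrypt chains from dst; decrypt reads src before it is clobbered. */
	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
		/* Next IV = last ciphertext block. */
		sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
					cryptlen - iv_size);
		if (unlikely(sz != iv_size))
			dev_err(req->ctx->dev, "copy output iv error!\n");
	}
	/* The CTR arm would instead advance the counter by the number of
	 * processed blocks (helper not visible in this listing). */
}
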
1441 static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
1444 struct skcipher_request *sk_req = req->c_req.sk_req;
1445 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1449 sec_free_req_id(req);
1453 ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
1454 sec_update_iv(req, SEC_SKCIPHER);
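
The skcipher callback (lines 1441-1454) frees the slot, does the encrypt-side IV update (the decrypt side was handled before send), then drains backlogged requests before completing. Sketch; sec_back_req_clear() is an assumed helper that pops one entry off qp_ctx->backlog:

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct crypto_async_request *backlog;

	sec_free_req_id(req);

	/* IV chaining for the encrypt direction happens on completion. */
	if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	/* Wake every request parked on the backlog (see line 288). */
	while ((backlog = sec_back_req_clear(ctx, qp_ctx)))
		skcipher_request_complete(skcipher_request_cast(backlog),
					  -EINPROGRESS);

	skcipher_request_complete(sk_req, err);
}
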
1469 static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
1471 struct aead_request *aead_req = req->aead_req.aead_req;
1472 struct sec_cipher_req *c_req = &req->c_req;
1473 struct sec_aead_req *a_req = &req->aead_req;
1512 static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
1514 struct aead_request *aead_req = req->aead_req.aead_req;
1517 struct sec_cipher_req *c_req = &req->c_req;
1518 struct sec_aead_req *a_req = &req->aead_req;
1529 set_aead_auth_iv(ctx, req);
1540 struct sec_req *req, struct sec_sqe *sec_sqe)
1542 struct sec_aead_req *a_req = &req->aead_req;
1566 struct sec_req *req, struct sec_sqe3 *sqe3)
1568 struct sec_aead_req *a_req = &req->aead_req;
1591 struct sec_req *req, struct sec_sqe *sec_sqe)
1593 struct sec_aead_req *a_req = &req->aead_req;
1594 struct sec_cipher_req *c_req = &req->c_req;
1623 static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1626 struct sec_sqe *sec_sqe = &req->sec_sqe;
1629 ret = sec_skcipher_bd_fill(ctx, req);
1637 sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1639 sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1645 struct sec_req *req, struct sec_sqe3 *sqe3)
1647 struct sec_aead_req *a_req = &req->aead_req;
1648 struct sec_cipher_req *c_req = &req->c_req;
1678 static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
1681 struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
1684 ret = sec_skcipher_bd_fill_v3(ctx, req);
1692 sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
1693 req, sec_sqe3);
1695 sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
1696 req, sec_sqe3);
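
The AEAD BD builders (lines 1623-1696) layer on the cipher ones: fill the cipher half of the BD first, then patch in the auth fields, with the XCM (CCM/GCM-style) variants split out. Also note the generation difference visible at lines 1320 and 1371: v2 BDs carry a 16-bit req_id tag, v3 BDs carry the whole request pointer. The v2 dispatch is roughly as follows (SEC_CMODE_CCM/SEC_CMODE_GCM and the a_ctx field are assumed names):

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	/* Reuse the cipher BD fill, then add the auth fields on top. */
	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret))
		return ret;

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req,
				     sec_sqe);
	else
		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req,
				    sec_sqe);

	return 0;
}
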
1701 static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
1703 struct aead_request *a_req = req->aead_req.aead_req;
1705 struct sec_aead_req *aead_req = &req->aead_req;
1706 struct sec_cipher_req *c_req = &req->c_req;
1708 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1714 sec_update_iv(req, SEC_AEAD);
1730 sec_free_req_id(req);
1745 static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
1747 sec_free_req_id(req);
1748 sec_free_queue_id(ctx, req);
1751 static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
1757 queue_id = sec_alloc_queue_id(ctx, req);
1760 req->req_id = sec_alloc_req_id(req, qp_ctx);
1761 if (unlikely(req->req_id < 0)) {
1762 sec_free_queue_id(ctx, req);
1763 return req->req_id;
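
Lines 1745-1763 pair acquisition with release in reverse order: pick a queue, then a slot on that queue, rolling the queue id back if slot allocation fails. Reconstructed, with ctx->qp_ctx assumed to be the per-queue context array:

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* Load-balance across the queue pair contexts. */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);	/* roll back on failure */
		return req->req_id;
	}

	return 0;
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}
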
1769 static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
1771 struct sec_cipher_req *c_req = &req->c_req;
1774 ret = sec_request_init(ctx, req);
1778 ret = sec_request_transfer(ctx, req);
1783 if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
1785 sec_update_iv(req, ctx->alg_type);
1787 ret = ctx->req_op->bd_send(ctx, req);
1789 (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1798 if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
1800 memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
1803 memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
1807 sec_request_untransfer(ctx, req);
1809 sec_request_uninit(ctx, req);
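
sec_process() (lines 1769-1809) is the common submit path. Two subtleties the fragments point at: the decrypt-side IV capture runs before bd_send (line 1783) because in-place decryption destroys the last ciphertext block, and a failed send restores the caller's IV from the saved c_ivin copy (lines 1798-1803). Condensed:

static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* Capture the chained IV now: decryption may clobber src. */
	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
	    (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))))
		goto err_send_req;

	return ret;

err_send_req:
	/* Undo the early IV update so the caller sees its original IV. */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}
	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);
	return ret;
}
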
2112 struct sec_req *req = skcipher_request_ctx(sk_req);
2122 req->flag = sk_req->base.flags;
2123 req->c_req.sk_req = sk_req;
2124 req->c_req.encrypt = encrypt;
2125 req->ctx = ctx;
2127 ret = sec_skcipher_param_check(ctx, req);
2134 return ctx->req_op->process(ctx, req);
2260 struct aead_request *req = sreq->aead_req.aead_req;
2261 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2267 if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
2268 req->assoclen > SEC_MAX_AAD_LEN)) {
2281 if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
2285 ret = aead_iv_demension_check(req);
2293 sreq->c_req.c_len = req->cryptlen;
2295 sreq->c_req.c_len = req->cryptlen - authsize;
2308 struct aead_request *req = sreq->aead_req.aead_req;
2309 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2314 if (unlikely(!req->src || !req->dst)) {
2320 if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
2321 req->cryptlen <= authsize))) {
2336 if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
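
The AEAD checks (lines 2260-2336) validate lengths against the engine limits, derive c_len (the payload the cipher engine actually processes: cryptlen on encrypt, cryptlen minus the MAC on decrypt, lines 2293-2295), and decide the pbuf path at line 2336. A condensed sketch of the outer check; SEC_PBUF_SZ is an assumed threshold name:

static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);

	if (unlikely(!req->src || !req->dst))
		return -EINVAL;

	/* On decrypt the payload must extend past the MAC. */
	if (unlikely(!req->cryptlen ||
		     (!sreq->c_req.encrypt && req->cryptlen <= authsize)))
		return -EINVAL;

	/* Small requests take the preallocated pbuf bounce path. */
	sreq->use_pbuf = ctx->pbuf_supported &&
			 (req->cryptlen + req->assoclen) <= SEC_PBUF_SZ;

	return 0;
}
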
2383 struct sec_req *req = aead_request_ctx(a_req);
2387 req->flag = a_req->base.flags;
2388 req->aead_req.aead_req = a_req;
2389 req->c_req.encrypt = encrypt;
2390 req->ctx = ctx;
2392 ret = sec_aead_param_check(ctx, req);
2399 return ctx->req_op->process(ctx, req);
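
The skcipher (lines 2112-2134) and AEAD (lines 2383-2399) entry points are symmetric: stash the crypto API request, direction and context in the driver's sec_req (which lives in the request context area), run the parameter check, then hand off to ctx->req_op->process, i.e. sec_process(). The AEAD side in sketch form; the skcipher side differs only in the request type and check:

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_req *req = aead_request_ctx(a_req);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}
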