Lines Matching refs:ctx

69 static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
72 return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
73 ctx->hlf_q_num;
75 return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
76 ctx->hlf_q_num;
79 static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
82 atomic_dec(&ctx->enc_qcyclic);
84 atomic_dec(&ctx->dec_qcyclic);
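The sec_alloc_queue_id()/sec_free_queue_id() matches above show the queue selection policy: encrypt requests cycle round-robin through the lower half of the context's queues and decrypt requests through the upper half, each half driven by its own atomic counter. Below is a minimal user-space sketch of that split; the field names mirror the driver's (hlf_q_num, enc_qcyclic, dec_qcyclic) but the surrounding struct and helpers are invented for illustration, not the driver's own API.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's per-tfm context. */
struct demo_ctx {
	atomic_uint enc_qcyclic;   /* round-robin cursor for encrypt requests   */
	atomic_uint dec_qcyclic;   /* round-robin cursor for decrypt requests   */
	unsigned int hlf_q_num;    /* half of the queues owned by this context  */
};

/* Encrypt requests land in queues [0, hlf_q_num), decrypt requests in
 * [hlf_q_num, 2 * hlf_q_num), each half cycled independently. */
static unsigned int demo_alloc_queue_id(struct demo_ctx *ctx, bool encrypt)
{
	if (encrypt)
		return atomic_fetch_add(&ctx->enc_qcyclic, 1) % ctx->hlf_q_num;

	return atomic_fetch_add(&ctx->dec_qcyclic, 1) % ctx->hlf_q_num +
	       ctx->hlf_q_num;
}

int main(void)
{
	struct demo_ctx ctx = { .hlf_q_num = 4 };

	for (int i = 0; i < 6; i++)
		printf("enc -> q%u, dec -> q%u\n",
		       demo_alloc_queue_id(&ctx, true),
		       demo_alloc_queue_id(&ctx, false));
	return 0;
}
```

Splitting the halves keeps encrypt and decrypt traffic from contending for the same hardware queues while the two counters keep each half evenly loaded.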
97 dev_err(req->ctx->dev, "alloc req id fail!\n");
112 dev_err(req->ctx->dev, "free request id invalid!\n");
138 dev_err(req->ctx->dev, "aead verify failure!\n");
148 struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
150 struct sec_ctx *ctx;
170 ctx = req->ctx;
175 (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
176 (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
177 dev_err_ratelimited(ctx->dev,
184 if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
189 ctx->req_op->buf_unmap(ctx, req);
191 ctx->req_op->callback(ctx, req, err);
194 static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
199 if (ctx->fake_req_limit <=
207 if (ctx->fake_req_limit <=
210 atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
211 atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
222 atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
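The sec_bd_send() matches compare the in-flight count against ctx->fake_req_limit (set to half the queue depth in sec_ctx_base_init()) and bump the send/send_busy debugfs counters on the congested paths. The sketch below reconstructs that soft-limit shape under stated assumptions: the names are invented, and the real driver additionally checks whether the request is allowed to back-log before parking it.

```c
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical queue-pair state mirroring the soft limit in sec_bd_send(). */
struct demo_qp {
	atomic_int pending;      /* requests already handed to the hardware  */
	int fake_req_limit;      /* soft threshold, e.g. half the ring depth */
};

/*
 * Returns 0 when the request was accepted, -EBUSY when the caller must
 * retry, and -EINPROGRESS when it was parked for later completion
 * (the driver keeps a backlog list on the queue-pair context for this).
 */
static int demo_bd_send(struct demo_qp *qp, bool may_backlog)
{
	if (atomic_load(&qp->pending) >= qp->fake_req_limit) {
		if (!may_backlog)
			return -EBUSY;
		/* Driver also accounts these paths in its send/busy counters. */
		return -EINPROGRESS;
	}

	atomic_fetch_add(&qp->pending, 1);
	return 0;
}

int main(void)
{
	struct demo_qp qp = { .fake_req_limit = 2 };

	for (int i = 0; i < 4; i++)
		printf("req %d -> %d\n", i, demo_bd_send(&qp, i & 1));
	return 0;
}
```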
322 static int sec_alg_resource_alloc(struct sec_ctx *ctx,
326 struct device *dev = ctx->dev;
333 if (ctx->alg_type == SEC_AEAD) {
338 if (ctx->pbuf_supported) {
348 if (ctx->alg_type == SEC_AEAD)
356 static void sec_alg_resource_free(struct sec_ctx *ctx,
359 struct device *dev = ctx->dev;
363 if (ctx->pbuf_supported)
365 if (ctx->alg_type == SEC_AEAD)
369 static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
372 struct device *dev = ctx->dev;
377 qp_ctx = &ctx->qp_ctx[qp_ctx_id];
378 qp = ctx->qps[qp_ctx_id];
383 qp_ctx->ctx = ctx;
403 ret = sec_alg_resource_alloc(ctx, qp_ctx);
414 sec_alg_resource_free(ctx, qp_ctx);
425 static void sec_release_qp_ctx(struct sec_ctx *ctx,
428 struct device *dev = ctx->dev;
431 sec_alg_resource_free(ctx, qp_ctx);
439 static int sec_ctx_base_init(struct sec_ctx *ctx)
444 ctx->qps = sec_create_qps();
445 if (!ctx->qps) {
450 sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
451 ctx->sec = sec;
452 ctx->dev = &sec->qm.pdev->dev;
453 ctx->hlf_q_num = sec->ctx_q_num >> 1;
455 ctx->pbuf_supported = ctx->sec->iommu_used;
458 ctx->fake_req_limit = QM_Q_DEPTH >> 1;
459 ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
461 if (!ctx->qp_ctx) {
467 ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
476 sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
477 kfree(ctx->qp_ctx);
479 sec_destroy_qps(ctx->qps, sec->ctx_q_num);
483 static void sec_ctx_base_uninit(struct sec_ctx *ctx)
487 for (i = 0; i < ctx->sec->ctx_q_num; i++)
488 sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
490 sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
491 kfree(ctx->qp_ctx);
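The sec_ctx_base_init()/sec_ctx_base_uninit() matches show the usual acquire-then-unwind pattern: grab the queue pairs, derive the per-context parameters, allocate the qp_ctx array, create each queue context in turn, and on failure release only what was already created before freeing the array and the queues. A compact sketch of that pattern follows; the resource types are reduced to plain malloc and all names are hypothetical.

```c
#include <stdio.h>
#include <stdlib.h>

#define DEMO_Q_NUM 4

/* Hypothetical per-queue context, standing in for struct sec_qp_ctx. */
struct demo_qp_ctx {
	void *resource;
};

static int demo_create_qp_ctx(struct demo_qp_ctx *qp_ctx)
{
	qp_ctx->resource = malloc(64);
	return qp_ctx->resource ? 0 : -1;
}

static void demo_release_qp_ctx(struct demo_qp_ctx *qp_ctx)
{
	free(qp_ctx->resource);
}

/*
 * Mirrors the shape of sec_ctx_base_init(): allocate the array, create
 * each queue context in turn, and on failure release only the contexts
 * that were successfully created before freeing the array itself.
 */
static int demo_ctx_base_init(struct demo_qp_ctx **out)
{
	struct demo_qp_ctx *qp_ctx;
	int i, ret;

	qp_ctx = calloc(DEMO_Q_NUM, sizeof(*qp_ctx));
	if (!qp_ctx)
		return -1;

	for (i = 0; i < DEMO_Q_NUM; i++) {
		ret = demo_create_qp_ctx(&qp_ctx[i]);
		if (ret)
			goto err_release;
	}

	*out = qp_ctx;
	return 0;

err_release:
	while (--i >= 0)
		demo_release_qp_ctx(&qp_ctx[i]);
	free(qp_ctx);
	return ret;
}

int main(void)
{
	struct demo_qp_ctx *qp_ctx;

	if (!demo_ctx_base_init(&qp_ctx)) {
		puts("init ok");
		for (int i = 0; i < DEMO_Q_NUM; i++)
			demo_release_qp_ctx(&qp_ctx[i]);
		free(qp_ctx);
	}
	return 0;
}
```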
494 static int sec_cipher_init(struct sec_ctx *ctx)
496 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
498 c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
506 static void sec_cipher_uninit(struct sec_ctx *ctx)
508 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
511 dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
515 static int sec_auth_init(struct sec_ctx *ctx)
517 struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
519 a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
527 static void sec_auth_uninit(struct sec_ctx *ctx)
529 struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
532 dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
538 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
541 ctx->alg_type = SEC_SKCIPHER;
543 ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
544 if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
549 ret = sec_ctx_base_init(ctx);
553 ret = sec_cipher_init(ctx);
559 sec_ctx_base_uninit(ctx);
566 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
568 sec_cipher_uninit(ctx);
569 sec_ctx_base_uninit(ctx);
630 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
631 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
632 struct device *dev = ctx->dev;
685 static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
691 struct device *dev = ctx->dev;
695 if (ctx->alg_type == SEC_AEAD)
720 static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
726 struct device *dev = ctx->dev;
730 if (ctx->alg_type == SEC_AEAD)
743 static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
750 struct device *dev = ctx->dev;
754 ret = sec_cipher_pbuf_map(ctx, req, src);
757 if (ctx->alg_type == SEC_AEAD) {
767 if (ctx->alg_type == SEC_AEAD) {
801 static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
805 struct device *dev = ctx->dev;
808 sec_cipher_pbuf_unmap(ctx, req, dst);
817 static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
821 return sec_cipher_map(ctx, req, sq->src, sq->dst);
824 static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
828 sec_cipher_unmap(ctx, req, sq->src, sq->dst);
853 static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
856 struct crypto_shash *hash_tfm = ctx->hash_tfm;
867 keys->authkeylen, ctx->a_key);
872 ctx->a_key_len = blocksize;
874 memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
875 ctx->a_key_len = keys->authkeylen;
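The sec_aead_auth_set_key() matches show the standard HMAC key rule: an authentication key longer than the hash block size is first digested down with the context's shash tfm and the result becomes the working key, otherwise the key is copied verbatim and a_key_len tracks its original length. A hedged user-space sketch of that rule is below; demo_digest() is a non-cryptographic stand-in for the driver's shash digest and every name is invented.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_BLOCK_SIZE 64   /* e.g. the SHA-256 block size */
#define DEMO_MAX_KEY    128

/* Hypothetical working-key storage, standing in for sec_auth_ctx. */
struct demo_auth_ctx {
	uint8_t key[DEMO_MAX_KEY];
	size_t key_len;
};

/* Stand-in for the driver's shash digest; NOT a real hash function. */
static void demo_digest(const uint8_t *in, size_t len, uint8_t *out)
{
	memset(out, 0, DEMO_BLOCK_SIZE);
	for (size_t i = 0; i < len; i++)
		out[i % DEMO_BLOCK_SIZE] ^= in[i];
}

/*
 * Mirrors the rule in sec_aead_auth_set_key(): keys longer than the hash
 * block size are digested down to a fixed-size working key, shorter keys
 * are copied as-is.
 */
static void demo_auth_set_key(struct demo_auth_ctx *ctx,
			      const uint8_t *key, size_t keylen)
{
	if (keylen > DEMO_BLOCK_SIZE) {
		demo_digest(key, keylen, ctx->key);
		ctx->key_len = DEMO_BLOCK_SIZE;
	} else {
		memcpy(ctx->key, key, keylen);
		ctx->key_len = keylen;
	}
}

int main(void)
{
	struct demo_auth_ctx ctx;
	uint8_t long_key[100] = { 1, 2, 3 };

	demo_auth_set_key(&ctx, long_key, sizeof(long_key));
	printf("working key length: %zu\n", ctx.key_len);
	return 0;
}
```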
887 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
888 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
889 struct device *dev = ctx->dev;
893 ctx->a_ctx.a_alg = a_alg;
894 ctx->c_ctx.c_alg = c_alg;
895 ctx->a_ctx.mac_len = mac_len;
907 ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
935 static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
939 return sec_cipher_map(ctx, req, aq->src, aq->dst);
942 static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
946 sec_cipher_unmap(ctx, req, aq->src, aq->dst);
949 static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
953 ret = ctx->req_op->buf_map(ctx, req);
957 ctx->req_op->do_transfer(ctx, req);
959 ret = ctx->req_op->bd_fill(ctx, req);
966 ctx->req_op->buf_unmap(ctx, req);
971 static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
973 ctx->req_op->buf_unmap(ctx, req);
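The sec_request_transfer()/sec_request_untransfer() matches show that each request is driven through a per-algorithm operations table, ctx->req_op: buf_map, do_transfer, bd_fill, with buf_unmap on the failure path; the same table supplies bd_send, callback and process elsewhere in this listing. Here is a sketch of that ops-struct dispatch, with invented callback implementations; only the control flow is meant to mirror the driver.

```c
#include <stdio.h>

struct demo_req { int id; };
struct demo_ctx;

/* Hypothetical per-algorithm operations table, shaped like the driver's req_op. */
struct demo_req_op {
	int  (*buf_map)(struct demo_ctx *ctx, struct demo_req *req);
	void (*do_transfer)(struct demo_ctx *ctx, struct demo_req *req);
	int  (*bd_fill)(struct demo_ctx *ctx, struct demo_req *req);
	void (*buf_unmap)(struct demo_ctx *ctx, struct demo_req *req);
};

struct demo_ctx {
	const struct demo_req_op *req_op;
};

/* Mirrors the shape of sec_request_transfer(): map, transfer, fill the
 * descriptor, and unmap again if descriptor filling fails. */
static int demo_request_transfer(struct demo_ctx *ctx, struct demo_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (ret)
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (ret)
		ctx->req_op->buf_unmap(ctx, req);

	return ret;
}

/* Trivial callbacks standing in for one algorithm's implementation. */
static int demo_map(struct demo_ctx *c, struct demo_req *r)
{
	(void)c; printf("map %d\n", r->id); return 0;
}
static void demo_xfer(struct demo_ctx *c, struct demo_req *r)
{
	(void)c; printf("transfer %d\n", r->id);
}
static int demo_fill(struct demo_ctx *c, struct demo_req *r)
{
	(void)c; printf("fill %d\n", r->id); return 0;
}
static void demo_unmap(struct demo_ctx *c, struct demo_req *r)
{
	(void)c; printf("unmap %d\n", r->id);
}

static const struct demo_req_op demo_ops = {
	.buf_map = demo_map, .do_transfer = demo_xfer,
	.bd_fill = demo_fill, .buf_unmap = demo_unmap,
};

int main(void)
{
	struct demo_ctx ctx = { .req_op = &demo_ops };
	struct demo_req req = { .id = 1 };

	return demo_request_transfer(&ctx, &req);
}
```

The table is what lets the skcipher and AEAD paths share one transfer/send pipeline while differing only in mapping, descriptor filling and completion.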
976 static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
981 memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
984 static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
986 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
1040 u32 iv_size = req->ctx->c_ctx.ivsize;
1062 dev_err(req->ctx->dev, "copy output iv error!\n");
1065 static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
1071 if (ctx->fake_req_limit >=
1083 static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
1094 if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
1098 backlog_req = sec_back_req_clear(ctx, qp_ctx);
1105 atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
1112 static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
1117 memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
1120 static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
1127 sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
1130 cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
1133 cpu_to_le32((u32)((ctx->a_key_len) /
1137 cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
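The sec_auth_bd_fill_ex() matches show the authentication fields being packed into the hardware descriptor: the key DMA address and the MAC/key lengths are stored little-endian (cpu_to_le32/cpu_to_le64), with byte lengths divided by SEC_SQE_LEN_RATE to get the unit the hardware expects. A user-space sketch of that packing follows, assuming a length rate of 4 bytes per unit purely for illustration; the struct layout and names are hypothetical.

```c
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_LEN_RATE 4   /* assumed bytes-per-unit; the driver uses SEC_SQE_LEN_RATE */

/* Hypothetical slice of a hardware descriptor; all fields little-endian. */
struct demo_auth_sqe {
	uint64_t a_key_addr;   /* DMA address of the authentication key */
	uint32_t mac_len;      /* MAC length in DEMO_LEN_RATE units      */
	uint32_t a_key_len;    /* key length in DEMO_LEN_RATE units      */
};

static void demo_auth_bd_fill(struct demo_auth_sqe *sqe, uint64_t key_dma,
			      uint32_t mac_bytes, uint32_t key_bytes)
{
	sqe->a_key_addr = htole64(key_dma);
	sqe->mac_len    = htole32(mac_bytes / DEMO_LEN_RATE);
	sqe->a_key_len  = htole32(key_bytes / DEMO_LEN_RATE);
}

int main(void)
{
	struct demo_auth_sqe sqe;

	demo_auth_bd_fill(&sqe, 0x1000, 16, 64);
	printf("mac_len field: %u units\n", le32toh(sqe.mac_len));
	return 0;
}
```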
1153 static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1155 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1159 ret = sec_skcipher_bd_fill(ctx, req);
1161 dev_err(ctx->dev, "skcipher bd fill is error!\n");
1216 static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
1219 sec_free_queue_id(ctx, req);
1222 static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
1228 queue_id = sec_alloc_queue_id(ctx, req);
1229 qp_ctx = &ctx->qp_ctx[queue_id];
1233 sec_free_queue_id(ctx, req);
1240 static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
1245 ret = sec_request_init(ctx, req);
1249 ret = sec_request_transfer(ctx, req);
1254 if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
1255 sec_update_iv(req, ctx->alg_type);
1257 ret = ctx->req_op->bd_send(ctx, req);
1260 dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
1268 if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
1269 if (ctx->alg_type == SEC_SKCIPHER)
1271 ctx->c_ctx.ivsize);
1274 ctx->c_ctx.ivsize);
1277 sec_request_untransfer(ctx, req);
1279 sec_request_uninit(ctx, req);
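The sec_process() matches string the earlier pieces together: initialize the request (queue and request id), transfer it (map, fill the descriptor), snapshot the IV before sending a CBC decryption, then hand the descriptor to the hardware; a failed send restores the IV and unwinds the transfer and request state, while -EBUSY/-EINPROGRESS follow the async crypto convention. Below is a condensed control-flow sketch with stub stages; the real error check is stricter (it also rejects -EBUSY when the request may not back-log), and all names here are invented.

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_req { bool decrypt_cbc; };

/* Stub stages standing in for sec_request_init/transfer/bd_send and friends. */
static int  demo_request_init(struct demo_req *r)       { (void)r; return 0; }
static int  demo_request_transfer(struct demo_req *r)   { (void)r; return 0; }
static void demo_update_iv(struct demo_req *r)          { (void)r; }
static int  demo_bd_send(struct demo_req *r)            { (void)r; return -EINPROGRESS; }
static void demo_restore_iv(struct demo_req *r)         { (void)r; }
static void demo_request_untransfer(struct demo_req *r) { (void)r; }
static void demo_request_uninit(struct demo_req *r)     { (void)r; }

/* Mirrors the shape of sec_process(): each failure unwinds exactly the
 * stages that already ran; an accepted send reports -EINPROGRESS. */
static int demo_process(struct demo_req *req)
{
	int ret;

	ret = demo_request_init(req);
	if (ret)
		return ret;

	ret = demo_request_transfer(req);
	if (ret)
		goto err_uninit;

	/* CBC decryption must snapshot the IV before the hardware overwrites
	 * the ciphertext that the next IV is derived from. */
	if (req->decrypt_cbc)
		demo_update_iv(req);

	ret = demo_bd_send(req);
	if (ret != -EBUSY && ret != -EINPROGRESS)
		goto err_send;

	return ret;

err_send:
	if (req->decrypt_cbc)
		demo_restore_iv(req);
	demo_request_untransfer(req);
err_uninit:
	demo_request_uninit(req);
	return ret;
}

int main(void)
{
	struct demo_req req = { .decrypt_cbc = true };

	printf("process -> %d\n", demo_process(&req));
	return 0;
}
```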
1306 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
1308 ctx->req_op = &sec_skcipher_req_ops;
1320 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1324 ctx->alg_type = SEC_AEAD;
1325 ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
1326 if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
1327 dev_err(ctx->dev, "get error aead iv size!\n");
1331 ctx->req_op = &sec_aead_req_ops;
1332 ret = sec_ctx_base_init(ctx);
1336 ret = sec_auth_init(ctx);
1340 ret = sec_cipher_init(ctx);
1347 sec_auth_uninit(ctx);
1349 sec_ctx_base_uninit(ctx);
1356 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1358 sec_cipher_uninit(ctx);
1359 sec_auth_uninit(ctx);
1360 sec_ctx_base_uninit(ctx);
1365 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1366 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1377 dev_err(ctx->dev, "aead alloc shash error!\n");
1387 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1389 crypto_free_shash(ctx->a_ctx.hash_tfm);
1408 static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
1411 struct device *dev = ctx->dev;
1412 u8 c_alg = ctx->c_ctx.c_alg;
1420 if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
1447 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
1456 req->ctx = ctx;
1458 ret = sec_skcipher_param_check(ctx, req);
1462 return ctx->req_op->process(ctx, req);
1532 static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
1537 struct device *dev = ctx->dev;
1538 u8 c_alg = ctx->c_ctx.c_alg;
1546 if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
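Both parameter checkers decide whether the request is small enough for the pre-allocated pbuf path: the skcipher check compares cryptlen against SEC_PBUF_SZ, the AEAD check compares cryptlen + assoclen, and pbuf_supported itself comes from the IOMMU flag recorded in sec_ctx_base_init(). A tiny sketch of that decision, with an assumed threshold value and hypothetical names:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DEMO_PBUF_SZ 512   /* assumed threshold; the driver uses SEC_PBUF_SZ */

/*
 * Mirrors the decision visible in the param-check helpers: small requests
 * on a pbuf-capable (IOMMU-backed) context use the pre-mapped buffer,
 * everything else goes through the scatter-gather mapping path.
 */
static bool demo_use_pbuf(bool pbuf_supported, size_t cryptlen, size_t assoclen)
{
	return pbuf_supported && (cryptlen + assoclen) <= DEMO_PBUF_SZ;
}

int main(void)
{
	printf("64B AEAD request uses pbuf: %d\n", demo_use_pbuf(true, 64, 16));
	printf("4KiB request uses pbuf:     %d\n", demo_use_pbuf(true, 4096, 0));
	return 0;
}
```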
1575 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1581 req->ctx = ctx;
1583 ret = sec_aead_param_check(ctx, req);
1587 return ctx->req_op->process(ctx, req);