// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* Bit-field offsets and masks within the SEC sqe (bd) */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)

#define SEC_PBUF_SZ		512
#define SEC_PBUF_IV_OFFSET	SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET	(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
				 SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM	(QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ	(SEC_PBUF_PKG * (QM_Q_DEPTH -	\
				 SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ	(PAGE_SIZE * SEC_PBUF_PAGE_NUM +	\
				 SEC_PBUF_LEFT_SZ)

#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1

/* Pick an en/decrypt queue cyclically to balance load across the TFM's queues */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				 ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				 ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	spin_lock_bh(&qp_ctx->req_lock);

	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	spin_unlock_bh(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(req->ctx->dev, "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;
	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	spin_lock_bh(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	spin_unlock_bh(&qp_ctx->req_lock);
}

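/*
 * AEAD decryption: the engine writes the MAC it computed into out_mac;
 * the MAC supplied by the sender sits at the tail of the source
 * scatterlist (at cryptlen + assoclen - authsize). Copy the latter into
 * the scratch area right behind out_mac and compare the two.
 */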
static int sec_aead_verify(struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->aead_req.out_mac;
	u8 *mac = mac_out + SEC_MAX_MAC_LEN;
	struct scatterlist *sgl = aead_req->src;
	size_t sz;

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
				aead_req->cryptlen + aead_req->assoclen -
				authsize);
	if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
		dev_err(req->ctx->dev, "aead verify failure!\n");
		return -EBADMSG;
	}

	return 0;
}

static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	struct sec_sqe *bd = resp;
	struct sec_ctx *ctx;
	struct sec_req *req;
	u16 done, flag;
	int err = 0;
	u8 type;

	type = bd->type_cipher_auth & SEC_TYPE_MASK;
	if (unlikely(type != SEC_BD_TYPE2)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%d]\n", type);
		return;
	}

	req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}
	req->err_type = bd->type2.error_type;
	ctx = req->ctx;
	done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	flag = (le16_to_cpu(bd->type2.done_flag) &
		SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	if (unlikely(req->err_type || done != SEC_SQE_DONE ||
	    (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
	    (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
		dev_err_ratelimited(ctx->dev,
				    "err_type[%d],done[%d],flag[%d]\n",
				    req->err_type, done, flag);
		err = -EIO;
		atomic64_inc(&dfx->done_flag_cnt);
	}

	if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
		err = sec_aead_verify(req);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	spin_lock_bh(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		spin_unlock_bh(&qp_ctx->req_lock);
		return -EBUSY;
	}
	spin_unlock_bh(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (likely(!ret)) {
		ret = -EINPROGRESS;
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
	}

	return ret;
}

/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

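/*
 * The IV, MAC and pbuf areas below are carved out of single coherent
 * allocations per queue: res[0] owns the DMA buffer and every other
 * res[i] just points at a fixed offset inside it, so only the base
 * element is ever freed.
 */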
static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, pbuf is used for small packets
 * (< 512 bytes) when the IOMMU is enabled.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
				       &res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * SEC_PBUF_PKG contains data pbuf, iv and
	 * out_mac : <SEC_PBUF|SEC_IV|SEC_MAC>
	 * Every page holds SEC_PBUF_NUM packages (six with 4 KiB pages).
	 * The sec_qp_ctx needs QM_Q_DEPTH packages in total, so
	 * SEC_PBUF_PAGE_NUM whole pages plus SEC_PBUF_LEFT_SZ of leftover
	 * make up SEC_TOTAL_PBUF_SZ.
	 */
	for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == QM_Q_DEPTH)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}
	return 0;
}

static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct sec_alg_res *res = qp_ctx->res;
	struct device *dev = ctx->dev;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;
alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_fail:
	sec_free_civ_resource(dev, res);

	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}

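/*
 * Each queue pair context bundles the hardware qp, a cyclic request
 * IDR, a software backlog list, two hardware-SGL pools and the
 * per-request IV/MAC/pbuf resources allocated above.
 */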
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = ctx->dev;
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp->req_cb = sec_req_cb;

	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	spin_lock_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_destroy_idr;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);

	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	hisi_qm_stop_qp(qp_ctx->qp);
	sec_alg_resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
}

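/*
 * Base TFM context setup: the TFM gets ctx_q_num queues, the first half
 * is used for encryption and the second half for decryption (see
 * sec_alloc_queue_id()), and pbuf mode is only enabled when the device
 * sits behind an IOMMU.
 */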
static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of the queue depth is taken as the fake request limit of the queue. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return 0;
err_cipher_init:
	sec_ctx_base_uninit(ctx);

	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

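/*
 * XTS keys carry two full AES/SM4 keys (hence the doubled
 * SEC_XTS_*_KEY_SIZE limits), so the hardware key-length field is set
 * from half of the supplied key length.
 */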
static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		switch (keylen) {
		case AES_KEYSIZE_128:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case AES_KEYSIZE_192:
			c_ctx->c_key_len = SEC_CKEY_192BIT;
			break;
		case AES_KEYSIZE_256:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: aes key error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(dev, "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(dev, "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);

	return 0;
}

#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)

GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)

GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)

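/*
 * For requests that fit in a pbuf package the source scatterlist is
 * copied into the per-request package <data|IV|MAC> and the engine
 * works in place on that copy, avoiding hardware-SGL mapping for small
 * buffers.
 */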
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			       struct scatterlist *src)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
					qp_ctx->res[req_id].pbuf,
					copy_size);
	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}

	c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;

	if (!c_req->c_in_dma) {
		dev_err(dev, "fail to set pbuffer address!\n");
		return -ENOMEM;
	}

	c_req->c_out_dma = c_req->c_in_dma;

	return 0;
}

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
				  struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
					  qp_ctx->res[req_id].pbuf,
					  copy_size);
	if (unlikely(pbuf_length != copy_size))
		dev_err(dev, "copy pbuf data to dst error!\n");
}

static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = ctx->dev;
	int ret;

	if (req->use_pbuf) {
		ret = sec_cipher_pbuf_map(ctx, req, src);
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					SEC_PBUF_MAC_OFFSET;
		}

		return ret;
	}
	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						    qp_ctx->c_in_pool,
						    req->req_id,
						    &c_req->c_in_dma);

	if (IS_ERR(c_req->c_in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(c_req->c_in);
	}

	if (dst == src) {
		c_req->c_out = c_req->c_in;
		c_req->c_out_dma = c_req->c_in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = ctx->dev;

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src)
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);

		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

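/*
 * Standard HMAC key handling: an authentication key longer than the
 * hash block size is first digested down to the block size before it
 * is programmed into a_key.
 */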
static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	blocksize = crypto_shash_blocksize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = blocksize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec auth key err!\n");
		goto bad_key;
	}

	return 0;
bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));

	return -EINVAL;
}

#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
			     u32 keylen)				\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)

static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);

	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

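/*
 * Fill a type2 BD for a plain cipher request: program the key/IV/data
 * DMA addresses, the cipher algorithm, mode, key length and direction,
 * select SGL or pbuf addressing, and stash the request id in the tag
 * field so the completion handler can find the request again.
 */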
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						 SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						 SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	if (req->use_pbuf)
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
	else
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (c_req->c_in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	/* Just set DST address type */
	if (req->use_pbuf)
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	else
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	sec_sqe->sdm_addr_type |= da_type;

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

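/*
 * CBC chaining: the IV for the next request is the last ciphertext
 * block. On encryption it is read back from the destination after the
 * hardware completes; on decryption it is saved from the source before
 * the data may be overwritten by an in-place operation.
 */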
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
				cryptlen - iv_size);
	if (unlikely(sz != iv_size))
		dev_err(req->ctx->dev, "copy output iv error!\n");
}

static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
					  struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	spin_lock_bh(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	spin_unlock_bh(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

	/* IV output at encryption of CBC mode */
	if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
					      -EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

	sk_req->base.complete(&sk_req->base, err);
}

static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
}

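/*
 * The AEAD BD is the plain cipher BD plus the authentication fields:
 * key and MAC lengths are programmed in SEC_SQE_LEN_RATE (4-byte)
 * units, the direction flag selects SEC_CIPHER_AUTH ordering for
 * encryption and SEC_AUTH_CIPHER for decryption, and cipher_src_offset
 * skips the associated data, which is authenticated but not encrypted.
 */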
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd fill is error!\n");
		return ret;
	}

	sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}

static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy output mac */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  aead_req->out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);

		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		backlog_aead_req->base.complete(&backlog_aead_req->base,
						-EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

	a_req->base.complete(&a_req->base, err);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	return 0;
}

static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* IV output at decryption of CBC mode */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the user's IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);

	return ret;
}

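/*
 * The sec_req_op vtable is what lets skcipher and AEAD share the
 * generic sec_process()/sec_bd_send() path: only buffer mapping, IV
 * transfer, BD filling and the completion callback differ per
 * algorithm type.
 */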
static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_copy_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->req_op = &sec_skcipher_req_ops;

	return sec_skcipher_init(tfm);
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		dev_err(ctx->dev, "get error aead iv size!\n");
		return -EINVAL;
	}

	ctx->req_op = &sec_aead_req_ops;
	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);

	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(auth_ctx->hash_tfm)) {
		dev_err(ctx->dev, "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(auth_ctx->hash_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}

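/*
 * Request-time parameter checks: src/dst must be present and cryptlen
 * must be a multiple of the cipher block size; requests small enough to
 * fit in a pbuf package (<= SEC_PBUF_SZ) take the copy path instead of
 * hardware SGLs.
 */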
static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}
	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher aes input length error!\n");
			return -EINVAL;
		}
		return 0;
	}

	dev_err(dev, "skcipher algorithm error!\n");
	return -EINVAL;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen)
		return 0;

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)

static struct skcipher_alg sec_skciphers[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};

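/*
 * AEAD checks mirror the skcipher ones, with two extras: only
 * AES-CBC-based authenc() modes are supported, and on decryption the
 * cipher length excludes the trailing MAC that sits at the end of the
 * source data.
 */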
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!req->src || !req->dst || !req->cryptlen ||
		req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(dev, "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
		SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	/* Support AES only */
	if (unlikely(c_alg != SEC_CALG_AES)) {
		dev_err(dev, "aead crypto alg error!\n");
		return -EINVAL;
	}
	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;

	if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
		dev_err(dev, "aead crypto length error!\n");
		return -EINVAL;
	}

	return 0;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
			 ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
	SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
			 sec_aead_ctx_exit, blksize, ivsize, authsize)

static struct aead_alg sec_aeads[] = {
	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
};

int sec_register_to_crypto(void)
{
	int ret;

	/* To avoid repeated registration */
	ret = crypto_register_skciphers(sec_skciphers,
					ARRAY_SIZE(sec_skciphers));
	if (ret)
		return ret;

	ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
	if (ret)
		crypto_unregister_skciphers(sec_skciphers,
					    ARRAY_SIZE(sec_skciphers));
	return ret;
}

void sec_unregister_from_crypto(void)
{
	crypto_unregister_skciphers(sec_skciphers,
				    ARRAY_SIZE(sec_skciphers));
	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
}