// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * Some ideas are from atmel-aes.c drivers.
 */

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/skcipher.h>
#include "mtk-platform.h"

#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
				AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE		6

#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)

/* AES-CBC/ECB/CTR/OFB/CFB command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_OFB		cpu_to_le32(0x4 << 0)
#define AES_TFM_CFB128		cpu_to_le32(0x5 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)

/* AES flags */
#define AES_FLAGS_CIPHER_MSK	GENMASK(4, 0)
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_OFB		BIT(3)
#define AES_FLAGS_CFB128	BIT(4)
#define AES_FLAGS_GCM		BIT(5)
#define AES_FLAGS_ENCRYPT	BIT(6)
#define AES_FLAGS_BUSY		BIT(7)

#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))

/**
 * mtk_aes_info - hardware information of AES
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of cipher algorithm.
 * @state:	contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * | AES KEY   | 128/192/256 bits
 * |-----------|
 * | HASH KEY  | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * | IVs       | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all of this information to:
 * - decode commands and control its data path,
 * - coordinate hardware data fetch and store operations,
 * - construct and output the result token.
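 *
 * As an illustration (derived from mtk_aes_gcm_setkey() and
 * mtk_aes_gcm_info_init() below, not from a separate hardware spec): with
 * a 128-bit key, state[0..3] would hold the AES key, state[4..7] the
 * byte-swapped hash key H, and the GCM IV would start at state[8].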
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};

struct mtk_aes_reqctx {
	u64 mode;
};

struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	__le32 key[12];
	__le32 keymode;

	mtk_aes_fn start;

	struct mtk_aes_info info;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx	base;
};

struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	__be32	iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};

static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}

static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}

static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}

static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}

static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}

static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}

static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);
	return err;
}

/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for the authentication tag */
	if (aes->flags & AES_FLAGS_GCM)
		le32_add_cpu(&res->hdr, AES_BLOCK_SIZE);

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start the engine.
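	 * (The wmb() pairs with the relaxed PREP_COUNT MMIO writes below:
	 * the descriptor updates must be visible to the device before the
	 * doorbell that hands them over.)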
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}

static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}

static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}

/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;
	case AES_FLAGS_OFB:
		info->tfm[1] = AES_TFM_OFB;
		break;
	case AES_FLAGS_CFB128:
		info->tfm[1] = AES_TFM_CFB128;
		break;
	default:
		/* Should not happen... */
		return;
	}

	memcpy(info->state + ctx->keylen, req->iv, AES_BLOCK_SIZE);
ctr:
	le32_add_cpu(&info->tfm[0],
		     le32_to_cpu(AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE))));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}

static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}

static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);
	/* Write key into state buffer */
	memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}

static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}

static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen);
}

static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}

static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->cryptlen)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->cryptlen - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/*
	 * Check 32bit counter overflow: if the counter would wrap within
	 * this request, only process the blocks up to the wrap point now
	 * (e.g. start == 0xfffffffe leaves two blocks) and continue with a
	 * manually incremented IV in the next pass.
	 */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr = 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	memcpy(ctx->info.state + ctx->keylen, cctx->iv, AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}

static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}

/* Check the key length and save the AES key for the transform state buffer */
static int mtk_aes_setkey(struct crypto_skcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	memcpy(ctx->key, key, keylen);

	return 0;
}

static int mtk_aes_crypt(struct skcipher_request *req, u64 mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct mtk_aes_reqctx *rctx;
	struct mtk_cryp *cryp;

	cryp = mtk_aes_find_dev(ctx);
	if (!cryp)
		return -ENODEV;

	rctx = skcipher_request_ctx(req);
	rctx->mode = mode;

	return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

static int mtk_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}

static int mtk_aes_ofb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int mtk_aes_ofb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_OFB);
}

static int mtk_aes_cfb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
}

static int mtk_aes_cfb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CFB128);
}

static int mtk_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_start;
	return 0;
}

static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_ctr_start;
	return 0;
}

static struct skcipher_alg aes_algs[] = {
{
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_cbc_encrypt,
	.decrypt		= mtk_aes_cbc_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ecb_encrypt,
	.decrypt		= mtk_aes_ecb_decrypt,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ctr_encrypt,
	.decrypt		= mtk_aes_ctr_decrypt,
	.init			= mtk_aes_ctr_init_tfm,
},
{
	.base.cra_name		= "ofb(aes)",
	.base.cra_driver_name	= "ofb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ofb_encrypt,
	.decrypt		= mtk_aes_ofb_decrypt,
},
{
	.base.cra_name		= "cfb(aes)",
	.base.cra_driver_name	= "cfb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_cfb_encrypt,
	.decrypt		= mtk_aes_cfb_decrypt,
},
};

static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}

/*
 * The engine verifies and compares the tag automatically, so we just need
 * to check the returned status, which is stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	__le32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}

/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);

	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	memcpy(info->state + ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE),
	       req->iv, ivsize);
}

static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		if (aes->total > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}

/* Todo: GMAC */
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];

		aes->resume = mtk_aes_transfer_complete;
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Hardware will append the authentication tag to the output buffer */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->resume = mtk_aes_gcm_tag_verify;
		aes->total = len;
	}

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}

static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	struct mtk_cryp *cryp;
	bool enc = !!(mode & AES_FLAGS_ENCRYPT);

	cryp = mtk_aes_find_dev(ctx);
	if (!cryp)
		return -ENODEV;

	/* Compute text length. */
	gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);

	/* Empty messages are not supported yet */
	if (!gctx->textlen && !req->assoclen)
		return -EINVAL;

	rctx->mode = AES_FLAGS_GCM | mode;

	return mtk_aes_handle_queue(cryp, enc, &req->base);
}

/*
 * Because of a hardware limitation, we need to pre-compute the hash key
 * H (an all-zero block encrypted with the AES key) for the GHASH
 * operation and store the result in the transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	union {
		u32 x32[SIZE_IN_WORDS(AES_BLOCK_SIZE)];
		u8 x8[AES_BLOCK_SIZE];
	} hash = {};
	struct crypto_aes_ctx aes_ctx;
	int err;
	int i;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	err = aes_expandkey(&aes_ctx, key, keylen);
	if (err)
		return err;

	aes_encrypt(&aes_ctx, hash.x8, hash.x8);
	memzero_explicit(&aes_ctx, sizeof(aes_ctx));

	memcpy(ctx->key, key, keylen);

	/*
	 * Swap the 32-bit words of the hash key; the engine apparently
	 * expects H in this byte order in the state buffer.
	 */
	for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
		hash.x32[i] = swab32(hash.x32[i]);

	memcpy(ctx->key + ctx->keylen, &hash, AES_BLOCK_SIZE);

	return 0;
}

static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
	return 0;
}

static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, 0);
}

static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_gcm_start;
	return 0;
}

static struct aead_alg aes_gcm_alg = {
	.setkey		= mtk_aes_gcm_setkey,
	.setauthsize	= mtk_aes_gcm_setauthsize,
	.encrypt	= mtk_aes_gcm_encrypt,
	.decrypt	= mtk_aes_gcm_decrypt,
	.init		= mtk_aes_gcm_init,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-mtk",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};

static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}

static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

/*
 * The purpose of creating encryption and decryption records is to process
 * outbound/inbound data in parallel. This improves performance in most use
 * cases, such as IPsec VPN, especially under heavy network traffic.
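 *
 * (For reference: mtk_aes_record_init() below binds record 0 to ring0 and
 * record 1 to ring1, and mtk_aes_handle_queue() is called with a record id
 * derived from the request direction, so encryption and decryption traffic
 * land on different rings.)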
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->cryp = cryp;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
			     (unsigned long)aes[i]);
		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
			     (unsigned long)aes[i]);
	}

	/* Link to ring0 and ring1 respectively */
	aes[0]->id = MTK_RING0;
	aes[1]->id = MTK_RING1;

	return 0;

err_cleanup:
	for (; i--; ) {
		/* Free the whole AES_BUF_ORDER allocation */
		free_pages((unsigned long)aes[i]->buf, AES_BUF_ORDER);
		kfree(aes[i]);
	}

	return err;
}

static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->done_task);
		tasklet_kill(&cryp->aes[i]->queue_task);

		free_pages((unsigned long)cryp->aes[i]->buf, AES_BUF_ORDER);
		kfree(cryp->aes[i]);
	}
}

static void mtk_aes_unregister_algs(void)
{
	int i;

	crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_skcipher(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_skcipher(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	err = crypto_register_aead(&aes_gcm_alg);
	if (err)
		goto err_aes_algs;

	return 0;

err_aes_algs:
	for (; i--; )
		crypto_unregister_skcipher(&aes_algs[i]);

	return err;
}

int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupt */
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}

void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}