// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
 */
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/internal/des.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/rtnetlink.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/timer.h>

#include "picoxcell_crypto_regs.h"

/*
 * The threshold for the number of entries in the CMD FIFO available before
 * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
 * number of interrupts raised to the CPU.
 */
#define CMD0_IRQ_THRESHOLD	1

/*
 * The timeout period (in jiffies) for a PDU. When the number of PDUs in
 * flight is greater than the STAT_IRQ_THRESHOLD or 0 the timer is disabled.
 * When there are packets in flight but lower than the threshold, we enable
 * the timer and at expiry, attempt to remove any processed packets from the
 * queue and if there are still packets left, schedule the timer again.
 */
#define PACKET_TIMEOUT		1

/* The priority to register each algorithm with. */
#define SPACC_CRYPTO_ALG_PRIORITY	10000

#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN	16
#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ	64
#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ	64
#define SPACC_CRYPTO_IPSEC_MAX_CTXS	32
#define SPACC_CRYPTO_IPSEC_FIFO_SZ	32
#define SPACC_CRYPTO_L2_CIPHER_PG_SZ	64
#define SPACC_CRYPTO_L2_HASH_PG_SZ	64
#define SPACC_CRYPTO_L2_MAX_CTXS	128
#define SPACC_CRYPTO_L2_FIFO_SZ		128

#define MAX_DDT_LEN			16

/* DDT format. This must match the hardware DDT format exactly. */
struct spacc_ddt {
	dma_addr_t p;
	u32 len;
};
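
/*
 * Illustrative only: a DDT list is a flat array of (address, length) pairs
 * terminated by an all-zero entry. A two-fragment payload would be encoded
 * as:
 *
 *	ddt[0].p = dma_addr0; ddt[0].len = len0;
 *	ddt[1].p = dma_addr1; ddt[1].len = len1;
 *	ddt[2].p = 0;         ddt[2].len = 0;	<- terminator
 *
 * spacc_sg_to_ddt() builds these from scatterlists, and MAX_DDT_LEN bounds
 * the table size including the terminator.
 */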

/*
 * Asynchronous crypto request structure.
 *
 * This structure defines a request that is either queued for processing or
 * being processed.
 */
struct spacc_req {
	struct list_head list;
	struct spacc_engine *engine;
	struct crypto_async_request *req;
	int result;
	bool is_encrypt;
	unsigned ctx_id;
	dma_addr_t src_addr, dst_addr;
	struct spacc_ddt *src_ddt, *dst_ddt;
	void (*complete)(struct spacc_req *req);
	struct skcipher_request fallback_req;	/* keep at the end */
};

struct spacc_aead {
	unsigned long ctrl_default;
	unsigned long type;
	struct aead_alg alg;
	struct spacc_engine *engine;
	struct list_head entry;
	int key_offs;
	int iv_offs;
};

struct spacc_engine {
	void __iomem *regs;
	struct list_head pending;
	int next_ctx;
	spinlock_t hw_lock;
	int in_flight;
	struct list_head completed;
	struct list_head in_progress;
	struct tasklet_struct complete;
	unsigned long fifo_sz;
	void __iomem *cipher_ctx_base;
	void __iomem *hash_key_base;
	struct spacc_alg *algs;
	unsigned num_algs;
	struct list_head registered_algs;
	struct spacc_aead *aeads;
	unsigned num_aeads;
	struct list_head registered_aeads;
	size_t cipher_pg_sz;
	size_t hash_pg_sz;
	const char *name;
	struct clk *clk;
	struct device *dev;
	unsigned max_ctxs;
	struct timer_list packet_timeout;
	unsigned stat_irq_thresh;
	struct dma_pool *req_pool;
};

/* Algorithm type mask. */
#define SPACC_CRYPTO_ALG_MASK	0x7

/* SPACC definition of a crypto algorithm. */
struct spacc_alg {
	unsigned long ctrl_default;
	unsigned long type;
	struct skcipher_alg alg;
	struct spacc_engine *engine;
	struct list_head entry;
	int key_offs;
	int iv_offs;
};

/* Generic context structure for any algorithm type. */
struct spacc_generic_ctx {
	struct spacc_engine *engine;
	int flags;
	int key_offs;
	int iv_offs;
};

/* Block cipher context. */
struct spacc_ablk_ctx {
	struct spacc_generic_ctx generic;
	u8 key[AES_MAX_KEY_SIZE];
	u8 key_len;
	/*
	 * The fallback cipher. If the operation can't be done in hardware,
	 * fallback to a software version.
	 */
	struct crypto_skcipher *sw_cipher;
};

/* AEAD cipher context. */
struct spacc_aead_ctx {
	struct spacc_generic_ctx generic;
	u8 cipher_key[AES_MAX_KEY_SIZE];
	u8 hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
	u8 cipher_key_len;
	u8 hash_key_len;
	struct crypto_aead *sw_cipher;
};

static int spacc_ablk_submit(struct spacc_req *req);

static inline struct spacc_alg *to_spacc_skcipher(struct skcipher_alg *alg)
{
	return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
}

static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
{
	return container_of(alg, struct spacc_aead, alg);
}

static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
{
	u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);

	return fifo_stat & SPA_FIFO_CMD_FULL;
}

/*
 * Given a cipher context, and a context number, get the base address of the
 * context page.
 *
 * Returns the address of the context page where the key/context may
 * be written.
 */
static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
						unsigned indx,
						bool is_cipher_ctx)
{
	return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
			(indx * ctx->engine->cipher_pg_sz) :
		ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
}
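
/*
 * Worked example: on the IPSec engine the cipher context pages are
 * SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ (64) bytes each, so context index 3 maps
 * to cipher_ctx_base + 3 * 64. Hash key pages are laid out the same way
 * from hash_key_base.
 */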

/* The context pages can only be written with 32-bit accesses. */
static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
				 unsigned count)
{
	const u32 *src32 = (const u32 *) src;

	while (count--)
		writel(*src32++, dst++);
}

static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
				   void __iomem *page_addr, const u8 *key,
				   size_t key_len, const u8 *iv, size_t iv_len)
{
	void __iomem *key_ptr = page_addr + ctx->key_offs;
	void __iomem *iv_ptr = page_addr + ctx->iv_offs;

	memcpy_toio32(key_ptr, key, key_len / 4);
	memcpy_toio32(iv_ptr, iv, iv_len / 4);
}

/*
 * Load a context into the engine's context memory.
 *
 * Returns the index of the context page where the context was loaded.
 */
static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
			       const u8 *ciph_key, size_t ciph_len,
			       const u8 *iv, size_t ivlen, const u8 *hash_key,
			       size_t hash_len)
{
	unsigned indx = ctx->engine->next_ctx++;
	void __iomem *ciph_page_addr, *hash_page_addr;

	ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
	hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);

	ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
	spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
			       ivlen);
	writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
	       (1 << SPA_KEY_SZ_CIPHER_OFFSET),
	       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);

	if (hash_key) {
		memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
		writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
		       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
	}

	return indx;
}
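
/*
 * Note: the wrap of next_ctx above masks with (fifo_sz - 1), which relies
 * on the FIFO sizes (32 for the IPSec engine, 128 for the L2 engine) being
 * powers of two.
 */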

static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
{
	ddt->p = phys;
	ddt->len = len;
}

/*
 * Take a crypto request and scatterlists for the data and turn them into DDTs
 * for passing to the crypto engines. This also DMA maps the data so that the
 * crypto engines can DMA to/from them.
 */
static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
					 struct scatterlist *payload,
					 unsigned nbytes,
					 enum dma_data_direction dir,
					 dma_addr_t *ddt_phys)
{
	unsigned mapped_ents;
	struct scatterlist *cur;
	struct spacc_ddt *ddt;
	int i;
	int nents;

	nents = sg_nents_for_len(payload, nbytes);
	if (nents < 0) {
		dev_err(engine->dev, "Invalid numbers of SG.\n");
		return NULL;
	}
	mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);

	if (mapped_ents + 1 > MAX_DDT_LEN)
		goto out;

	ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
	if (!ddt)
		goto out;

	for_each_sg(payload, cur, mapped_ents, i)
		ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
	ddt_set(&ddt[mapped_ents], 0, 0);

	return ddt;

out:
	dma_unmap_sg(engine->dev, payload, nents, dir);
	return NULL;
}
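
/*
 * The "+ 1" in the MAX_DDT_LEN check above reserves room for the zero
 * terminator entry; the DMA pool created in spacc_probe() sizes each
 * allocation for MAX_DDT_LEN entries in total.
 */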

static int spacc_aead_make_ddts(struct aead_request *areq)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct spacc_req *req = aead_request_ctx(areq);
	struct spacc_engine *engine = req->engine;
	struct spacc_ddt *src_ddt, *dst_ddt;
	unsigned total;
	int src_nents, dst_nents;
	struct scatterlist *cur;
	int i, dst_ents, src_ents;

	total = areq->assoclen + areq->cryptlen;
	if (req->is_encrypt)
		total += crypto_aead_authsize(aead);

	src_nents = sg_nents_for_len(areq->src, total);
	if (src_nents < 0) {
		dev_err(engine->dev, "Invalid numbers of src SG.\n");
		return src_nents;
	}
	if (src_nents + 1 > MAX_DDT_LEN)
		return -E2BIG;

	dst_nents = 0;
	if (areq->src != areq->dst) {
		dst_nents = sg_nents_for_len(areq->dst, total);
		if (dst_nents < 0) {
			dev_err(engine->dev, "Invalid numbers of dst SG.\n");
			return dst_nents;
		}
		if (dst_nents + 1 > MAX_DDT_LEN)
			return -E2BIG;
	}

	src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
	if (!src_ddt)
		goto err;

	dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
	if (!dst_ddt)
		goto err_free_src;

	req->src_ddt = src_ddt;
	req->dst_ddt = dst_ddt;

	if (dst_nents) {
		src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
				      DMA_TO_DEVICE);
		if (!src_ents)
			goto err_free_dst;

		dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
				      DMA_FROM_DEVICE);

		if (!dst_ents) {
			dma_unmap_sg(engine->dev, areq->src, src_nents,
				     DMA_TO_DEVICE);
			goto err_free_dst;
		}
	} else {
		src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
				      DMA_BIDIRECTIONAL);
		if (!src_ents)
			goto err_free_dst;
		dst_ents = src_ents;
	}

	/*
	 * Now map in the payload for the source and destination and terminate
	 * with the NULL pointers.
	 */
	for_each_sg(areq->src, cur, src_ents, i)
		ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));

	/* For decryption we need to skip the associated data. */
	total = req->is_encrypt ? 0 : areq->assoclen;
	for_each_sg(areq->dst, cur, dst_ents, i) {
		unsigned len = sg_dma_len(cur);

		if (len <= total) {
			total -= len;
			continue;
		}

		ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
	}

	ddt_set(src_ddt, 0, 0);
	ddt_set(dst_ddt, 0, 0);

	return 0;

err_free_dst:
	dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
err_free_src:
	dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
err:
	return -ENOMEM;
}

static void spacc_aead_free_ddts(struct spacc_req *req)
{
	struct aead_request *areq = container_of(req->req, struct aead_request,
						 base);
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned total = areq->assoclen + areq->cryptlen +
			 (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
	struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
	struct spacc_engine *engine = aead_ctx->generic.engine;
	int nents = sg_nents_for_len(areq->src, total);

	/* sg_nents_for_len should not fail since it works when mapping sg */
	if (unlikely(nents < 0)) {
		dev_err(engine->dev, "Invalid numbers of src SG.\n");
		return;
	}

	if (areq->src != areq->dst) {
		dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
		nents = sg_nents_for_len(areq->dst, total);
		if (unlikely(nents < 0)) {
			dev_err(engine->dev, "Invalid numbers of dst SG.\n");
			return;
		}
		dma_unmap_sg(engine->dev, areq->dst, nents, DMA_FROM_DEVICE);
	} else
		dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);

	dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
	dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
}

static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
			   dma_addr_t ddt_addr, struct scatterlist *payload,
			   unsigned nbytes, enum dma_data_direction dir)
{
	int nents = sg_nents_for_len(payload, nbytes);

	if (nents < 0) {
		dev_err(req->engine->dev, "Invalid numbers of SG.\n");
		return;
	}

	dma_unmap_sg(req->engine->dev, payload, nents, dir);
	dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
}

static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;
	int err;

	crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
	if (err)
		return err;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.enckeylen > AES_MAX_KEY_SIZE)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->hash_ctx))
		goto badkey;

	memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
	ctx->cipher_key_len = keys.enckeylen;

	memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
	ctx->hash_key_len = keys.authkeylen;

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
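
/*
 * Note: the key blob passed to an authenc() transform is not a raw key; it
 * is an rtattr-encoded buffer from which crypto_authenc_extractkeys()
 * recovers the separate authentication and encryption keys checked above.
 */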

static int spacc_aead_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));

	return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
}

/*
 * Check if an AEAD request requires a fallback operation. Some requests can't
 * be completed in hardware because the hardware may not support certain key
 * sizes. In these cases we need to complete the request in software.
 */
static int spacc_aead_need_fallback(struct aead_request *aead_req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);

	/*
	 * If we have a non-supported key-length, then we need to do a
	 * software fallback.
	 */
	if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
	    SPA_CTRL_CIPH_ALG_AES &&
	    ctx->cipher_key_len != AES_KEYSIZE_128 &&
	    ctx->cipher_key_len != AES_KEYSIZE_256)
		return 1;

	return 0;
}

static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
				  bool is_encrypt)
{
	struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, ctx->sw_cipher);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	return is_encrypt ? crypto_aead_encrypt(subreq) :
			    crypto_aead_decrypt(subreq);
}

static void spacc_aead_complete(struct spacc_req *req)
{
	spacc_aead_free_ddts(req);
	req->req->complete(req->req, req->result);
}
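
/*
 * Request completions (this function and spacc_ablk_complete()) are invoked
 * from the spacc_spacc_complete() tasklet, i.e. in softirq context, not
 * from the hard IRQ handler.
 */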

static int spacc_aead_submit(struct spacc_req *req)
{
	struct aead_request *aead_req =
		container_of(req->req, struct aead_request, base);
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
	struct spacc_engine *engine = ctx->generic.engine;
	u32 ctrl, proc_len, assoc_len;

	req->result = -EINPROGRESS;
	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
		ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
		ctx->hash_ctx, ctx->hash_key_len);

	/* Set the source and destination DDT pointers. */
	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

	assoc_len = aead_req->assoclen;
	proc_len = aead_req->cryptlen + assoc_len;

	/*
	 * If we are decrypting, we need to take the length of the ICV out of
	 * the processing length.
	 */
	if (!req->is_encrypt)
		proc_len -= authsize;

	writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
	writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
	writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);

	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
	       (1 << SPA_CTRL_ICV_APPEND);
	if (req->is_encrypt)
		ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
	else
		ctrl |= (1 << SPA_CTRL_KEY_EXP);

	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

	return -EINPROGRESS;
}

static int spacc_req_submit(struct spacc_req *req);

static void spacc_push(struct spacc_engine *engine)
{
	struct spacc_req *req;

	while (!list_empty(&engine->pending) &&
	       engine->in_flight + 1 <= engine->fifo_sz) {

		++engine->in_flight;
		req = list_first_entry(&engine->pending, struct spacc_req,
				       list);
		list_move_tail(&req->list, &engine->in_progress);

		req->result = spacc_req_submit(req);
	}
}

/*
 * Setup an AEAD request for processing. This will configure the engine, load
 * the context and then start the packet processing.
 */
static int spacc_aead_setup(struct aead_request *req,
			    unsigned alg_type, bool is_encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct spacc_engine *engine = to_spacc_aead(alg)->engine;
	struct spacc_req *dev_req = aead_request_ctx(req);
	int err;
	unsigned long flags;

	dev_req->req = &req->base;
	dev_req->is_encrypt = is_encrypt;
	dev_req->result = -EBUSY;
	dev_req->engine = engine;
	dev_req->complete = spacc_aead_complete;

	if (unlikely(spacc_aead_need_fallback(req) ||
		     ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
		return spacc_aead_do_fallback(req, alg_type, is_encrypt);

	if (err)
		goto out;

	err = -EINPROGRESS;
	spin_lock_irqsave(&engine->hw_lock, flags);
	if (unlikely(spacc_fifo_cmd_full(engine)) ||
	    engine->in_flight + 1 > engine->fifo_sz) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			spin_unlock_irqrestore(&engine->hw_lock, flags);
			goto out_free_ddts;
		}
		list_add_tail(&dev_req->list, &engine->pending);
	} else {
		list_add_tail(&dev_req->list, &engine->pending);
		spacc_push(engine);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);

	goto out;

out_free_ddts:
	spacc_aead_free_ddts(dev_req);
out:
	return err;
}

static int spacc_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));

	return spacc_aead_setup(req, alg->type, 1);
}

static int spacc_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));

	return spacc_aead_setup(req, alg->type, 0);
}
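
/*
 * A minimal usage sketch (hypothetical caller, error handling elided): the
 * AEADs registered below are reached through the normal kernel AEAD API,
 * for example:
 *
 *	struct crypto_aead *tfm =
 *		crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *
 *	crypto_aead_setkey(tfm, key, keylen);	<- rtattr-encoded authenc key
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *
 * crypto_aead_encrypt()/crypto_aead_decrypt() on an aead_request then land
 * in spacc_aead_encrypt()/spacc_aead_decrypt() above.
 */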

/*
 * Initialise a new AEAD context. This is responsible for allocating the
 * fallback cipher and initialising the context.
 */
static int spacc_aead_cra_init(struct crypto_aead *tfm)
{
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
	struct spacc_engine *engine = spacc_alg->engine;

	ctx->generic.flags = spacc_alg->type;
	ctx->generic.engine = engine;
	ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->sw_cipher))
		return PTR_ERR(ctx->sw_cipher);
	ctx->generic.key_offs = spacc_alg->key_offs;
	ctx->generic.iv_offs = spacc_alg->iv_offs;

	crypto_aead_set_reqsize(
		tfm,
		max(sizeof(struct spacc_req),
		    sizeof(struct aead_request) +
		    crypto_aead_reqsize(ctx->sw_cipher)));

	return 0;
}

/*
 * Destructor for an AEAD context. This is called when the transform is freed
 * and must free the fallback cipher.
 */
static void spacc_aead_cra_exit(struct crypto_aead *tfm)
{
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->sw_cipher);
}

/*
 * Set the DES key for a block cipher transform. This also performs weak key
 * checking if the transform has requested it.
 */
static int spacc_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
			    unsigned int len)
{
	struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

	return 0;
}

/*
 * Set the 3DES key for a block cipher transform. This also performs weak key
 * checking if the transform has requested it.
 */
static int spacc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
			     unsigned int len)
{
	struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

	return 0;
}
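
/*
 * Both DES setkey paths use the generic verify_skcipher_des*_key() helpers,
 * which only reject weak keys when CRYPTO_TFM_REQ_FORBID_WEAK_KEYS is set
 * on the transform, matching the "if the transform has requested it"
 * wording above.
 */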

/*
 * Set the key for an AES block cipher. Some key lengths are not supported in
 * hardware so this must also check whether a fallback is needed.
 */
static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			    unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	int err = 0;

	if (len > AES_MAX_KEY_SIZE)
		return -EINVAL;

	/*
	 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
	 * request for any other size (192 bits) then we need to do a software
	 * fallback.
	 */
	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
		if (!ctx->sw_cipher)
			return -EINVAL;

		/*
		 * Set the fallback transform to use the same request flags as
		 * the hardware transform.
		 */
		crypto_skcipher_clear_flags(ctx->sw_cipher,
					    CRYPTO_TFM_REQ_MASK);
		crypto_skcipher_set_flags(ctx->sw_cipher,
					  cipher->base.crt_flags &
					  CRYPTO_TFM_REQ_MASK);

		err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
		if (err)
			goto sw_setkey_failed;
	}

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

sw_setkey_failed:
	return err;
}

static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher,
				  const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	int err = 0;

	if (len > AES_MAX_KEY_SIZE) {
		err = -EINVAL;
		goto out;
	}

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

out:
	return err;
}

static int spacc_ablk_need_fallback(struct spacc_req *req)
{
	struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
	struct spacc_alg *spacc_alg = to_spacc_skcipher(crypto_skcipher_alg(tfm));
	struct spacc_ablk_ctx *ctx;

	ctx = crypto_skcipher_ctx(tfm);

	return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
			SPA_CTRL_CIPH_ALG_AES &&
			ctx->key_len != AES_KEYSIZE_128 &&
			ctx->key_len != AES_KEYSIZE_256;
}

static void spacc_ablk_complete(struct spacc_req *req)
{
	struct skcipher_request *ablk_req = skcipher_request_cast(req->req);

	if (ablk_req->src != ablk_req->dst) {
		spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
			       ablk_req->cryptlen, DMA_TO_DEVICE);
		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
			       ablk_req->cryptlen, DMA_FROM_DEVICE);
	} else
		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
			       ablk_req->cryptlen, DMA_BIDIRECTIONAL);

	req->req->complete(req->req, req->result);
}

static int spacc_ablk_submit(struct spacc_req *req)
{
	struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
	struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct spacc_engine *engine = ctx->generic.engine;
	u32 ctrl;

	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
		ctx->key_len, ablk_req->iv, alg->ivsize,
		NULL, 0);

	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

	writel(ablk_req->cryptlen, engine->regs + SPA_PROC_LEN_REG_OFFSET);
	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
	writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);

	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
	       (req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
				  (1 << SPA_CTRL_KEY_EXP));

	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

	return -EINPROGRESS;
}

static int spacc_ablk_do_fallback(struct skcipher_request *req,
				  unsigned alg_type, bool is_encrypt)
{
	struct crypto_tfm *old_tfm =
		crypto_skcipher_tfm(crypto_skcipher_reqtfm(req));
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
	struct spacc_req *dev_req = skcipher_request_ctx(req);
	int err;

	/*
	 * Change the request to use the software fallback transform, and once
	 * the ciphering has completed, put the old transform back into the
	 * request.
	 */
	skcipher_request_set_tfm(&dev_req->fallback_req, ctx->sw_cipher);
	skcipher_request_set_callback(&dev_req->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&dev_req->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);
	err = is_encrypt ? crypto_skcipher_encrypt(&dev_req->fallback_req) :
			   crypto_skcipher_decrypt(&dev_req->fallback_req);

	return err;
}
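
/*
 * Note that fallback_req is deliberately the last member of struct
 * spacc_req ("keep at the end"): spacc_ablk_init_tfm() shrinks the request
 * size to offsetof(struct spacc_req, fallback_req) when no software
 * fallback is required.
 */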

static int spacc_ablk_setup(struct skcipher_request *req, unsigned alg_type,
			    bool is_encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct spacc_engine *engine = to_spacc_skcipher(alg)->engine;
	struct spacc_req *dev_req = skcipher_request_ctx(req);
	unsigned long flags;
	int err = -ENOMEM;

	dev_req->req = &req->base;
	dev_req->is_encrypt = is_encrypt;
	dev_req->engine = engine;
	dev_req->complete = spacc_ablk_complete;
	dev_req->result = -EINPROGRESS;

	if (unlikely(spacc_ablk_need_fallback(dev_req)))
		return spacc_ablk_do_fallback(req, alg_type, is_encrypt);

	/*
	 * Create the DDTs for the engine. If we share the same source and
	 * destination then we can optimize by reusing the DDTs.
	 */
	if (req->src != req->dst) {
		dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
			req->cryptlen, DMA_TO_DEVICE, &dev_req->src_addr);
		if (!dev_req->src_ddt)
			goto out;

		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
			req->cryptlen, DMA_FROM_DEVICE, &dev_req->dst_addr);
		if (!dev_req->dst_ddt)
			goto out_free_src;
	} else {
		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
			req->cryptlen, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
		if (!dev_req->dst_ddt)
			goto out;

		dev_req->src_ddt = NULL;
		dev_req->src_addr = dev_req->dst_addr;
	}

	err = -EINPROGRESS;
	spin_lock_irqsave(&engine->hw_lock, flags);
	/*
	 * Check if the engine will accept the operation now. If it won't then
	 * we either stick it on the end of a pending list if we can backlog,
	 * or bailout with an error if not.
	 */
	if (unlikely(spacc_fifo_cmd_full(engine)) ||
	    engine->in_flight + 1 > engine->fifo_sz) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			spin_unlock_irqrestore(&engine->hw_lock, flags);
			goto out_free_ddts;
		}
		list_add_tail(&dev_req->list, &engine->pending);
	} else {
		list_add_tail(&dev_req->list, &engine->pending);
		spacc_push(engine);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);

	goto out;

out_free_ddts:
	spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
		       req->cryptlen, req->src == req->dst ?
		       DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
out_free_src:
	if (req->src != req->dst)
		spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
			       req->src, req->cryptlen, DMA_TO_DEVICE);
out:
	return err;
}

static int spacc_ablk_init_tfm(struct crypto_skcipher *tfm)
{
	struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
	struct spacc_engine *engine = spacc_alg->engine;

	ctx->generic.flags = spacc_alg->type;
	ctx->generic.engine = engine;
	if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
						       CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->sw_cipher)) {
			dev_warn(engine->dev, "failed to allocate fallback for %s\n",
				 alg->base.cra_name);
			return PTR_ERR(ctx->sw_cipher);
		}
		crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req) +
					    crypto_skcipher_reqsize(ctx->sw_cipher));
	} else {
		/* take the size without the fallback skcipher_request at the end */
		crypto_skcipher_set_reqsize(tfm, offsetof(struct spacc_req,
							  fallback_req));
	}

	ctx->generic.key_offs = spacc_alg->key_offs;
	ctx->generic.iv_offs = spacc_alg->iv_offs;

	return 0;
}

static void spacc_ablk_exit_tfm(struct crypto_skcipher *tfm)
{
	struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->sw_cipher);
}

static int spacc_ablk_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
	struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);

	return spacc_ablk_setup(req, spacc_alg->type, 1);
}

static int spacc_ablk_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
	struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);

	return spacc_ablk_setup(req, spacc_alg->type, 0);
}

static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
{
	return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
	       SPA_FIFO_STAT_EMPTY;
}

static void spacc_process_done(struct spacc_engine *engine)
{
	struct spacc_req *req;
	unsigned long flags;

	spin_lock_irqsave(&engine->hw_lock, flags);

	while (!spacc_fifo_stat_empty(engine)) {
		req = list_first_entry(&engine->in_progress, struct spacc_req,
				       list);
		list_move_tail(&req->list, &engine->completed);
		--engine->in_flight;

		/* POP the status register. */
		writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
		req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
			       SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;

		/*
		 * Convert the SPAcc error status into the standard POSIX error
		 * codes.
		 */
		if (unlikely(req->result)) {
			switch (req->result) {
			case SPA_STATUS_ICV_FAIL:
				req->result = -EBADMSG;
				break;

			case SPA_STATUS_MEMORY_ERROR:
				dev_warn(engine->dev,
					 "memory error triggered\n");
				req->result = -EFAULT;
				break;

			case SPA_STATUS_BLOCK_ERROR:
				dev_warn(engine->dev,
					 "block error triggered\n");
				req->result = -EIO;
				break;
			}
		}
	}

	tasklet_schedule(&engine->complete);

	spin_unlock_irqrestore(&engine->hw_lock, flags);
}

static irqreturn_t spacc_spacc_irq(int irq, void *dev)
{
	struct spacc_engine *engine = (struct spacc_engine *)dev;
	u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);

	writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
	spacc_process_done(engine);

	return IRQ_HANDLED;
}

static void spacc_packet_timeout(struct timer_list *t)
{
	struct spacc_engine *engine = from_timer(engine, t, packet_timeout);

	spacc_process_done(engine);
}

static int spacc_req_submit(struct spacc_req *req)
{
	struct crypto_alg *alg = req->req->tfm->__crt_alg;

	if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
		return spacc_aead_submit(req);
	else
		return spacc_ablk_submit(req);
}

static void spacc_spacc_complete(unsigned long data)
{
	struct spacc_engine *engine = (struct spacc_engine *)data;
	struct spacc_req *req, *tmp;
	unsigned long flags;
	LIST_HEAD(completed);

	spin_lock_irqsave(&engine->hw_lock, flags);

	list_splice_init(&engine->completed, &completed);
	spacc_push(engine);
	if (engine->in_flight)
		mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	spin_unlock_irqrestore(&engine->hw_lock, flags);

	list_for_each_entry_safe(req, tmp, &completed, list) {
		list_del(&req->list);
		req->complete(req);
	}
}
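
/*
 * Overall completion flow: the STAT_CNT interrupt (or the packet timeout)
 * runs spacc_process_done(), which moves finished requests from in_progress
 * to the completed list and schedules this tasklet. The tasklet refills the
 * command FIFO from the pending list and then runs the completion callbacks
 * outside of the hardware lock.
 */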

#ifdef CONFIG_PM
static int spacc_suspend(struct device *dev)
{
	struct spacc_engine *engine = dev_get_drvdata(dev);

	/*
	 * We only support standby mode. All we have to do is gate the clock to
	 * the spacc. The hardware will preserve state until we turn it back
	 * on again.
	 */
	clk_disable(engine->clk);

	return 0;
}

static int spacc_resume(struct device *dev)
{
	struct spacc_engine *engine = dev_get_drvdata(dev);

	return clk_enable(engine->clk);
}

static const struct dev_pm_ops spacc_pm_ops = {
	.suspend	= spacc_suspend,
	.resume		= spacc_resume,
};
#endif /* CONFIG_PM */

static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
{
	return dev ? dev_get_drvdata(dev) : NULL;
}

static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct spacc_engine *engine = spacc_dev_to_engine(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
}

static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t len)
{
	struct spacc_engine *engine = spacc_dev_to_engine(dev);
	unsigned long thresh;

	if (kstrtoul(buf, 0, &thresh))
		return -EINVAL;

	thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);

	engine->stat_irq_thresh = thresh;
	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);

	return len;
}

static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
		   spacc_stat_irq_thresh_store);
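
/*
 * Example (sysfs path is platform dependent): raising the threshold batches
 * more completions per interrupt at the cost of latency, e.g.
 *
 *	echo 24 > /sys/devices/platform/<spacc-device>/stat_irq_thresh
 *
 * The store handler above clamps the value to [1, fifo_sz - 1].
 */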

static struct spacc_alg ipsec_engine_algs[] = {
	{
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.alg = {
			.base.cra_name = "cbc(aes)",
			.base.cra_driver_name = "cbc-aes-picoxcell",
			.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.base.cra_module = THIS_MODULE,

			.setkey = spacc_aes_setkey,
			.encrypt = spacc_ablk_encrypt,
			.decrypt = spacc_ablk_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.init = spacc_ablk_init_tfm,
			.exit = spacc_ablk_exit_tfm,
		},
	},
	{
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.base.cra_name = "ecb(aes)",
			.base.cra_driver_name = "ecb-aes-picoxcell",
			.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.base.cra_module = THIS_MODULE,

			.setkey = spacc_aes_setkey,
			.encrypt = spacc_ablk_encrypt,
			.decrypt = spacc_ablk_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.init = spacc_ablk_init_tfm,
			.exit = spacc_ablk_exit_tfm,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
		.alg = {
			.base.cra_name = "cbc(des)",
			.base.cra_driver_name = "cbc-des-picoxcell",
			.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
			.base.cra_blocksize = DES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.base.cra_module = THIS_MODULE,

			.setkey = spacc_des_setkey,
			.encrypt = spacc_ablk_encrypt,
			.decrypt = spacc_ablk_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.init = spacc_ablk_init_tfm,
			.exit = spacc_ablk_exit_tfm,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.base.cra_name = "ecb(des)",
			.base.cra_driver_name = "ecb-des-picoxcell",
			.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
			.base.cra_blocksize = DES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.base.cra_module = THIS_MODULE,

			.setkey = spacc_des_setkey,
			.encrypt = spacc_ablk_encrypt,
			.decrypt = spacc_ablk_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.init = spacc_ablk_init_tfm,
			.exit = spacc_ablk_exit_tfm,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
		.alg = {
			.base.cra_name = "cbc(des3_ede)",
			.base.cra_driver_name = "cbc-des3-ede-picoxcell",
			.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.base.cra_flags = CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.base.cra_module = THIS_MODULE,

			.setkey = spacc_des3_setkey,
			.encrypt = spacc_ablk_encrypt,
			.decrypt = spacc_ablk_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.init = spacc_ablk_init_tfm,
			.exit = spacc_ablk_exit_tfm,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.base.cra_name = "ecb(des3_ede)",
			.base.cra_driver_name = "ecb-des3-ede-picoxcell",
			.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.base.cra_flags = CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.base.cra_module = THIS_MODULE,

			.setkey = spacc_des3_setkey,
			.encrypt = spacc_ablk_encrypt,
			.decrypt = spacc_ablk_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.init = spacc_ablk_init_tfm,
			.exit = spacc_ablk_exit_tfm,
		},
	},
};

static struct spacc_aead ipsec_engine_aeads[] = {
	{
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA |
				SPA_CTRL_HASH_MODE_HMAC,
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.alg = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-picoxcell",
				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY |
					     CRYPTO_ALG_NEED_FALLBACK |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
				.cra_module = THIS_MODULE,
			},
			.setkey = spacc_aead_setkey,
			.setauthsize = spacc_aead_setauthsize,
			.encrypt = spacc_aead_encrypt,
			.decrypt = spacc_aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.init = spacc_aead_cra_init,
			.exit = spacc_aead_cra_exit,
		},
	},
	{
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA256 |
				SPA_CTRL_HASH_MODE_HMAC,
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.alg = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-picoxcell",
				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY |
					     CRYPTO_ALG_NEED_FALLBACK |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
				.cra_module = THIS_MODULE,
			},
			.setkey = spacc_aead_setkey,
			.setauthsize = spacc_aead_setauthsize,
			.encrypt = spacc_aead_encrypt,
			.decrypt = spacc_aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.init = spacc_aead_cra_init,
			.exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_MD5 |
				SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-picoxcell",
				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY |
					     CRYPTO_ALG_NEED_FALLBACK |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
				.cra_module = THIS_MODULE,
			},
			.setkey = spacc_aead_setkey,
			.setauthsize = spacc_aead_setauthsize,
			.encrypt = spacc_aead_encrypt,
			.decrypt = spacc_aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			.init = spacc_aead_cra_init,
			.exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA |
				SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-picoxcell",
				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY |
					     CRYPTO_ALG_NEED_FALLBACK |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
				.cra_module = THIS_MODULE,
			},
			.setkey = spacc_aead_setkey,
			.setauthsize = spacc_aead_setauthsize,
			.encrypt = spacc_aead_encrypt,
			.decrypt = spacc_aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.init = spacc_aead_cra_init,
			.exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA256 |
				SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-picoxcell",
				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY |
					     CRYPTO_ALG_NEED_FALLBACK |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
				.cra_module = THIS_MODULE,
			},
			.setkey = spacc_aead_setkey,
			.setauthsize = spacc_aead_setauthsize,
			.encrypt = spacc_aead_encrypt,
			.decrypt = spacc_aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.init = spacc_aead_cra_init,
			.exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_MD5 |
				SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-picoxcell",
				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY |
					     CRYPTO_ALG_NEED_FALLBACK |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
				.cra_module = THIS_MODULE,
			},
			.setkey = spacc_aead_setkey,
			.setauthsize = spacc_aead_setauthsize,
			.encrypt = spacc_aead_encrypt,
			.decrypt = spacc_aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			.init = spacc_aead_cra_init,
			.exit = spacc_aead_cra_exit,
		},
	},
};

static struct spacc_alg l2_engine_algs[] = {
	{
		.key_offs = 0,
		.iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
		.ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
				SPA_CTRL_CIPH_MODE_F8,
		.alg = {
			.base.cra_name = "f8(kasumi)",
			.base.cra_driver_name = "f8-kasumi-picoxcell",
			.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.base.cra_flags = CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
			.base.cra_blocksize = 8,
			.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.base.cra_module = THIS_MODULE,

			.setkey = spacc_kasumi_f8_setkey,
			.encrypt = spacc_ablk_encrypt,
			.decrypt = spacc_ablk_decrypt,
			.min_keysize = 16,
			.max_keysize = 16,
			.ivsize = 8,
			.init = spacc_ablk_init_tfm,
			.exit = spacc_ablk_exit_tfm,
		},
	},
};

#ifdef CONFIG_OF
static const struct of_device_id spacc_of_id_table[] = {
	{ .compatible = "picochip,spacc-ipsec" },
	{ .compatible = "picochip,spacc-l2" },
	{}
};
MODULE_DEVICE_TABLE(of, spacc_of_id_table);
#endif /* CONFIG_OF */
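
/*
 * A sketch of a matching device tree node (register address, size and
 * interrupt number are illustrative only):
 *
 *	spacc@80040000 {
 *		compatible = "picochip,spacc-ipsec";
 *		reg = <0x80040000 0x10000>;
 *		interrupts = <24>;
 *		clocks = <&clks 12>;
 *		clock-names = "ref";
 *	};
 *
 * spacc_probe() picks the IPSec or L2 parameter set from the compatible
 * string and requires the "ref" clock.
 */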

static void spacc_tasklet_kill(void *data)
{
	tasklet_kill(data);
}

static int spacc_probe(struct platform_device *pdev)
{
	int i, err, ret;
	struct resource *irq;
	struct device_node *np = pdev->dev.of_node;
	struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
						   GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	if (of_device_is_compatible(np, "picochip,spacc-ipsec")) {
		engine->max_ctxs = SPACC_CRYPTO_IPSEC_MAX_CTXS;
		engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
		engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
		engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ;
		engine->algs = ipsec_engine_algs;
		engine->num_algs = ARRAY_SIZE(ipsec_engine_algs);
		engine->aeads = ipsec_engine_aeads;
		engine->num_aeads = ARRAY_SIZE(ipsec_engine_aeads);
	} else if (of_device_is_compatible(np, "picochip,spacc-l2")) {
		engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS;
		engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
		engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ;
		engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ;
		engine->algs = l2_engine_algs;
		engine->num_algs = ARRAY_SIZE(l2_engine_algs);
	} else {
		return -EINVAL;
	}

	engine->name = dev_name(&pdev->dev);

	engine->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(engine->regs))
		return PTR_ERR(engine->regs);

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no memory/irq resource for engine\n");
		return -ENXIO;
	}

	tasklet_init(&engine->complete, spacc_spacc_complete,
		     (unsigned long)engine);

	ret = devm_add_action(&pdev->dev, spacc_tasklet_kill,
			      &engine->complete);
	if (ret)
		return ret;

	if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
			     engine->name, engine)) {
		dev_err(engine->dev, "failed to request IRQ\n");
		return -EBUSY;
	}

	engine->dev = &pdev->dev;
	engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
	engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;

	engine->req_pool = dmam_pool_create(engine->name, engine->dev,
		MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
	if (!engine->req_pool)
		return -ENOMEM;

	spin_lock_init(&engine->hw_lock);

	engine->clk = clk_get(&pdev->dev, "ref");
	if (IS_ERR(engine->clk)) {
		dev_info(&pdev->dev, "clk unavailable\n");
		return PTR_ERR(engine->clk);
	}

	if (clk_prepare_enable(engine->clk)) {
		dev_info(&pdev->dev, "unable to prepare/enable clk\n");
		ret = -EIO;
		goto err_clk_put;
	}

	/*
	 * Use an IRQ threshold of 50% as a default. This seems to be a
	 * reasonable trade off of latency against throughput but can be
	 * changed at runtime.
	 */
	engine->stat_irq_thresh = (engine->fifo_sz / 2);

	ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
	if (ret)
		goto err_clk_disable;

	/*
	 * Configure the interrupts. We only use the STAT_CNT interrupt as we
	 * only submit a new packet for processing when we complete another in
	 * the queue. This minimizes time spent in the interrupt handler.
	 */
	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
	writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
	       engine->regs + SPA_IRQ_EN_REG_OFFSET);

	timer_setup(&engine->packet_timeout, spacc_packet_timeout, 0);

	INIT_LIST_HEAD(&engine->pending);
	INIT_LIST_HEAD(&engine->completed);
	INIT_LIST_HEAD(&engine->in_progress);
	engine->in_flight = 0;

	platform_set_drvdata(pdev, engine);

	ret = -EINVAL;
	INIT_LIST_HEAD(&engine->registered_algs);
	for (i = 0; i < engine->num_algs; ++i) {
		engine->algs[i].engine = engine;
		err = crypto_register_skcipher(&engine->algs[i].alg);
		if (!err) {
			list_add_tail(&engine->algs[i].entry,
				      &engine->registered_algs);
			ret = 0;
		}
		if (err)
			dev_err(engine->dev, "failed to register alg \"%s\"\n",
				engine->algs[i].alg.base.cra_name);
		else
			dev_dbg(engine->dev, "registered alg \"%s\"\n",
				engine->algs[i].alg.base.cra_name);
	}

	INIT_LIST_HEAD(&engine->registered_aeads);
	for (i = 0; i < engine->num_aeads; ++i) {
		engine->aeads[i].engine = engine;
		err = crypto_register_aead(&engine->aeads[i].alg);
		if (!err) {
			list_add_tail(&engine->aeads[i].entry,
				      &engine->registered_aeads);
			ret = 0;
		}
		if (err)
			dev_err(engine->dev, "failed to register alg \"%s\"\n",
				engine->aeads[i].alg.base.cra_name);
		else
			dev_dbg(engine->dev, "registered alg \"%s\"\n",
				engine->aeads[i].alg.base.cra_name);
	}

	if (!ret)
		return 0;

	del_timer_sync(&engine->packet_timeout);
	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
err_clk_disable:
	clk_disable_unprepare(engine->clk);
err_clk_put:
	clk_put(engine->clk);

	return ret;
}

static int spacc_remove(struct platform_device *pdev)
{
	struct spacc_aead *aead, *an;
	struct spacc_alg *alg, *next;
	struct spacc_engine *engine = platform_get_drvdata(pdev);

	del_timer_sync(&engine->packet_timeout);
	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);

	list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
		list_del(&aead->entry);
		crypto_unregister_aead(&aead->alg);
	}

	list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
		list_del(&alg->entry);
		crypto_unregister_skcipher(&alg->alg);
	}

	clk_disable_unprepare(engine->clk);
	clk_put(engine->clk);

	return 0;
}

static struct platform_driver spacc_driver = {
	.probe		= spacc_probe,
	.remove		= spacc_remove,
	.driver		= {
		.name	= "picochip,spacc",
#ifdef CONFIG_PM
		.pm	= &spacc_pm_ops,
#endif /* CONFIG_PM */
		.of_match_table = of_match_ptr(spacc_of_id_table),
	},
};

module_platform_driver(spacc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");