1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * Driver for EIP97 cryptographic accelerator. 4 * 5 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com> 6 */ 7 8#include <linux/clk.h> 9#include <linux/init.h> 10#include <linux/kernel.h> 11#include <linux/module.h> 12#include <linux/mod_devicetable.h> 13#include <linux/platform_device.h> 14#include <linux/pm_runtime.h> 15#include "mtk-platform.h" 16 17#define MTK_BURST_SIZE_MSK GENMASK(7, 4) 18#define MTK_BURST_SIZE(x) ((x) << 4) 19#define MTK_DESC_SIZE(x) ((x) << 0) 20#define MTK_DESC_OFFSET(x) ((x) << 16) 21#define MTK_DESC_FETCH_SIZE(x) ((x) << 0) 22#define MTK_DESC_FETCH_THRESH(x) ((x) << 16) 23#define MTK_DESC_OVL_IRQ_EN BIT(25) 24#define MTK_DESC_ATP_PRESENT BIT(30) 25 26#define MTK_DFSE_IDLE GENMASK(3, 0) 27#define MTK_DFSE_THR_CTRL_EN BIT(30) 28#define MTK_DFSE_THR_CTRL_RESET BIT(31) 29#define MTK_DFSE_RING_ID(x) (((x) >> 12) & GENMASK(3, 0)) 30#define MTK_DFSE_MIN_DATA(x) ((x) << 0) 31#define MTK_DFSE_MAX_DATA(x) ((x) << 8) 32#define MTK_DFE_MIN_CTRL(x) ((x) << 16) 33#define MTK_DFE_MAX_CTRL(x) ((x) << 24) 34 35#define MTK_IN_BUF_MIN_THRESH(x) ((x) << 8) 36#define MTK_IN_BUF_MAX_THRESH(x) ((x) << 12) 37#define MTK_OUT_BUF_MIN_THRESH(x) ((x) << 0) 38#define MTK_OUT_BUF_MAX_THRESH(x) ((x) << 4) 39#define MTK_IN_TBUF_SIZE(x) (((x) >> 4) & GENMASK(3, 0)) 40#define MTK_IN_DBUF_SIZE(x) (((x) >> 8) & GENMASK(3, 0)) 41#define MTK_OUT_DBUF_SIZE(x) (((x) >> 16) & GENMASK(3, 0)) 42#define MTK_CMD_FIFO_SIZE(x) (((x) >> 8) & GENMASK(3, 0)) 43#define MTK_RES_FIFO_SIZE(x) (((x) >> 12) & GENMASK(3, 0)) 44 45#define MTK_PE_TK_LOC_AVL BIT(2) 46#define MTK_PE_PROC_HELD BIT(14) 47#define MTK_PE_TK_TIMEOUT_EN BIT(22) 48#define MTK_PE_INPUT_DMA_ERR BIT(0) 49#define MTK_PE_OUTPUT_DMA_ERR BIT(1) 50#define MTK_PE_PKT_PORC_ERR BIT(2) 51#define MTK_PE_PKT_TIMEOUT BIT(3) 52#define MTK_PE_FATAL_ERR BIT(14) 53#define MTK_PE_INPUT_DMA_ERR_EN BIT(16) 54#define MTK_PE_OUTPUT_DMA_ERR_EN BIT(17) 55#define MTK_PE_PKT_PORC_ERR_EN BIT(18) 56#define MTK_PE_PKT_TIMEOUT_EN BIT(19) 57#define MTK_PE_FATAL_ERR_EN BIT(30) 58#define MTK_PE_INT_OUT_EN BIT(31) 59 60#define MTK_HIA_SIGNATURE ((u16)0x35ca) 61#define MTK_HIA_DATA_WIDTH(x) (((x) >> 25) & GENMASK(1, 0)) 62#define MTK_HIA_DMA_LENGTH(x) (((x) >> 20) & GENMASK(4, 0)) 63#define MTK_CDR_STAT_CLR GENMASK(4, 0) 64#define MTK_RDR_STAT_CLR GENMASK(7, 0) 65 66#define MTK_AIC_INT_MSK GENMASK(5, 0) 67#define MTK_AIC_VER_MSK (GENMASK(15, 0) | GENMASK(27, 20)) 68#define MTK_AIC_VER11 0x011036c9 69#define MTK_AIC_VER12 0x012036c9 70#define MTK_AIC_G_CLR GENMASK(30, 20) 71 72/** 73 * EIP97 is an integrated security subsystem to accelerate cryptographic 74 * functions and protocols to offload the host processor. 75 * Some important hardware modules are briefly introduced below: 76 * 77 * Host Interface Adapter(HIA) - the main interface between the host 78 * system and the hardware subsystem. It is responsible for attaching 79 * processing engine to the specific host bus interface and provides a 80 * standardized software view for off loading tasks to the engine. 81 * 82 * Command Descriptor Ring Manager(CDR Manager) - keeps track of how many 83 * CD the host has prepared in the CDR. It monitors the fill level of its 84 * CD-FIFO and if there's sufficient space for the next block of descriptors, 85 * then it fires off a DMA request to fetch a block of CDs. 86 * 87 * Data fetch engine(DFE) - It is responsible for parsing the CD and 88 * setting up the required control and packet data DMA transfers from 89 * system memory to the processing engine. 90 * 91 * Result Descriptor Ring Manager(RDR Manager) - same as CDR Manager, 92 * but target is result descriptors, Moreover, it also handles the RD 93 * updates under control of the DSE. For each packet data segment 94 * processed, the DSE triggers the RDR Manager to write the updated RD. 95 * If triggered to update, the RDR Manager sets up a DMA operation to 96 * copy the RD from the DSE to the correct location in the RDR. 97 * 98 * Data Store Engine(DSE) - It is responsible for parsing the prepared RD 99 * and setting up the required control and packet data DMA transfers from 100 * the processing engine to system memory. 101 * 102 * Advanced Interrupt Controllers(AICs) - receive interrupt request signals 103 * from various sources and combine them into one interrupt output. 104 * The AICs are used by: 105 * - One for the HIA global and processing engine interrupts. 106 * - The others for the descriptor ring interrupts. 107 */ 108 109/* Cryptographic engine capabilities */ 110struct mtk_sys_cap { 111 /* host interface adapter */ 112 u32 hia_ver; 113 u32 hia_opt; 114 /* packet engine */ 115 u32 pkt_eng_opt; 116 /* global hardware */ 117 u32 hw_opt; 118}; 119 120static void mtk_desc_ring_link(struct mtk_cryp *cryp, u32 mask) 121{ 122 /* Assign rings to DFE/DSE thread and enable it */ 123 writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DFE_THR_CTRL); 124 writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DSE_THR_CTRL); 125} 126 127static void mtk_dfe_dse_buf_setup(struct mtk_cryp *cryp, 128 struct mtk_sys_cap *cap) 129{ 130 u32 width = MTK_HIA_DATA_WIDTH(cap->hia_opt) + 2; 131 u32 len = MTK_HIA_DMA_LENGTH(cap->hia_opt) - 1; 132 u32 ipbuf = min((u32)MTK_IN_DBUF_SIZE(cap->hw_opt) + width, len); 133 u32 opbuf = min((u32)MTK_OUT_DBUF_SIZE(cap->hw_opt) + width, len); 134 u32 itbuf = min((u32)MTK_IN_TBUF_SIZE(cap->hw_opt) + width, len); 135 136 writel(MTK_DFSE_MIN_DATA(ipbuf - 1) | 137 MTK_DFSE_MAX_DATA(ipbuf) | 138 MTK_DFE_MIN_CTRL(itbuf - 1) | 139 MTK_DFE_MAX_CTRL(itbuf), 140 cryp->base + DFE_CFG); 141 142 writel(MTK_DFSE_MIN_DATA(opbuf - 1) | 143 MTK_DFSE_MAX_DATA(opbuf), 144 cryp->base + DSE_CFG); 145 146 writel(MTK_IN_BUF_MIN_THRESH(ipbuf - 1) | 147 MTK_IN_BUF_MAX_THRESH(ipbuf), 148 cryp->base + PE_IN_DBUF_THRESH); 149 150 writel(MTK_IN_BUF_MIN_THRESH(itbuf - 1) | 151 MTK_IN_BUF_MAX_THRESH(itbuf), 152 cryp->base + PE_IN_TBUF_THRESH); 153 154 writel(MTK_OUT_BUF_MIN_THRESH(opbuf - 1) | 155 MTK_OUT_BUF_MAX_THRESH(opbuf), 156 cryp->base + PE_OUT_DBUF_THRESH); 157 158 writel(0, cryp->base + PE_OUT_TBUF_THRESH); 159 writel(0, cryp->base + PE_OUT_BUF_CTRL); 160} 161 162static int mtk_dfe_dse_state_check(struct mtk_cryp *cryp) 163{ 164 int ret = -EINVAL; 165 u32 val; 166 167 /* Check for completion of all DMA transfers */ 168 val = readl(cryp->base + DFE_THR_STAT); 169 if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE) { 170 val = readl(cryp->base + DSE_THR_STAT); 171 if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE) 172 ret = 0; 173 } 174 175 if (!ret) { 176 /* Take DFE/DSE thread out of reset */ 177 writel(0, cryp->base + DFE_THR_CTRL); 178 writel(0, cryp->base + DSE_THR_CTRL); 179 } else { 180 return -EBUSY; 181 } 182 183 return 0; 184} 185 186static int mtk_dfe_dse_reset(struct mtk_cryp *cryp) 187{ 188 /* Reset DSE/DFE and correct system priorities for all rings. */ 189 writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DFE_THR_CTRL); 190 writel(0, cryp->base + DFE_PRIO_0); 191 writel(0, cryp->base + DFE_PRIO_1); 192 writel(0, cryp->base + DFE_PRIO_2); 193 writel(0, cryp->base + DFE_PRIO_3); 194 195 writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DSE_THR_CTRL); 196 writel(0, cryp->base + DSE_PRIO_0); 197 writel(0, cryp->base + DSE_PRIO_1); 198 writel(0, cryp->base + DSE_PRIO_2); 199 writel(0, cryp->base + DSE_PRIO_3); 200 201 return mtk_dfe_dse_state_check(cryp); 202} 203 204static void mtk_cmd_desc_ring_setup(struct mtk_cryp *cryp, 205 int i, struct mtk_sys_cap *cap) 206{ 207 /* Full descriptor that fits FIFO minus one */ 208 u32 count = 209 ((1 << MTK_CMD_FIFO_SIZE(cap->hia_opt)) / MTK_DESC_SZ) - 1; 210 211 /* Temporarily disable external triggering */ 212 writel(0, cryp->base + CDR_CFG(i)); 213 214 /* Clear CDR count */ 215 writel(MTK_CNT_RST, cryp->base + CDR_PREP_COUNT(i)); 216 writel(MTK_CNT_RST, cryp->base + CDR_PROC_COUNT(i)); 217 218 writel(0, cryp->base + CDR_PREP_PNTR(i)); 219 writel(0, cryp->base + CDR_PROC_PNTR(i)); 220 writel(0, cryp->base + CDR_DMA_CFG(i)); 221 222 /* Configure CDR host address space */ 223 writel(0, cryp->base + CDR_BASE_ADDR_HI(i)); 224 writel(cryp->ring[i]->cmd_dma, cryp->base + CDR_BASE_ADDR_LO(i)); 225 226 writel(MTK_DESC_RING_SZ, cryp->base + CDR_RING_SIZE(i)); 227 228 /* Clear and disable all CDR interrupts */ 229 writel(MTK_CDR_STAT_CLR, cryp->base + CDR_STAT(i)); 230 231 /* 232 * Set command descriptor offset and enable additional 233 * token present in descriptor. 234 */ 235 writel(MTK_DESC_SIZE(MTK_DESC_SZ) | 236 MTK_DESC_OFFSET(MTK_DESC_OFF) | 237 MTK_DESC_ATP_PRESENT, 238 cryp->base + CDR_DESC_SIZE(i)); 239 240 writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) | 241 MTK_DESC_FETCH_THRESH(count * MTK_DESC_SZ), 242 cryp->base + CDR_CFG(i)); 243} 244 245static void mtk_res_desc_ring_setup(struct mtk_cryp *cryp, 246 int i, struct mtk_sys_cap *cap) 247{ 248 u32 rndup = 2; 249 u32 count = ((1 << MTK_RES_FIFO_SIZE(cap->hia_opt)) / rndup) - 1; 250 251 /* Temporarily disable external triggering */ 252 writel(0, cryp->base + RDR_CFG(i)); 253 254 /* Clear RDR count */ 255 writel(MTK_CNT_RST, cryp->base + RDR_PREP_COUNT(i)); 256 writel(MTK_CNT_RST, cryp->base + RDR_PROC_COUNT(i)); 257 258 writel(0, cryp->base + RDR_PREP_PNTR(i)); 259 writel(0, cryp->base + RDR_PROC_PNTR(i)); 260 writel(0, cryp->base + RDR_DMA_CFG(i)); 261 262 /* Configure RDR host address space */ 263 writel(0, cryp->base + RDR_BASE_ADDR_HI(i)); 264 writel(cryp->ring[i]->res_dma, cryp->base + RDR_BASE_ADDR_LO(i)); 265 266 writel(MTK_DESC_RING_SZ, cryp->base + RDR_RING_SIZE(i)); 267 writel(MTK_RDR_STAT_CLR, cryp->base + RDR_STAT(i)); 268 269 /* 270 * RDR manager generates update interrupts on a per-completed-packet, 271 * and the rd_proc_thresh_irq interrupt is fired when proc_pkt_count 272 * for the RDR exceeds the number of packets. 273 */ 274 writel(MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE, 275 cryp->base + RDR_THRESH(i)); 276 277 /* 278 * Configure a threshold and time-out value for the processed 279 * result descriptors (or complete packets) that are written to 280 * the RDR. 281 */ 282 writel(MTK_DESC_SIZE(MTK_DESC_SZ) | MTK_DESC_OFFSET(MTK_DESC_OFF), 283 cryp->base + RDR_DESC_SIZE(i)); 284 285 /* 286 * Configure HIA fetch size and fetch threshold that are used to 287 * fetch blocks of multiple descriptors. 288 */ 289 writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) | 290 MTK_DESC_FETCH_THRESH(count * rndup) | 291 MTK_DESC_OVL_IRQ_EN, 292 cryp->base + RDR_CFG(i)); 293} 294 295static int mtk_packet_engine_setup(struct mtk_cryp *cryp) 296{ 297 struct mtk_sys_cap cap; 298 int i, err; 299 u32 val; 300 301 cap.hia_ver = readl(cryp->base + HIA_VERSION); 302 cap.hia_opt = readl(cryp->base + HIA_OPTIONS); 303 cap.hw_opt = readl(cryp->base + EIP97_OPTIONS); 304 305 if (!(((u16)cap.hia_ver) == MTK_HIA_SIGNATURE)) 306 return -EINVAL; 307 308 /* Configure endianness conversion method for master (DMA) interface */ 309 writel(0, cryp->base + EIP97_MST_CTRL); 310 311 /* Set HIA burst size */ 312 val = readl(cryp->base + HIA_MST_CTRL); 313 val &= ~MTK_BURST_SIZE_MSK; 314 val |= MTK_BURST_SIZE(5); 315 writel(val, cryp->base + HIA_MST_CTRL); 316 317 err = mtk_dfe_dse_reset(cryp); 318 if (err) { 319 dev_err(cryp->dev, "Failed to reset DFE and DSE.\n"); 320 return err; 321 } 322 323 mtk_dfe_dse_buf_setup(cryp, &cap); 324 325 /* Enable the 4 rings for the packet engines. */ 326 mtk_desc_ring_link(cryp, 0xf); 327 328 for (i = 0; i < MTK_RING_MAX; i++) { 329 mtk_cmd_desc_ring_setup(cryp, i, &cap); 330 mtk_res_desc_ring_setup(cryp, i, &cap); 331 } 332 333 writel(MTK_PE_TK_LOC_AVL | MTK_PE_PROC_HELD | MTK_PE_TK_TIMEOUT_EN, 334 cryp->base + PE_TOKEN_CTRL_STAT); 335 336 /* Clear all pending interrupts */ 337 writel(MTK_AIC_G_CLR, cryp->base + AIC_G_ACK); 338 writel(MTK_PE_INPUT_DMA_ERR | MTK_PE_OUTPUT_DMA_ERR | 339 MTK_PE_PKT_PORC_ERR | MTK_PE_PKT_TIMEOUT | 340 MTK_PE_FATAL_ERR | MTK_PE_INPUT_DMA_ERR_EN | 341 MTK_PE_OUTPUT_DMA_ERR_EN | MTK_PE_PKT_PORC_ERR_EN | 342 MTK_PE_PKT_TIMEOUT_EN | MTK_PE_FATAL_ERR_EN | 343 MTK_PE_INT_OUT_EN, 344 cryp->base + PE_INTERRUPT_CTRL_STAT); 345 346 return 0; 347} 348 349static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw) 350{ 351 u32 val; 352 353 if (hw == MTK_RING_MAX) 354 val = readl(cryp->base + AIC_G_VERSION); 355 else 356 val = readl(cryp->base + AIC_VERSION(hw)); 357 358 val &= MTK_AIC_VER_MSK; 359 if (val != MTK_AIC_VER11 && val != MTK_AIC_VER12) 360 return -ENXIO; 361 362 if (hw == MTK_RING_MAX) 363 val = readl(cryp->base + AIC_G_OPTIONS); 364 else 365 val = readl(cryp->base + AIC_OPTIONS(hw)); 366 367 val &= MTK_AIC_INT_MSK; 368 if (!val || val > 32) 369 return -ENXIO; 370 371 return 0; 372} 373 374static int mtk_aic_init(struct mtk_cryp *cryp, int hw) 375{ 376 int err; 377 378 err = mtk_aic_cap_check(cryp, hw); 379 if (err) 380 return err; 381 382 /* Disable all interrupts and set initial configuration */ 383 if (hw == MTK_RING_MAX) { 384 writel(0, cryp->base + AIC_G_ENABLE_CTRL); 385 writel(0, cryp->base + AIC_G_POL_CTRL); 386 writel(0, cryp->base + AIC_G_TYPE_CTRL); 387 writel(0, cryp->base + AIC_G_ENABLE_SET); 388 } else { 389 writel(0, cryp->base + AIC_ENABLE_CTRL(hw)); 390 writel(0, cryp->base + AIC_POL_CTRL(hw)); 391 writel(0, cryp->base + AIC_TYPE_CTRL(hw)); 392 writel(0, cryp->base + AIC_ENABLE_SET(hw)); 393 } 394 395 return 0; 396} 397 398static int mtk_accelerator_init(struct mtk_cryp *cryp) 399{ 400 int i, err; 401 402 /* Initialize advanced interrupt controller(AIC) */ 403 for (i = 0; i < MTK_IRQ_NUM; i++) { 404 err = mtk_aic_init(cryp, i); 405 if (err) { 406 dev_err(cryp->dev, "Failed to initialize AIC.\n"); 407 return err; 408 } 409 } 410 411 /* Initialize packet engine */ 412 err = mtk_packet_engine_setup(cryp); 413 if (err) { 414 dev_err(cryp->dev, "Failed to configure packet engine.\n"); 415 return err; 416 } 417 418 return 0; 419} 420 421static void mtk_desc_dma_free(struct mtk_cryp *cryp) 422{ 423 int i; 424 425 for (i = 0; i < MTK_RING_MAX; i++) { 426 dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, 427 cryp->ring[i]->res_base, 428 cryp->ring[i]->res_dma); 429 dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, 430 cryp->ring[i]->cmd_base, 431 cryp->ring[i]->cmd_dma); 432 kfree(cryp->ring[i]); 433 } 434} 435 436static int mtk_desc_ring_alloc(struct mtk_cryp *cryp) 437{ 438 struct mtk_ring **ring = cryp->ring; 439 int i; 440 441 for (i = 0; i < MTK_RING_MAX; i++) { 442 ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL); 443 if (!ring[i]) 444 goto err_cleanup; 445 446 ring[i]->cmd_base = dma_alloc_coherent(cryp->dev, 447 MTK_DESC_RING_SZ, 448 &ring[i]->cmd_dma, 449 GFP_KERNEL); 450 if (!ring[i]->cmd_base) 451 goto err_cleanup; 452 453 ring[i]->res_base = dma_alloc_coherent(cryp->dev, 454 MTK_DESC_RING_SZ, 455 &ring[i]->res_dma, 456 GFP_KERNEL); 457 if (!ring[i]->res_base) 458 goto err_cleanup; 459 460 ring[i]->cmd_next = ring[i]->cmd_base; 461 ring[i]->res_next = ring[i]->res_base; 462 } 463 return 0; 464 465err_cleanup: 466 do { 467 dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, 468 ring[i]->res_base, ring[i]->res_dma); 469 dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, 470 ring[i]->cmd_base, ring[i]->cmd_dma); 471 kfree(ring[i]); 472 } while (i--); 473 return -ENOMEM; 474} 475 476static int mtk_crypto_probe(struct platform_device *pdev) 477{ 478 struct mtk_cryp *cryp; 479 int i, err; 480 481 cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL); 482 if (!cryp) 483 return -ENOMEM; 484 485 cryp->base = devm_platform_ioremap_resource(pdev, 0); 486 if (IS_ERR(cryp->base)) 487 return PTR_ERR(cryp->base); 488 489 for (i = 0; i < MTK_IRQ_NUM; i++) { 490 cryp->irq[i] = platform_get_irq(pdev, i); 491 if (cryp->irq[i] < 0) 492 return cryp->irq[i]; 493 } 494 495 cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp"); 496 if (IS_ERR(cryp->clk_cryp)) 497 return -EPROBE_DEFER; 498 499 cryp->dev = &pdev->dev; 500 pm_runtime_enable(cryp->dev); 501 pm_runtime_get_sync(cryp->dev); 502 503 err = clk_prepare_enable(cryp->clk_cryp); 504 if (err) 505 goto err_clk_cryp; 506 507 /* Allocate four command/result descriptor rings */ 508 err = mtk_desc_ring_alloc(cryp); 509 if (err) { 510 dev_err(cryp->dev, "Unable to allocate descriptor rings.\n"); 511 goto err_resource; 512 } 513 514 /* Initialize hardware modules */ 515 err = mtk_accelerator_init(cryp); 516 if (err) { 517 dev_err(cryp->dev, "Failed to initialize cryptographic engine.\n"); 518 goto err_engine; 519 } 520 521 err = mtk_cipher_alg_register(cryp); 522 if (err) { 523 dev_err(cryp->dev, "Unable to register cipher algorithm.\n"); 524 goto err_cipher; 525 } 526 527 err = mtk_hash_alg_register(cryp); 528 if (err) { 529 dev_err(cryp->dev, "Unable to register hash algorithm.\n"); 530 goto err_hash; 531 } 532 533 platform_set_drvdata(pdev, cryp); 534 return 0; 535 536err_hash: 537 mtk_cipher_alg_release(cryp); 538err_cipher: 539 mtk_dfe_dse_reset(cryp); 540err_engine: 541 mtk_desc_dma_free(cryp); 542err_resource: 543 clk_disable_unprepare(cryp->clk_cryp); 544err_clk_cryp: 545 pm_runtime_put_sync(cryp->dev); 546 pm_runtime_disable(cryp->dev); 547 548 return err; 549} 550 551static int mtk_crypto_remove(struct platform_device *pdev) 552{ 553 struct mtk_cryp *cryp = platform_get_drvdata(pdev); 554 555 mtk_hash_alg_release(cryp); 556 mtk_cipher_alg_release(cryp); 557 mtk_desc_dma_free(cryp); 558 559 clk_disable_unprepare(cryp->clk_cryp); 560 561 pm_runtime_put_sync(cryp->dev); 562 pm_runtime_disable(cryp->dev); 563 platform_set_drvdata(pdev, NULL); 564 565 return 0; 566} 567 568static const struct of_device_id of_crypto_id[] = { 569 { .compatible = "mediatek,eip97-crypto" }, 570 {}, 571}; 572MODULE_DEVICE_TABLE(of, of_crypto_id); 573 574static struct platform_driver mtk_crypto_driver = { 575 .probe = mtk_crypto_probe, 576 .remove = mtk_crypto_remove, 577 .driver = { 578 .name = "mtk-crypto", 579 .of_match_table = of_crypto_id, 580 }, 581}; 582module_platform_driver(mtk_crypto_driver); 583 584MODULE_LICENSE("GPL"); 585MODULE_AUTHOR("Ryder Lee <ryder.lee@mediatek.com>"); 586MODULE_DESCRIPTION("Cryptographic accelerator driver for EIP97"); 587