Lines Matching defs:dev
145 #define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
146 #define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
147 #define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
149 #define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
150 #define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
151 SSS_AES_REG(dev, reg))
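All three accessor families share one token-pasting pattern: the reg argument is glued onto the SSS_REG_ prefix to select an offset constant, which is added to the ioremap'd base before the raw 32-bit MMIO access. A minimal expansion sketch; the offset value shown is an illustrative assumption, not taken from this listing:

    /* Assume, for illustration only, that this offset exists: */
    #define SSS_REG_FCINTPEND	0x0014

    /*
     * SSS_WRITE(dev, FCINTPEND, st_bits) then expands to
     *     __raw_writel((st_bits), SSS_REG(dev, FCINTPEND));
     * and finally to
     *     __raw_writel((st_bits), ((dev)->ioaddr + (SSS_REG_FCINTPEND)));
     * i.e. a raw write at byte offset 0x14 from the mapped base.
     */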
211  * HASH bit numbers, used by the device, set in dev->hash_flags with
253 struct s5p_aes_dev *dev;
262 * @dev: Associated device
301 struct device *dev;
427 if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node))
428 return of_device_get_match_data(&pdev->dev);
436 static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
439 SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
440 SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
443 static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
446 SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
447 SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
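The two setters at 436-447 each program exactly one scatterlist segment: the source segment goes to the feed-control block-receive registers, the destination segment to the block-transmit ones. sg_dma_address()/sg_dma_len() are only meaningful after a successful dma_map_sg(), so the mapping helpers at 540-561 must run first. A hypothetical caller, as a sketch:

    /* Hypothetical helper: kick one already-mapped segment per direction. */
    static void example_kick_aes_dma(struct s5p_aes_dev *dev)
    {
    	s5p_set_dma_indata(dev, dev->sg_src);	/* FCBRDMAS/FCBRDMAL */
    	s5p_set_dma_outdata(dev, dev->sg_dst);	/* FCBTDMAS/FCBTDMAL */
    }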
450 static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
457 len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
477 static void s5p_sg_done(struct s5p_aes_dev *dev)
479 struct skcipher_request *req = dev->req;
482 if (dev->sg_dst_cpy) {
483 dev_dbg(dev->dev,
485 dev->req->cryptlen);
486 s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
487 dev->req->cryptlen, 1);
489 s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
490 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
492 memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
495 memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
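The two memcpy_fromio() calls at 492 and 495 write the hardware chaining state back into req->iv once the transfer is done: CBC needs the final IV for the next chained request, CTR needs the advanced counter. A hedged sketch; the mode predicates are illustrative placeholders, since the listing shows only the copies themselves:

    /* mode_is_cbc/mode_is_ctr are placeholders standing in for the
     * driver's real mode flags. */
    if (mode_is_cbc)
    	memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0),
    		      AES_BLOCK_SIZE);
    else if (mode_is_ctr)
    	memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0),
    		      AES_BLOCK_SIZE);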
498 /* Calls the completion. Cannot be called with dev->lock held. */
504 static void s5p_unset_outdata(struct s5p_aes_dev *dev)
506 dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
509 static void s5p_unset_indata(struct s5p_aes_dev *dev)
511 dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
514 static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
524 len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
532 s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0);
540 static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
545 if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
548 dev->sg_dst = sg;
553 static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
558 if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
561 dev->sg_src = sg;
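The map/unmap helpers pair up by DMA direction (DMA_TO_DEVICE for input, DMA_FROM_DEVICE for output), and the directions must match or dma-debug will flag the unmap. A hypothetical error-unwinding sketch built on the helpers at 504-561:

    /* Hypothetical pattern: an indata mapping that succeeded must be
     * undone with the matching direction if outdata mapping fails. */
    static int example_map_both(struct s5p_aes_dev *dev,
    			    struct scatterlist *src,
    			    struct scatterlist *dst)
    {
    	int err = s5p_set_indata(dev, src);	/* DMA_TO_DEVICE */
    	if (err)
    		return err;

    	err = s5p_set_outdata(dev, dst);	/* DMA_FROM_DEVICE */
    	if (err)
    		s5p_unset_indata(dev);		/* unmap, same direction */

    	return err;
    }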
573 static int s5p_aes_tx(struct s5p_aes_dev *dev)
577 s5p_unset_outdata(dev);
579 if (!sg_is_last(dev->sg_dst)) {
580 ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
595 static int s5p_aes_rx(struct s5p_aes_dev *dev)
599 s5p_unset_indata(dev);
601 if (!sg_is_last(dev->sg_src)) {
602 ret = s5p_set_indata(dev, sg_next(dev->sg_src));
623 * @dev: device
626 static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
629 dev->hash_sg_cnt--;
630 SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
631 SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
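In s5p_set_dma_hashdata() the write order is load-bearing: FCHRDMAS arms the address, and the FCHRDMAL length write is what starts the transfer, hence the "DMA starts" comment. A restatement as a hypothetical helper:

    /* Hypothetical restatement: the length write must come second,
     * because it triggers the hash DMA. */
    static void example_hash_kick(struct s5p_aes_dev *dev,
    			      struct scatterlist *sg)
    {
    	SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));	/* arm address */
    	SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg));	/* start transfer */
    }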
636 * @dev: device
643 static int s5p_hash_rx(struct s5p_aes_dev *dev)
645 if (dev->hash_sg_cnt > 0) {
646 dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
650 set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
651 if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
660 struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
671 spin_lock_irqsave(&dev->lock, flags);
683 status = SSS_READ(dev, FCINTSTAT);
685 err_dma_rx = s5p_aes_rx(dev);
688 if (sg_is_last(dev->sg_dst))
690 err_dma_tx = s5p_aes_tx(dev);
694 err_dma_hx = s5p_hash_rx(dev);
699 SSS_WRITE(dev, FCINTPEND, st_bits);
710 set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
711 s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
727 s5p_sg_done(dev);
729 s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
731 spin_unlock_irqrestore(&dev->lock, flags);
733 s5p_aes_complete(dev->req, 0);
735 tasklet_schedule(&dev->tasklet);
744 s5p_set_dma_outdata(dev, dev->sg_dst);
746 s5p_set_dma_indata(dev, dev->sg_src);
748 s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
750 spin_unlock_irqrestore(&dev->lock, flags);
756 s5p_sg_done(dev);
757 dev->busy = false;
758 req = dev->req;
760 s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
762 spin_unlock_irqrestore(&dev->lock, flags);
772 tasklet_schedule(&dev->hash_tasklet);
774 s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
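Read together, the fragments from 660 to 774 give the interrupt handler's shape: take dev->lock, read FCINTSTAT, advance the AES rx/tx scatterlists and the hash iterator, acknowledge through FCINTPEND, drop the lock, and only then complete the request, honoring the rule at 498. A condensed, hedged sketch with the error paths and hash branch trimmed:

    /* Condensed sketch of the handler's locking and ack pattern; not
     * the driver's full logic. */
    static irqreturn_t example_interrupt(int irq, void *dev_id)
    {
    	struct platform_device *pdev = dev_id;
    	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
    	unsigned long flags;
    	u32 status;

    	spin_lock_irqsave(&dev->lock, flags);
    	status = SSS_READ(dev, FCINTSTAT);
    	/* ... advance dev->sg_src/dev->sg_dst, pick bits to ack ... */
    	SSS_WRITE(dev, FCINTPEND, status);
    	spin_unlock_irqrestore(&dev->lock, flags);

    	s5p_aes_complete(dev->req, 0);	/* never with dev->lock held */
    	return IRQ_HANDLED;
    }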
837 * @dev: secss device
839 static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
841 SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
846 * @dev: secss device
850 static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
852 s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
857 * @dev: secss device
860 static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
862 SSS_WRITE(dev, FCINTENCLR, flags);
867 * @dev: secss device
870 static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
872 SSS_WRITE(dev, FCINTENSET, flags);
877 * @dev: secss device
880 static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
885 spin_lock_irqsave(&dev->lock, flags);
887 flow = SSS_READ(dev, FCFIFOCTRL);
890 SSS_WRITE(dev, FCFIFOCTRL, flow);
892 spin_unlock_irqrestore(&dev->lock, flags);
897 * @dev: secss device
903 static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
905 s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
908 s5p_hash_dma_flush(dev);
910 s5p_hash_dma_enable(dev);
911 s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
912 s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
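s5p_ahash_dma_init() encodes a strict bring-up order: mask the hash DMA interrupts, flush the FIFO, switch it to DMA mode, select the input flow, then unmask. Restated as a hypothetical wrapper built from the helpers above, with the flag arguments abbreviated to the constants visible in this listing:

    static void example_hash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
    {
    	s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR);
    	s5p_hash_dma_flush(dev);		/* drop stale FIFO contents */
    	s5p_hash_dma_enable(dev);		/* FIFO into DMA mode */
    	s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
    	s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET);
    }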
987 cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
989 dev_err(dd->dev, "dma_map_sg error\n");
1033 dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
1301 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
1317 dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
1409 dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
1609 dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
1808 static void s5p_set_aes(struct s5p_aes_dev *dev,
1815 memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv,
1819 memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr,
1823 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
1825 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
1827 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
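The three keystart offsets at 1823-1827 encode a right-aligned key layout: the AES_KEY_DATA register file is eight 32-bit words and every key ends at word 8, so a 256-bit key starts at word 0, a 192-bit key at word 2, and a 128-bit key at word 4. A sketch of the selection plus the copy that presumably follows, inferred from the memcpy_toio() pattern at 1815-1819:

    void __iomem *keystart;

    if (keylen == AES_KEYSIZE_256)
    	keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
    else if (keylen == AES_KEYSIZE_192)
    	keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
    else	/* AES_KEYSIZE_128 */
    	keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);

    memcpy_toio(keystart, key, keylen);	/* all key sizes end at word 8 */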
1843 static int s5p_set_indata_start(struct s5p_aes_dev *dev,
1849 dev->sg_src_cpy = NULL;
1852 dev_dbg(dev->dev,
1854 err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
1858 sg = dev->sg_src_cpy;
1861 err = s5p_set_indata(dev, sg);
1863 s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
1870 static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
1876 dev->sg_dst_cpy = NULL;
1879 dev_dbg(dev->dev,
1881 err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
1885 sg = dev->sg_dst_cpy;
1888 err = s5p_set_outdata(dev, sg);
1890 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
1897 static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
1899 struct skcipher_request *req = dev->req;
1923 if (dev->ctx->keylen == AES_KEYSIZE_192)
1925 else if (dev->ctx->keylen == AES_KEYSIZE_256)
1937 spin_lock_irqsave(&dev->lock, flags);
1939 SSS_WRITE(dev, FCINTENCLR,
1941 SSS_WRITE(dev, FCFIFOCTRL, 0x00);
1943 err = s5p_set_indata_start(dev, req);
1947 err = s5p_set_outdata_start(dev, req);
1951 SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
1952 s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);
1954 s5p_set_dma_indata(dev, dev->sg_src);
1955 s5p_set_dma_outdata(dev, dev->sg_dst);
1957 SSS_WRITE(dev, FCINTENSET,
1960 spin_unlock_irqrestore(&dev->lock, flags);
1965 s5p_unset_indata(dev);
1968 s5p_sg_done(dev);
1969 dev->busy = false;
1970 spin_unlock_irqrestore(&dev->lock, flags);
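s5p_aes_crypt_start() at 1897-1970 does all register programming under dev->lock: mask the feed-control interrupts, zero FCFIFOCTRL, map the input and output scatterlists (bouncing unaligned ones through the copy helpers at 1843-1890), write AES_CONTROL and the key/IV material, program both DMA channels, then unmask; the error paths unwind in reverse. A condensed, hedged sketch:

    /* Condensed shape only; interrupt mask values, s5p_set_aes()
     * arguments, and the full error unwinding are elided. */
    static void example_crypt_start(struct s5p_aes_dev *dev, u32 aes_control)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&dev->lock, flags);
    	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

    	if (s5p_set_indata_start(dev, dev->req))
    		goto out;
    	if (s5p_set_outdata_start(dev, dev->req)) {
    		s5p_unset_indata(dev);
    		goto out;
    	}

    	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
    	s5p_set_dma_indata(dev, dev->sg_src);
    	s5p_set_dma_outdata(dev, dev->sg_dst);	/* lengths start the DMA */
    out:
    	spin_unlock_irqrestore(&dev->lock, flags);
    }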
1976 struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
1981 spin_lock_irqsave(&dev->lock, flags);
1982 backlog = crypto_get_backlog(&dev->queue);
1983 async_req = crypto_dequeue_request(&dev->queue);
1986 dev->busy = false;
1987 spin_unlock_irqrestore(&dev->lock, flags);
1990 spin_unlock_irqrestore(&dev->lock, flags);
1995 dev->req = skcipher_request_cast(async_req);
1996 dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
1997 reqctx = skcipher_request_ctx(dev->req);
1999 s5p_aes_crypt_start(dev, reqctx->mode);
2002 static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
2008 spin_lock_irqsave(&dev->lock, flags);
2009 err = crypto_enqueue_request(&dev->queue, &req->base);
2010 if (dev->busy) {
2011 spin_unlock_irqrestore(&dev->lock, flags);
2014 dev->busy = true;
2016 spin_unlock_irqrestore(&dev->lock, flags);
2018 tasklet_schedule(&dev->tasklet);
2028 struct s5p_aes_dev *dev = ctx->dev;
2035 dev_dbg(dev->dev, "request size is not a multiple of AES block size\n");
2041 return s5p_aes_handle_req(dev, req);
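All skcipher entry points funnel into s5p_aes_handle_req(), which enqueues under the lock and wakes the tasklet only when the engine was idle; a busy engine will find the queued request when the tasklet dequeues the next one at 1981-1999. A sketch of that enqueue-or-piggyback decision, following the lines at 2008-2018:

    spin_lock_irqsave(&dev->lock, flags);
    err = crypto_enqueue_request(&dev->queue, &req->base);
    if (dev->busy) {
    	/* engine running: the active tasklet will dequeue this later */
    	spin_unlock_irqrestore(&dev->lock, flags);
    	return err;
    }
    dev->busy = true;			/* claim the engine */
    spin_unlock_irqrestore(&dev->lock, flags);

    tasklet_schedule(&dev->tasklet);
    return err;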
2090 ctx->dev = s5p_dev;
2157 struct device *dev = &pdev->dev;
2167 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2190 pdata->ioaddr = devm_ioremap_resource(dev, res);
2197 pdata->ioaddr = devm_ioremap_resource(dev, res);
2202 pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
2204 return dev_err_probe(dev, PTR_ERR(pdata->clk),
2210 dev_err(dev, "Enabling clock %s failed, err %d\n",
2216 pdata->pclk = devm_clk_get(dev, variant->clk_names[1]);
2218 err = dev_err_probe(dev, PTR_ERR(pdata->pclk),
2226 dev_err(dev, "Enabling clock %s failed, err %d\n",
2243 dev_warn(dev, "feed control interrupt is not available.\n");
2246 err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
2250 dev_warn(dev, "feed control interrupt is not available.\n");
2255 pdata->dev = dev;
2280 dev_err(dev, "can't register '%s': %d\n",
2287 dev_info(dev, "s5p-sss driver registered\n");
2300 dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name,
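The probe fragments from 2157 onward follow the stock devm pattern: MMIO, clocks, and the feed-control IRQ are all device-managed, clock lookup failures go through dev_err_probe() so probe deferral stays quiet, and the IRQ is requested threaded. A condensed, hedged sketch of the acquisition order; the handler name, IRQ flags, and message text are assumptions layered on the calls visible above:

    pdata->ioaddr = devm_ioremap_resource(dev, res);
    if (IS_ERR(pdata->ioaddr))
    	return PTR_ERR(pdata->ioaddr);

    pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
    if (IS_ERR(pdata->clk))
    	return dev_err_probe(dev, PTR_ERR(pdata->clk),
    			     "failed to get %s clock\n",
    			     variant->clk_names[0]);

    err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
    				s5p_aes_interrupt, IRQF_ONESHOT,
    				pdev->name, pdev);
    if (err < 0)
    	dev_warn(dev, "feed control interrupt is not available.\n");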