Lines matching refs: sdcp (the MXS DCP crypto driver state, drivers/crypto/mxs-dcp.c)

173 struct dcp *sdcp = global_sdcp;
177 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
178 dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
181 dma_err = dma_mapping_error(sdcp->dev, desc_phys);
185 reinit_completion(&sdcp->completion[chan]);
188 writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
191 writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
194 writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
196 ret = wait_for_completion_timeout(&sdcp->completion[chan],
199 dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
200 chan, readl(sdcp->base + MXS_DCP_STAT));
204 stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
206 dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
211 dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
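The refs at 173-211 all fall inside the channel kick-off helper. A condensed sketch of how they fit together (not the verbatim driver; the helper name and the 1000 ms timeout are illustrative): map the channel's DMA descriptor, clear stale status, load the command pointer, bump the channel semaphore to start the engine, wait for the IRQ handler to signal the completion, then check the channel status and unmap.

static int dcp_start_dma_sketch(struct dcp *sdcp, struct dcp_dma_desc *desc,
				unsigned int chan)
{
	uint32_t stat;
	int ret = 0;
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	if (dma_mapping_error(sdcp->dev, desc_phys))
		return -ENOMEM;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status, load the descriptor, bump the channel semaphore. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	/* The interrupt handler (refs around 950-965) fires this completion. */
	if (!wait_for_completion_timeout(&sdcp->completion[chan],
					 msecs_to_jiffies(1000))) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		ret = -ETIMEDOUT;
		goto out_unmap;
	}

	/* The low byte of the channel status carries the error code. */
	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		ret = -EINVAL;
	}

out_unmap:
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
	return ret;
}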
223 struct dcp *sdcp = global_sdcp;
224 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
228 key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
230 ret = dma_mapping_error(sdcp->dev, key_phys);
234 src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
236 ret = dma_mapping_error(sdcp->dev, src_phys);
240 dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
242 ret = dma_mapping_error(sdcp->dev, dst_phys);
247 dev_err(sdcp->dev, "Invalid block size!\n");
282 dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
284 dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
286 dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
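The refs at 223-286 belong to the AES DMA routine. A minimal sketch of just its mapping discipline, with a hypothetical helper name: the key area, the input scratch buffer and the output scratch buffer in sdcp->coh are streamed with dma_map_single(), and each earlier mapping is undone if a later one fails.

static int dcp_aes_map_sketch(struct dcp *sdcp, dma_addr_t *key_phys,
			      dma_addr_t *src_phys, dma_addr_t *dst_phys)
{
	/* Key area is twice AES_KEYSIZE_128; in the driver the second half
	 * is used for the IV. */
	*key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
				   2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
	if (dma_mapping_error(sdcp->dev, *key_phys))
		return -ENOMEM;

	*src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				   DCP_BUF_SZ, DMA_TO_DEVICE);
	if (dma_mapping_error(sdcp->dev, *src_phys))
		goto err_unmap_key;

	*dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				   DCP_BUF_SZ, DMA_FROM_DEVICE);
	if (dma_mapping_error(sdcp->dev, *dst_phys))
		goto err_unmap_src;

	return 0;

err_unmap_src:
	dma_unmap_single(sdcp->dev, *src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_unmap_key:
	dma_unmap_single(sdcp->dev, *key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	return -ENOMEM;
}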
294 struct dcp *sdcp = global_sdcp;
305 uint8_t *in_buf = sdcp->coh->aes_in_buf;
306 uint8_t *out_buf = sdcp->coh->aes_out_buf;
312 uint8_t *key = sdcp->coh->aes_key;
391 struct dcp *sdcp = global_sdcp;
402 spin_lock(&sdcp->lock[chan]);
403 backlog = crypto_get_backlog(&sdcp->queue[chan]);
404 arq = crypto_dequeue_request(&sdcp->queue[chan]);
405 spin_unlock(&sdcp->lock[chan]);
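The refs at 391-405 sit in the per-channel worker thread. A sketch of the pattern, with illustrative names and the work itself abbreviated: the kthread sleeps until woken, then drains the channel's crypto_queue under the channel spinlock.

static int dcp_chan_thread_sketch(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;	/* one thread per channel */
	struct crypto_async_request *backlog, *arq;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();	/* queue empty: sleep until wake_up_process() */
			continue;
		}

		set_current_state(TASK_RUNNING);

		/* ... mark the backlog entry -EINPROGRESS, push the dequeued
		 * request through the engine, then complete it ... */
	}

	return 0;
}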
449 struct dcp *sdcp = global_sdcp;
462 spin_lock(&sdcp->lock[actx->chan]);
463 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
464 spin_unlock(&sdcp->lock[actx->chan]);
466 wake_up_process(sdcp->thread[actx->chan]);
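The submit side at 449-466 is the producer half of that pattern: queue the request on the context's channel and wake that channel's kthread. Sketch, assuming the modern skcipher API and illustrative names:

static int dcp_aes_enqueue_sketch(struct skcipher_request *req,
				  struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	/* Normally -EINPROGRESS (or -EBUSY for a backlogged request). */
	return ret;
}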
546 struct dcp *sdcp = global_sdcp;
552 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
555 dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
558 ret = dma_mapping_error(sdcp->dev, buf_phys);
585 memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
592 digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
594 ret = dma_mapping_error(sdcp->dev, digest_phys);
605 dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
609 dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
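The refs at 546-609 are the SHA run routine. A simplified sketch of its buffer handling, under the assumption (consistent with the refs) that the input scratch buffer is always mapped towards the device and the DCP_SHA_PAY_SZ payload/digest buffer is only mapped, from the device, when the hash is finalised:

static int dcp_sha_map_sketch(struct dcp *sdcp, bool fini,
			      dma_addr_t *buf_phys, dma_addr_t *digest_phys)
{
	/* Input scratch buffer: always streamed to the engine. */
	*buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
				   DCP_BUF_SZ, DMA_TO_DEVICE);
	if (dma_mapping_error(sdcp->dev, *buf_phys))
		return -ENOMEM;

	/* Payload/digest buffer: mapped only on the final block so the
	 * engine can write the result back (hence DMA_FROM_DEVICE). */
	if (fini) {
		*digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					      DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		if (dma_mapping_error(sdcp->dev, *digest_phys)) {
			dma_unmap_single(sdcp->dev, *buf_phys, DCP_BUF_SZ,
					 DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}

	return 0;
}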
616 struct dcp *sdcp = global_sdcp;
624 uint8_t *in_buf = sdcp->coh->sha_in_buf;
625 uint8_t *out_buf = sdcp->coh->sha_out_buf;
688 struct dcp *sdcp = global_sdcp;
698 spin_lock(&sdcp->lock[chan]);
699 backlog = crypto_get_backlog(&sdcp->queue[chan]);
700 arq = crypto_dequeue_request(&sdcp->queue[chan]);
701 spin_unlock(&sdcp->lock[chan]);
751 struct dcp *sdcp = global_sdcp;
775 spin_lock(&sdcp->lock[actx->chan]);
776 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
777 spin_unlock(&sdcp->lock[actx->chan]);
779 wake_up_process(sdcp->thread[actx->chan]);
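The SHA-side refs at 688-701 and 751-779 mirror the AES producer/consumer pair above one-for-one: the same spinlock-protected crypto_queue and the same wake_up_process() of the channel kthread, just on the hashing channel instead of the crypto channel.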
950 struct dcp *sdcp = context;
954 stat = readl(sdcp->base + MXS_DCP_STAT);
960 writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
965 complete(&sdcp->completion[i]);
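The refs at 950-965 are the interrupt handler that pairs with the wait in the kick-off helper. Sketch; the mask and channel-count macro names here are assumptions:

static irqreturn_t mxs_dcp_irq_sketch(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	/* Which channels raised an interrupt? */
	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;		/* assumed mask name */
	if (!stat)
		return IRQ_NONE;

	/* Acknowledge them ... */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* ... and wake every worker waiting on a finished channel. */
	for (i = 0; i < DCP_MAX_CHANS; i++)	/* assumed channel-count macro */
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}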
973 struct dcp *sdcp = NULL;
990 sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
991 if (!sdcp)
994 sdcp->dev = dev;
995 sdcp->base = devm_platform_ioremap_resource(pdev, 0);
996 if (IS_ERR(sdcp->base))
997 return PTR_ERR(sdcp->base);
1001 "dcp-vmi-irq", sdcp);
1008 "dcp-irq", sdcp);
1015 sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
1017 if (!sdcp->coh)
1021 sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
1024 sdcp->dcp_clk = devm_clk_get(dev, "dcp");
1025 if (IS_ERR(sdcp->dcp_clk)) {
1026 if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
1027 return PTR_ERR(sdcp->dcp_clk);
1028 sdcp->dcp_clk = NULL;
1030 ret = clk_prepare_enable(sdcp->dcp_clk);
1035 ret = stmp_reset_block(sdcp->base);
1044 sdcp->base + MXS_DCP_CTRL);
1048 sdcp->base + MXS_DCP_CHANNELCTRL);
1057 writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
1059 writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
1060 writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
1062 global_sdcp = sdcp;
1064 platform_set_drvdata(pdev, sdcp);
1067 spin_lock_init(&sdcp->lock[i]);
1068 init_completion(&sdcp->completion[i]);
1069 crypto_init_queue(&sdcp->queue[i], 50);
1073 sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
1075 if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
1077 ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
1081 sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
1083 if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
1085 ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
1090 sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
1092 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
1102 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
1111 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
1123 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1127 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1131 kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1134 kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1137 clk_disable_unprepare(sdcp->dcp_clk);
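The probe refs at 973-1137 cover the whole bring-up. A heavily condensed sketch of the order of operations, with IRQ setup, register values, algorithm registration, kthread names and most error unwinding elided or illustrative:

static int mxs_dcp_probe_sketch(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp;
	int i, ret;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	/* ... devm_request_irq() for the "dcp-vmi-irq" and "dcp-irq" lines ... */

	/* Over-allocate so the coherent scratch area can be aligned by hand. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* The DCP clock only exists on some SoCs; -ENOENT means "no clock". */
	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk)) {
		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
			return PTR_ERR(sdcp->dcp_clk);
		sdcp->dcp_clk = NULL;
	}
	ret = clk_prepare_enable(sdcp->dcp_clk);
	if (ret)
		return ret;

	ret = stmp_reset_block(sdcp->base);
	if (ret)
		goto err_clk;

	/* ... program MXS_DCP_CTRL, MXS_DCP_CHANNELCTRL and MXS_DCP_CONTEXT,
	 * then clear per-channel and global status (values elided) ... */

	global_sdcp = sdcp;
	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {	/* assumed channel-count macro */
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* One worker kthread per channel; IS_ERR() unwinding elided. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");

	/* Register AES/SHA1/SHA256 only if the hardware advertises them. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
	/* ... crypto_register_*() calls keyed off sdcp->caps ... */

	return 0;

err_clk:
	clk_disable_unprepare(sdcp->dcp_clk);
	return ret;
}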
1144 struct dcp *sdcp = platform_get_drvdata(pdev);
1146 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
1149 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1152 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1155 kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1156 kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1158 clk_disable_unprepare(sdcp->dcp_clk);
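Teardown at 1144-1158 mirrors probe in reverse: unregister whatever the capability register allowed to be registered, stop both channel kthreads, and disable the clock. Sketch; the algorithm object names are assumptions:

static int mxs_dcp_remove_sketch(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);	/* assumed names */

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs,
					    ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	clk_disable_unprepare(sdcp->dcp_clk);

	return 0;
}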