Lines matching references to sdcp
174 struct dcp *sdcp = global_sdcp;
178 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
179 dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
182 dma_err = dma_mapping_error(sdcp->dev, desc_phys);
186 reinit_completion(&sdcp->completion[chan]);
189 writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
192 writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
195 writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
197 ret = wait_for_completion_timeout(&sdcp->completion[chan],
200 dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
201 chan, readl(sdcp->base + MXS_DCP_STAT));
205 stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
207 dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
212 dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
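Lines 174-212 above all come from the channel-start path: the per-channel DMA descriptor in the coherent area is mapped, the channel status is cleared, the descriptor's bus address is written to CMDPTR, the channel semaphore is incremented to start processing, and the caller sleeps on the per-channel completion that the interrupt handler (line 966) signals. A condensed sketch of that flow, assuming the helper name, the one-second timeout and the 0xff error mask, none of which are visible in the listing:

static int dcp_kick_channel_sketch(struct dcp *sdcp, int chan)
{
        struct dcp_dma_desc *desc = &sdcp->coh->desc[chan];
        dma_addr_t desc_phys;
        uint32_t stat;
        int ret;

        /* Stream-map the descriptor the engine will fetch. */
        desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
                                   DMA_TO_DEVICE);
        if (dma_mapping_error(sdcp->dev, desc_phys))
                return -ENOMEM;

        reinit_completion(&sdcp->completion[chan]);

        /* Clear stale status, point the channel at the descriptor and
         * bump the semaphore so the engine starts processing. */
        writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
        writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
        writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

        ret = wait_for_completion_timeout(&sdcp->completion[chan],
                                          msecs_to_jiffies(1000));
        if (!ret) {
                dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
                        chan, readl(sdcp->base + MXS_DCP_STAT));
                ret = -ETIMEDOUT;
                goto out;
        }

        stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
        if (stat & 0xff) {              /* error bits, assumed mask */
                dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
                        chan, stat);
                ret = -EINVAL;
        } else {
                ret = 0;
        }

out:
        dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
        return ret;
}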
224 struct dcp *sdcp = global_sdcp;
225 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
229 key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
231 ret = dma_mapping_error(sdcp->dev, key_phys);
235 src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
237 ret = dma_mapping_error(sdcp->dev, src_phys);
241 dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
243 ret = dma_mapping_error(sdcp->dev, dst_phys);
248 dev_err(sdcp->dev, "Invalid block size!\n");
283 dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
285 dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
287 dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
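Lines 224-287 show the AES buffer handling: the key material (mapped at 2 * AES_KEYSIZE_128, presumably to carry the IV alongside the key), the input buffer and the output buffer all live in the coherent area and are streaming-mapped around a single hardware run, then unmapped in reverse order. A minimal sketch of just that mapping discipline, with the descriptor programming elided and the helper name hypothetical:

static int dcp_map_aes_buffers_sketch(struct dcp *sdcp,
                                      dma_addr_t *key_phys,
                                      dma_addr_t *src_phys,
                                      dma_addr_t *dst_phys)
{
        *key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
                                   2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
        if (dma_mapping_error(sdcp->dev, *key_phys))
                return -ENOMEM;

        *src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
                                   DCP_BUF_SZ, DMA_TO_DEVICE);
        if (dma_mapping_error(sdcp->dev, *src_phys))
                goto err_unmap_key;

        *dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
                                   DCP_BUF_SZ, DMA_FROM_DEVICE);
        if (dma_mapping_error(sdcp->dev, *dst_phys))
                goto err_unmap_src;

        /* ... program the DMA descriptor and kick the channel here ... */
        return 0;

err_unmap_src:
        dma_unmap_single(sdcp->dev, *src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_unmap_key:
        dma_unmap_single(sdcp->dev, *key_phys, 2 * AES_KEYSIZE_128,
                         DMA_TO_DEVICE);
        return -ENOMEM;
}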
295 struct dcp *sdcp = global_sdcp;
306 uint8_t *in_buf = sdcp->coh->aes_in_buf;
307 uint8_t *out_buf = sdcp->coh->aes_out_buf;
313 uint8_t *key = sdcp->coh->aes_key;
392 struct dcp *sdcp = global_sdcp;
403 spin_lock(&sdcp->lock[chan]);
404 backlog = crypto_get_backlog(&sdcp->queue[chan]);
405 arq = crypto_dequeue_request(&sdcp->queue[chan]);
406 spin_unlock(&sdcp->lock[chan]);
450 struct dcp *sdcp = global_sdcp;
463 spin_lock(&sdcp->lock[actx->chan]);
464 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
465 spin_unlock(&sdcp->lock[actx->chan]);
467 wake_up_process(sdcp->thread[actx->chan]);
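Lines 392-406 and 450-467 (and their SHA counterparts at 689-702 and 752-780) form the software queueing handshake: a request is pushed onto the per-channel crypto_queue under the channel spinlock and the channel kthread is woken; the kthread later pops the backlog entry and the next request under the same lock. A sketch of both sides, with hypothetical helper names and the backlog notification written against the recent crypto_request_complete() API:

static int dcp_enqueue_sketch(struct dcp *sdcp, int chan,
                              struct crypto_async_request *req)
{
        int ret;

        spin_lock(&sdcp->lock[chan]);
        ret = crypto_enqueue_request(&sdcp->queue[chan], req);
        spin_unlock(&sdcp->lock[chan]);

        /* Kick the per-channel worker so it rescans its queue. */
        wake_up_process(sdcp->thread[chan]);
        return ret;
}

static struct crypto_async_request *dcp_dequeue_sketch(struct dcp *sdcp,
                                                       int chan)
{
        struct crypto_async_request *backlog, *arq;

        spin_lock(&sdcp->lock[chan]);
        backlog = crypto_get_backlog(&sdcp->queue[chan]);
        arq = crypto_dequeue_request(&sdcp->queue[chan]);
        spin_unlock(&sdcp->lock[chan]);

        /* A request that sat on the backlog is notified before the next
         * request is processed (crypto_request_complete() on recent
         * kernels; older kernels invoke the completion callback directly). */
        if (backlog)
                crypto_request_complete(backlog, -EINPROGRESS);

        return arq;
}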
547 struct dcp *sdcp = global_sdcp;
553 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
556 dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
559 ret = dma_mapping_error(sdcp->dev, buf_phys);
586 memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
593 digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
595 ret = dma_mapping_error(sdcp->dev, digest_phys);
606 dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
610 dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
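Lines 547-610 are the SHA run: the staged input block is streaming-mapped for the engine, and on the final block the output buffer is mapped as the payload area the digest is written back into (the memcpy at line 586 appears to copy a precomputed empty-message digest for the zero-length-input case instead of running the hardware). A sketch of the mapping discipline only, with the descriptor setup and that special case elided and the helper name hypothetical:

static int dcp_run_sha_sketch(struct dcp *sdcp, bool final)
{
        dma_addr_t buf_phys, digest_phys = 0;
        int ret;

        buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
                                  DCP_BUF_SZ, DMA_TO_DEVICE);
        if (dma_mapping_error(sdcp->dev, buf_phys))
                return -ENOMEM;

        if (final) {
                /* The engine writes the digest into the payload buffer. */
                digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
                                             DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
                if (dma_mapping_error(sdcp->dev, digest_phys)) {
                        ret = -ENOMEM;
                        goto out_unmap_buf;
                }
        }

        /* ... fill in the DMA descriptor and kick the channel here ... */
        ret = 0;

        if (final)
                dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
                                 DMA_FROM_DEVICE);
out_unmap_buf:
        dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
        return ret;
}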
617 struct dcp *sdcp = global_sdcp;
625 uint8_t *in_buf = sdcp->coh->sha_in_buf;
626 uint8_t *out_buf = sdcp->coh->sha_out_buf;
689 struct dcp *sdcp = global_sdcp;
699 spin_lock(&sdcp->lock[chan]);
700 backlog = crypto_get_backlog(&sdcp->queue[chan]);
701 arq = crypto_dequeue_request(&sdcp->queue[chan]);
702 spin_unlock(&sdcp->lock[chan]);
752 struct dcp *sdcp = global_sdcp;
776 spin_lock(&sdcp->lock[actx->chan]);
777 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
778 spin_unlock(&sdcp->lock[actx->chan]);
780 wake_up_process(sdcp->thread[actx->chan]);
951 struct dcp *sdcp = context;
955 stat = readl(sdcp->base + MXS_DCP_STAT);
961 writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
966 complete(&sdcp->completion[i]);
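Lines 951-966 are the interrupt handler: it reads the global DCP status, acknowledges it, and completes the per-channel completion for every channel bit that is set, which is what releases the waiter at line 197. A sketch, assuming the MXS_DCP_STAT_IRQ_MASK and DCP_MAX_CHANS names for the channel-bit mask and channel count:

static irqreturn_t mxs_dcp_irq_sketch(int irq, void *context)
{
        struct dcp *sdcp = context;
        uint32_t stat;
        int i;

        stat = readl(sdcp->base + MXS_DCP_STAT);
        stat &= MXS_DCP_STAT_IRQ_MASK;  /* per-channel IRQ bits, assumed */
        if (!stat)
                return IRQ_NONE;

        /* Acknowledge, then wake every waiter whose channel finished. */
        writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
        for (i = 0; i < DCP_MAX_CHANS; i++)
                if (stat & (1 << i))
                        complete(&sdcp->completion[i]);

        return IRQ_HANDLED;
}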
974 struct dcp *sdcp = NULL;
991 sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
992 if (!sdcp)
995 sdcp->dev = dev;
996 sdcp->base = devm_platform_ioremap_resource(pdev, 0);
997 if (IS_ERR(sdcp->base))
998 return PTR_ERR(sdcp->base);
1002 "dcp-vmi-irq", sdcp);
1009 "dcp-irq", sdcp);
1016 sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
1018 if (!sdcp->coh)
1022 sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
1025 sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
1026 if (IS_ERR(sdcp->dcp_clk))
1027 return PTR_ERR(sdcp->dcp_clk);
1030 ret = stmp_reset_block(sdcp->base);
1039 sdcp->base + MXS_DCP_CTRL);
1043 sdcp->base + MXS_DCP_CHANNELCTRL);
1052 writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
1054 writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
1055 writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
1057 global_sdcp = sdcp;
1059 platform_set_drvdata(pdev, sdcp);
1062 spin_lock_init(&sdcp->lock[i]);
1063 init_completion(&sdcp->completion[i]);
1064 crypto_init_queue(&sdcp->queue[i], 50);
1068 sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
1070 if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
1072 ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
1076 sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
1078 if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
1080 ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
1085 sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
1087 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
1097 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
1106 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
1118 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1122 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1126 kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1129 kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
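Lines 1057-1080 are the per-channel setup in probe: after publishing the global and per-device driver data, one spinlock, one completion and one crypto_queue (depth 50) are initialised per channel, then one kthread is started for the SHA channel and one for the AES channel, with the first thread stopped again if starting the second fails. A sketch of that sequence; the thread names and helper name are assumptions:

static int dcp_init_channels_sketch(struct platform_device *pdev,
                                    struct dcp *sdcp)
{
        int i;

        global_sdcp = sdcp;             /* file-scope pointer, as at line 1057 */
        platform_set_drvdata(pdev, sdcp);

        for (i = 0; i < DCP_MAX_CHANS; i++) {
                spin_lock_init(&sdcp->lock[i]);
                init_completion(&sdcp->completion[i]);
                crypto_init_queue(&sdcp->queue[i], 50);
        }

        sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
                                                      NULL, "mxs_dcp_chan/sha");
        if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]))
                return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);

        sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
                                                    NULL, "mxs_dcp_chan/aes");
        if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
                kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
                return PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
        }

        return 0;
}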
1136 struct dcp *sdcp = platform_get_drvdata(pdev);
1138 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
1141 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1144 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1147 kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1148 kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
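The removal path at lines 1136-1148 mirrors probe in reverse: the algorithms registered according to the capability bits are unregistered first, and only then are the two channel kthreads stopped, so no worker can run after its algorithm is gone. A minimal sketch with the unregistration calls elided, since the algorithm objects are not visible in this listing:

static void mxs_dcp_remove_sketch(struct platform_device *pdev)
{
        struct dcp *sdcp = platform_get_drvdata(pdev);

        /*
         * Unregister the hash/cipher algorithms guarded by
         * MXS_DCP_CAPABILITY1_SHA256 / _SHA1 / _AES128 here (elided).
         */

        kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
        kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
}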