Lines matching defs:hdev in the STM32 HASH crypto driver (stm32-hash.c)

134 	struct stm32_hash_dev	*hdev;
155 struct stm32_hash_dev *hdev;
224 static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
226 return readl_relaxed(hdev->io_base + offset);
229 static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
232 writel_relaxed(value, hdev->io_base + offset);
235 static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
240 if (!hdev->pdata->has_sr)
241 return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
244 return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
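The matches at lines 224-246 above are the driver's only MMIO touch points: relaxed 32-bit register reads and writes plus a busy-wait that polls a status register until the core goes idle. A minimal sketch of that accessor-plus-poll pattern follows; EXAMPLE_SR and EXAMPLE_SR_BUSY are placeholder names for illustration, not this driver's registers.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_SR	0x24
#define EXAMPLE_SR_BUSY	BIT(4)

static inline u32 example_read(void __iomem *base, u32 offset)
{
	return readl_relaxed(base + offset);
}

static inline void example_write(void __iomem *base, u32 offset, u32 value)
{
	writel_relaxed(value, base + offset);
}

/* Poll the status register until the busy bit clears, or time out. */
static int example_wait_idle(void __iomem *base)
{
	u32 status;

	return readl_relaxed_poll_timeout(base + EXAMPLE_SR, status,
					  !(status & EXAMPLE_SR_BUSY),
					  10, 10000);
}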
248 static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
252 reg = stm32_hash_read(hdev, HASH_STR);
255 stm32_hash_write(hdev, HASH_STR, reg);
258 static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
260 struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
267 stm32_hash_set_nblw(hdev, keylen);
270 stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
275 reg = stm32_hash_read(hdev, HASH_STR);
277 stm32_hash_write(hdev, HASH_STR, reg);
285 static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
287 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
288 struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
295 if (!(hdev->flags & HASH_FLAGS_INIT)) {
296 if (hdev->pdata->ux500) {
299 if (hdev->pdata->alg_shift == HASH_CR_ALGO_POS)
303 reg |= alg << hdev->pdata->alg_shift;
309 hdev->flags |= HASH_FLAGS_HMAC;
315 if (!hdev->polled)
316 stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
318 stm32_hash_write(hdev, HASH_CR, reg);
320 hdev->flags |= HASH_FLAGS_INIT;
328 dev_dbg(hdev->dev, "Write Control %x\n", reg);
367 static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
370 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
377 hdev->flags |= HASH_FLAGS_FINAL;
380 if (!(hdev->flags & HASH_FLAGS_INIT) && !length &&
381 hdev->pdata->broken_emptymsg) {
389 dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
392 hdev->flags |= HASH_FLAGS_CPU;
394 stm32_hash_write_ctrl(hdev);
396 if (stm32_hash_wait_busy(hdev))
399 if ((hdev->flags & HASH_FLAGS_HMAC) &&
400 (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
401 hdev->flags |= HASH_FLAGS_HMAC_KEY;
402 stm32_hash_write_key(hdev);
403 if (stm32_hash_wait_busy(hdev))
408 stm32_hash_write(hdev, HASH_DIN, buffer[count]);
411 if (stm32_hash_wait_busy(hdev))
414 stm32_hash_set_nblw(hdev, length);
415 reg = stm32_hash_read(hdev, HASH_STR);
417 stm32_hash_write(hdev, HASH_STR, reg);
418 if (hdev->flags & HASH_FLAGS_HMAC) {
419 if (stm32_hash_wait_busy(hdev))
421 stm32_hash_write_key(hdev);
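Lines 367-421 are the polled CPU path: write the control register, optionally push the HMAC key, then feed the message into the data-in register one 32-bit word at a time before the digest calculation is triggered through HASH_STR. A sketch of just the word-feed loop, with a placeholder EXAMPLE_DIN offset:

#include <linux/io.h>
#include <linux/kernel.h>

#define EXAMPLE_DIN	0x04

/* Push 'length' bytes (already laid out as 32-bit words) into the
 * data-in register; len32 rounds up, as in the listing above. */
static void example_feed_cpu(void __iomem *base, const u32 *buffer,
			     size_t length)
{
	unsigned int len32 = DIV_ROUND_UP(length, sizeof(u32));
	unsigned int count;

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], base + EXAMPLE_DIN);
}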
468 static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
470 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
476 dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
485 err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0);
495 return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
498 if (!(hdev->flags & HASH_FLAGS_INIT))
501 if (stm32_hash_wait_busy(hdev))
506 if (!hdev->pdata->ux500)
507 *preg++ = stm32_hash_read(hdev, HASH_IMR);
508 *preg++ = stm32_hash_read(hdev, HASH_STR);
509 *preg++ = stm32_hash_read(hdev, HASH_CR);
511 *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
518 static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
526 in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
530 dev_err(hdev->dev, "dmaengine_prep_slave error\n");
534 reinit_completion(&hdev->dma_completion);
536 in_desc->callback_param = hdev;
538 hdev->flags |= HASH_FLAGS_FINAL;
539 hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
541 reg = stm32_hash_read(hdev, HASH_CR);
543 if (hdev->pdata->has_mdmat) {
551 stm32_hash_write(hdev, HASH_CR, reg);
553 stm32_hash_set_nblw(hdev, length);
560 dma_async_issue_pending(hdev->dma_lch);
562 if (!wait_for_completion_timeout(&hdev->dma_completion,
566 if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
571 dev_err(hdev->dev, "DMA Error %i\n", err);
572 dmaengine_terminate_all(hdev->dma_lch);
581 struct stm32_hash_dev *hdev = param;
583 complete(&hdev->dma_completion);
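Lines 518-583 show how one DMA transfer is driven: prepare a slave scatter-gather descriptor toward the data-in register, attach a completion callback, submit, kick the channel with dma_async_issue_pending(), then block on the completion with a timeout. A generic dmaengine sketch of that sequence, assuming the channel and scatterlist were set up elsewhere:

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>

struct example_dma_ctx {
	struct dma_chan *chan;
	struct completion done;
};

static void example_dma_callback(void *param)
{
	struct example_dma_ctx *ctx = param;

	complete(&ctx->done);
}

/* Queue one mem-to-device transfer and wait up to 100 ms for it. */
static int example_dma_xfer(struct example_dma_ctx *ctx,
			    struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(ctx->chan, sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	reinit_completion(&ctx->done);
	desc->callback = example_dma_callback;
	desc->callback_param = ctx;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(ctx->chan);

	if (!wait_for_completion_timeout(&ctx->done, msecs_to_jiffies(100)))
		return -ETIMEDOUT;

	return 0;
}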
586 static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
588 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
589 struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
593 if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode == 1) {
594 err = stm32_hash_write_key(hdev);
595 if (stm32_hash_wait_busy(hdev))
598 if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
602 rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
605 dev_err(hdev->dev, "dma_map_sg error\n");
609 err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);
611 dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
617 static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
626 dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
632 chan = dma_request_chan(hdev->dev, "in");
636 hdev->dma_lch = chan;
638 err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
640 dma_release_channel(hdev->dma_lch);
641 hdev->dma_lch = NULL;
642 dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
646 init_completion(&hdev->dma_completion);
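Lines 617-648 acquire and configure the "in" DMA channel during setup: the slave config points the destination at the peripheral's data-in FIFO (hdev->phys_base + HASH_DIN) and a completion object is initialised for the transfer callback. A sketch of equivalent channel setup, with the FIFO physical address supplied by the caller:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

/* Request and configure a mem-to-device channel named "in" in DT;
 * fifo_phys is the physical address of the peripheral data register. */
static struct dma_chan *example_dma_init(struct device *dev,
					 dma_addr_t fifo_phys)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_phys,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 1,
	};
	struct dma_chan *chan;
	int err;

	chan = dma_request_chan(dev, "in");
	if (IS_ERR(chan))
		return chan;

	err = dmaengine_slave_config(chan, &cfg);
	if (err) {
		dma_release_channel(chan);
		return ERR_PTR(err);
	}

	return chan;
}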
651 static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
653 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
660 rctx->sg = hdev->req->src;
661 rctx->total = hdev->req->nbytes;
667 stm32_hash_write_ctrl(hdev);
669 if (hdev->flags & HASH_FLAGS_HMAC) {
670 err = stm32_hash_hmac_dma_send(hdev);
682 if (hdev->dma_mode == 1) {
700 rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
703 dev_err(hdev->dev, "dma_map_sg error\n");
707 err = stm32_hash_xmit_dma(hdev, sg, len, !is_last);
710 dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
718 if (hdev->dma_mode == 1) {
719 if (stm32_hash_wait_busy(hdev))
721 reg = stm32_hash_read(hdev, HASH_CR);
724 stm32_hash_write(hdev, HASH_CR, reg);
729 writesl(hdev->io_base + HASH_DIN, buffer,
732 stm32_hash_set_nblw(hdev, ncp);
733 reg = stm32_hash_read(hdev, HASH_STR);
735 stm32_hash_write(hdev, HASH_STR, reg);
739 if (hdev->flags & HASH_FLAGS_HMAC) {
740 if (stm32_hash_wait_busy(hdev))
742 err = stm32_hash_hmac_dma_send(hdev);
750 struct stm32_hash_dev *hdev = NULL, *tmp;
753 if (!ctx->hdev) {
755 hdev = tmp;
758 ctx->hdev = hdev;
760 hdev = ctx->hdev;
765 return hdev;
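Lines 750-765 are the usual crypto-driver device lookup: the first request walks a global device list, picks a device, and caches it in the transform context; later requests reuse the cached pointer. A generic sketch of that pattern, assuming a driver-global list protected by a spinlock:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_dev {
	struct list_head list;
	/* ... hardware state ... */
};

struct example_tfm_ctx {
	struct example_dev *dev;	/* cached after the first lookup */
};

static LIST_HEAD(example_dev_list);
static DEFINE_SPINLOCK(example_dev_lock);

static struct example_dev *example_find_dev(struct example_tfm_ctx *ctx)
{
	struct example_dev *dev = NULL, *tmp;

	spin_lock_bh(&example_dev_lock);
	if (!ctx->dev) {
		/* Take the first (and typically only) registered device. */
		list_for_each_entry(tmp, &example_dev_list, list) {
			dev = tmp;
			break;
		}
		ctx->dev = dev;
	} else {
		dev = ctx->dev;
	}
	spin_unlock_bh(&example_dev_lock);

	return dev;
}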
773 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
776 if (!hdev->dma_lch || req->nbytes <= rctx->state.blocklen)
780 if (hdev->dma_mode == 1)
800 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
804 rctx->hdev = hdev;
817 if (hdev->pdata->ux500)
832 if (hdev->pdata->ux500)
857 dev_err(hdev->dev, "Error, block too large");
867 dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags);
872 static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
874 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
878 return stm32_hash_dma_send(hdev);
880 return stm32_hash_update_cpu(hdev);
883 static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
885 struct ahash_request *req = hdev->req;
891 return stm32_hash_update_req(hdev);
895 return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1);
903 struct stm32_hash_dev *hdev = rctx->hdev;
906 dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
910 dev_err(hdev->dev, "no fallback engine\n");
917 dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
924 dev_err(hdev->dev, "shash digest error\n");
932 struct stm32_hash_dev *hdev = rctx->hdev;
936 if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY))
942 if (hdev->pdata->ux500)
943 hash[i] = cpu_to_be32(stm32_hash_read(hdev,
946 hash[i] = cpu_to_be32(stm32_hash_read(hdev,
956 reg = stm32_hash_read(rctx->hdev, HASH_SR);
958 stm32_hash_write(rctx->hdev, HASH_SR, reg);
971 struct stm32_hash_dev *hdev = rctx->hdev;
973 if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
978 pm_runtime_mark_last_busy(hdev->dev);
979 pm_runtime_put_autosuspend(hdev->dev);
981 crypto_finalize_hash_request(hdev->engine, req, err);
984 static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
987 return crypto_transfer_hash_request_to_engine(hdev->engine, req);
996 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1001 if (!hdev)
1004 dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
1007 pm_runtime_get_sync(hdev->dev);
1009 hdev->req = req;
1010 hdev->flags = 0;
1018 if (!hdev->pdata->ux500)
1019 stm32_hash_write(hdev, HASH_IMR, *preg++);
1020 stm32_hash_write(hdev, HASH_STR, *preg++);
1021 stm32_hash_write(hdev, HASH_CR, *preg);
1023 stm32_hash_write(hdev, HASH_CR, reg);
1026 stm32_hash_write(hdev, HASH_CSR(i), *preg++);
1028 hdev->flags |= HASH_FLAGS_INIT;
1031 hdev->flags |= HASH_FLAGS_HMAC |
1036 err = stm32_hash_update_req(hdev);
1038 err = stm32_hash_final_req(hdev);
1041 if (err == -EINPROGRESS && hdev->polled) {
1042 if (stm32_hash_wait_busy(hdev))
1045 hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1061 struct stm32_hash_dev *hdev = ctx->hdev;
1065 return stm32_hash_handle_queue(hdev, req);
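Lines 984-1065 route everything through the crypto engine: stm32_hash_handle_queue() just transfers the request to the engine, the engine later calls back into the driver to process it, and the completion path (lines 971-981) reports the result with crypto_finalize_hash_request(). A sketch of that plumbing, with placeholder function names:

#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <linux/err.h>

/* Probe-time setup: one engine instance draining requests in a kthread. */
static struct crypto_engine *example_engine_setup(struct device *dev)
{
	struct crypto_engine *engine;
	int ret;

	engine = crypto_engine_alloc_init(dev, 1);
	if (!engine)
		return ERR_PTR(-ENOMEM);

	ret = crypto_engine_start(engine);
	if (ret) {
		crypto_engine_exit(engine);
		return ERR_PTR(ret);
	}

	return engine;
}

/* Enqueue path: hand the request to the engine ... */
static int example_enqueue(struct crypto_engine *engine,
			   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(engine, req);
}

/* ... and completion path: report the result back to the caller. */
static void example_finish(struct crypto_engine *engine,
			   struct ahash_request *req, int err)
{
	crypto_finalize_hash_request(engine, req, err);
}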
1102 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1113 if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
1162 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1167 if (!hdev->pdata->ux500)
1172 dev_err(hdev->dev, "failed to allocate %s fallback\n",
1176 dev_info(hdev->dev, "allocated %s fallback\n", name);
1229 struct stm32_hash_dev *hdev = dev_id;
1231 if (HASH_FLAGS_CPU & hdev->flags) {
1232 if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
1233 hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
1236 } else if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
1237 hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
1245 stm32_hash_finish_req(hdev->req, 0);
1252 struct stm32_hash_dev *hdev = dev_id;
1255 reg = stm32_hash_read(hdev, HASH_SR);
1257 hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1259 stm32_hash_write(hdev, HASH_IMR, 0);
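Lines 1229-1259 split interrupt handling in two: the hard handler (line 1252) reads HASH_SR, flags the digest as ready and masks further interrupts, while the threaded handler (line 1229) runs in process context and finishes the request. A sketch of registering and wiring up such a threaded IRQ pair:

#include <linux/device.h>
#include <linux/interrupt.h>

/* Hard handler: runs in interrupt context; acknowledge the hardware,
 * mask further interrupts, and defer the heavy lifting to the thread. */
static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	/* dev_id is the per-device state passed at registration time. */
	return IRQ_WAKE_THREAD;
}

/* Threaded handler: runs in process context, may sleep; copy out the
 * digest and complete the request here. */
static irqreturn_t example_irq_thread(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

/* Registration, typically from probe(): */
static int example_request_irq(struct device *dev, int irq, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, example_irq_handler,
					 example_irq_thread, IRQF_ONESHOT,
					 dev_name(dev), dev_id);
}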
1874 static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
1879 for (i = 0; i < hdev->pdata->algs_info_size; i++) {
1880 for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
1882 &hdev->pdata->algs_info[i].algs_list[j]);
1890 dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
1894 &hdev->pdata->algs_info[i].algs_list[j]);
1900 static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
1904 for (i = 0; i < hdev->pdata->algs_info_size; i++) {
1905 for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
1907 &hdev->pdata->algs_info[i].algs_list[j]);
2019 static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
2022 hdev->pdata = of_device_get_match_data(dev);
2023 if (!hdev->pdata) {
2033 struct stm32_hash_dev *hdev;
2038 hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
2039 if (!hdev)
2042 hdev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2043 if (IS_ERR(hdev->io_base))
2044 return PTR_ERR(hdev->io_base);
2046 hdev->phys_base = res->start;
2048 ret = stm32_hash_get_of_match(hdev, dev);
2061 dev_name(dev), hdev);
2068 hdev->polled = true;
2071 hdev->clk = devm_clk_get(&pdev->dev, NULL);
2072 if (IS_ERR(hdev->clk))
2073 return dev_err_probe(dev, PTR_ERR(hdev->clk),
2076 ret = clk_prepare_enable(hdev->clk);
2089 hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
2090 if (IS_ERR(hdev->rst)) {
2091 if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
2096 reset_control_assert(hdev->rst);
2098 reset_control_deassert(hdev->rst);
2101 hdev->dev = dev;
2103 platform_set_drvdata(pdev, hdev);
2105 ret = stm32_hash_dma_init(hdev);
2119 list_add_tail(&hdev->list, &stm32_hash.dev_list);
2123 hdev->engine = crypto_engine_alloc_init(dev, 1);
2124 if (!hdev->engine) {
2129 ret = crypto_engine_start(hdev->engine);
2133 if (hdev->pdata->ux500)
2135 hdev->dma_mode = 0;
2137 hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR) & HASH_HWCFG_DMA_MASK;
2140 ret = stm32_hash_register_algs(hdev);
2145 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
2153 crypto_engine_exit(hdev->engine);
2156 list_del(&hdev->list);
2159 if (hdev->dma_lch)
2160 dma_release_channel(hdev->dma_lch);
2165 clk_disable_unprepare(hdev->clk);
2172 struct stm32_hash_dev *hdev = platform_get_drvdata(pdev);
2175 ret = pm_runtime_get_sync(hdev->dev);
2177 stm32_hash_unregister_algs(hdev);
2179 crypto_engine_exit(hdev->engine);
2182 list_del(&hdev->list);
2185 if (hdev->dma_lch)
2186 dma_release_channel(hdev->dma_lch);
2188 pm_runtime_disable(hdev->dev);
2189 pm_runtime_put_noidle(hdev->dev);
2192 clk_disable_unprepare(hdev->clk);
2198 struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
2200 clk_disable_unprepare(hdev->clk);
2207 struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
2210 ret = clk_prepare_enable(hdev->clk);
2212 dev_err(hdev->dev, "Failed to prepare_enable clock\n");
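Finally, the runtime-PM callbacks at lines 2198-2213 only gate the peripheral clock; combined with pm_runtime_mark_last_busy()/pm_runtime_put_autosuspend() in the request-completion path (lines 978-979) this gives standard autosuspend clock gating. A sketch of that callback pair, assuming driver data that holds just the clock:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

struct example_pm_dev {
	struct clk *clk;
};

static int example_runtime_suspend(struct device *dev)
{
	struct example_pm_dev *edev = dev_get_drvdata(dev);

	clk_disable_unprepare(edev->clk);

	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	struct example_pm_dev *edev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(edev->clk);
	if (ret)
		dev_err(dev, "failed to enable clock\n");

	return ret;
}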