Lines matching defs:hdev (definitions and uses of struct stm32_hash_dev *hdev in the STM32 HASH driver, stm32-hash.c)
116 struct stm32_hash_dev *hdev;
124 struct stm32_hash_dev *hdev;
195 static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
197 return readl_relaxed(hdev->io_base + offset);
200 static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
203 writel_relaxed(value, hdev->io_base + offset);
206 static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
210 return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
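The three helpers above wrap relaxed MMIO access and busy polling. A minimal sketch of the same pattern, reusing the driver's HASH_SR and HASH_SR_BUSY macros; the 10 us poll interval and 10 ms timeout here are illustrative assumptions, not the driver's actual constants:

#include <linux/io.h>
#include <linux/iopoll.h>

/* Sketch only: poll HASH_SR until the BUSY bit clears, sampling every
 * 10 us and giving up after 10 ms (values assumed for illustration). */
static inline int example_wait_busy(void __iomem *io_base)
{
	u32 status;

	return readl_relaxed_poll_timeout(io_base + HASH_SR, status,
					  !(status & HASH_SR_BUSY),
					  10, 10000);
}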
214 static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
218 reg = stm32_hash_read(hdev, HASH_STR);
221 stm32_hash_write(hdev, HASH_STR, reg);
224 static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
226 struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
233 stm32_hash_set_nblw(hdev, keylen);
236 stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
241 reg = stm32_hash_read(hdev, HASH_STR);
243 stm32_hash_write(hdev, HASH_STR, reg);
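Pieced together from the matches above, the key-feed sequence in stm32_hash_write_key() is: program NBLW with the number of valid bits in the last word, push the key into HASH_DIN one 32-bit word at a time, then set DCAL in HASH_STR to start processing. A hedged reconstruction, with error handling and the wait-busy call elided:

/* Sketch, assuming the helpers and register names matched above. */
static void example_write_key(struct stm32_hash_dev *hdev,
			      const u8 *key, int keylen)
{
	u32 reg;

	stm32_hash_set_nblw(hdev, keylen);

	while (keylen > 0) {
		stm32_hash_write(hdev, HASH_DIN, *(const u32 *)key);
		keylen -= 4;
		key += 4;
	}

	/* setting HASH_STR_DCAL kicks off the digest calculation */
	reg = stm32_hash_read(hdev, HASH_STR);
	reg |= HASH_STR_DCAL;
	stm32_hash_write(hdev, HASH_STR, reg);
}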
251 static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
253 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
254 struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
259 if (!(hdev->flags & HASH_FLAGS_INIT)) {
280 hdev->flags |= HASH_FLAGS_HMAC;
286 stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
288 stm32_hash_write(hdev, HASH_CR, reg);
290 hdev->flags |= HASH_FLAGS_INIT;
292 dev_dbg(hdev->dev, "Write Control %x\n", reg);
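stm32_hash_write_ctrl() programs the control path once per request: on the first pass it unmasks the digest-complete interrupt (HASH_DCIE), writes HASH_CR, and latches HASH_FLAGS_INIT so later calls skip the setup. In outline, with the algorithm, data-type and HMAC mode bit fields elided since their exact encoding comes from the STM32 reference manual and the request context:

static void example_write_ctrl(struct stm32_hash_dev *hdev)
{
	/* algo, data-type and HMAC mode bits elided; the real function
	 * derives them from the request context */
	u32 reg = HASH_CR_INIT;

	if (hdev->flags & HASH_FLAGS_INIT)
		return;

	stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);	/* irq on digest done */
	stm32_hash_write(hdev, HASH_CR, reg);
	hdev->flags |= HASH_FLAGS_INIT;
}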
330 static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
338 hdev->flags |= HASH_FLAGS_FINAL;
342 dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
345 hdev->flags |= HASH_FLAGS_CPU;
347 stm32_hash_write_ctrl(hdev);
349 if (stm32_hash_wait_busy(hdev))
352 if ((hdev->flags & HASH_FLAGS_HMAC) &&
353 (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
354 hdev->flags |= HASH_FLAGS_HMAC_KEY;
355 stm32_hash_write_key(hdev);
356 if (stm32_hash_wait_busy(hdev))
361 stm32_hash_write(hdev, HASH_DIN, buffer[count]);
364 stm32_hash_set_nblw(hdev, length);
365 reg = stm32_hash_read(hdev, HASH_STR);
367 stm32_hash_write(hdev, HASH_STR, reg);
368 if (hdev->flags & HASH_FLAGS_HMAC) {
369 if (stm32_hash_wait_busy(hdev))
371 stm32_hash_write_key(hdev);
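The CPU path in stm32_hash_xmit_cpu() is a programmed-I/O loop: write the control setup, wait for the core, feed the data FIFO a word at a time, and close the final block with NBLW plus DCAL. A hedged sketch of its core; the HMAC key interleave matched at 352-356 and 368-371 is elided, and the -ETIMEDOUT return is my assumption:

static int example_xmit_cpu(struct stm32_hash_dev *hdev,
			    const u8 *buf, size_t length, int final)
{
	unsigned int count, len32 = DIV_ROUND_UP(length, sizeof(u32));
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	stm32_hash_write_ctrl(hdev);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
	}

	return 0;
}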
379 static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
381 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
384 dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);
393 err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
401 err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
408 static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
416 in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
420 dev_err(hdev->dev, "dmaengine_prep_slave error\n");
424 reinit_completion(&hdev->dma_completion);
426 in_desc->callback_param = hdev;
428 hdev->flags |= HASH_FLAGS_FINAL;
429 hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
431 reg = stm32_hash_read(hdev, HASH_CR);
440 stm32_hash_write(hdev, HASH_CR, reg);
442 stm32_hash_set_nblw(hdev, length);
449 dma_async_issue_pending(hdev->dma_lch);
451 if (!wait_for_completion_timeout(&hdev->dma_completion,
455 if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
460 dev_err(hdev->dev, "DMA Error %i\n", err);
461 dmaengine_terminate_all(hdev->dma_lch);
470 struct stm32_hash_dev *hdev = param;
472 complete(&hdev->dma_completion);
474 hdev->flags |= HASH_FLAGS_DMA_READY;
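The DMA path follows the standard dmaengine flow: prepare a slave-sg descriptor aimed at the HASH_DIN FIFO, attach the completion callback matched just above, submit, issue, and wait with a timeout. A hedged sketch; the descriptor flags and the 100 ms timeout are assumptions on my part, and the DMA-enable bits written into HASH_CR (431-440) plus the NBLW setup are elided:

#include <linux/dmaengine.h>

static int example_xmit_dma(struct stm32_hash_dev *hdev,
			    struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	/* re-arm before submit; the callback above completes it */
	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	cookie = dmaengine_submit(in_desc);
	if (dma_submit_error(cookie))
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_timeout(&hdev->dma_completion,
					 msecs_to_jiffies(100))) {
		dmaengine_terminate_all(hdev->dma_lch);
		return -ETIMEDOUT;
	}

	return 0;
}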
477 static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
479 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
480 struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
484 if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
485 err = stm32_hash_write_key(hdev);
486 if (stm32_hash_wait_busy(hdev))
489 if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
493 rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
496 dev_err(hdev->dev, "dma_map_sg error\n");
500 err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);
502 dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
508 static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
517 dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
519 dma_conf.src_maxburst = hdev->dma_maxburst;
520 dma_conf.dst_maxburst = hdev->dma_maxburst;
523 chan = dma_request_chan(hdev->dev, "in");
527 hdev->dma_lch = chan;
529 err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
531 dma_release_channel(hdev->dma_lch);
532 hdev->dma_lch = NULL;
533 dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
537 init_completion(&hdev->dma_completion);
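Channel setup in stm32_hash_dma_init() is the usual request/configure pair: the slave config points the destination at the HASH_DIN FIFO with the burst size parsed in stm32_hash_get_of_match() (1424-1426). A sketch along those lines; the 4-byte destination address width is my assumption:

static int example_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf = {
		.direction      = DMA_MEM_TO_DEV,
		.dst_addr       = hdev->phys_base + HASH_DIN,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,	/* assumed */
		.src_maxburst   = hdev->dma_maxburst,
		.dst_maxburst   = hdev->dma_maxburst,
	};
	struct dma_chan *chan;
	int err;

	chan = dma_request_chan(hdev->dev, "in");
	if (IS_ERR(chan))
		return PTR_ERR(chan);
	hdev->dma_lch = chan;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);
	return 0;
}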
542 static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
544 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
550 rctx->sg = hdev->req->src;
551 rctx->total = hdev->req->nbytes;
558 stm32_hash_write_ctrl(hdev);
560 if (hdev->flags & HASH_FLAGS_HMAC) {
561 err = stm32_hash_hmac_dma_send(hdev);
571 if (hdev->dma_mode == 1) {
589 rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
592 dev_err(hdev->dev, "dma_map_sg error\n");
596 err = stm32_hash_xmit_dma(hdev, sg, len,
599 dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
605 if (hdev->dma_mode == 1) {
606 if (stm32_hash_wait_busy(hdev))
608 reg = stm32_hash_read(hdev, HASH_CR);
611 stm32_hash_write(hdev, HASH_CR, reg);
616 writesl(hdev->io_base + HASH_DIN, buffer,
619 stm32_hash_set_nblw(hdev, ncp);
620 reg = stm32_hash_read(hdev, HASH_STR);
622 stm32_hash_write(hdev, HASH_STR, reg);
626 if (hdev->flags & HASH_FLAGS_HMAC) {
627 if (stm32_hash_wait_busy(hdev))
629 err = stm32_hash_hmac_dma_send(hdev);
637 struct stm32_hash_dev *hdev = NULL, *tmp;
640 if (!ctx->hdev) {
642 hdev = tmp;
645 ctx->hdev = hdev;
647 hdev = ctx->hdev;
652 return hdev;
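stm32_hash_find_dev() implements lazy device binding: the first request from a tfm takes whichever device is on the global stm32_hash.dev_list and caches it in the context; later requests reuse it. Reconstructed from the matches, assuming the driver's tfm context type; the list-mutex locking present in the real code is elided:

static struct stm32_hash_dev *example_find_dev(struct stm32_hash_ctx *ctx)
{
	struct stm32_hash_dev *hdev = NULL, *tmp;

	if (!ctx->hdev) {
		/* take the first (and typically only) registered device */
		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		ctx->hdev = hdev;
	} else {
		hdev = ctx->hdev;
	}

	return hdev;
}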
659 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
666 if (hdev->dma_mode == 1)
686 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
688 rctx->hdev = hdev;
721 dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);
726 static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
728 return stm32_hash_update_cpu(hdev);
731 static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
733 struct ahash_request *req = hdev->req;
741 err = stm32_hash_dma_send(hdev);
743 err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
773 hash[i] = cpu_to_be32(stm32_hash_read(rctx->hdev,
792 struct stm32_hash_dev *hdev = rctx->hdev;
794 if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
797 hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
806 pm_runtime_mark_last_busy(hdev->dev);
807 pm_runtime_put_autosuspend(hdev->dev);
809 crypto_finalize_hash_request(hdev->engine, req, err);
812 static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
815 pm_runtime_get_sync(hdev->dev);
817 if (!(HASH_FLAGS_INIT & hdev->flags)) {
818 stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
819 stm32_hash_write(hdev, HASH_STR, 0);
820 stm32_hash_write(hdev, HASH_DIN, 0);
821 stm32_hash_write(hdev, HASH_IMR, 0);
822 hdev->err = 0;
831 static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
834 return crypto_transfer_hash_request_to_engine(hdev->engine, req);
842 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
845 if (!hdev)
848 hdev->req = req;
852 dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
855 return stm32_hash_hw_init(hdev, rctx);
863 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
867 if (!hdev)
870 hdev->req = req;
875 err = stm32_hash_update_req(hdev);
877 err = stm32_hash_final_req(hdev);
890 struct stm32_hash_dev *hdev = ctx->hdev;
894 return stm32_hash_handle_queue(hdev, req);
929 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
934 if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
960 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
964 pm_runtime_get_sync(hdev->dev);
966 while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
975 *preg++ = stm32_hash_read(hdev, HASH_IMR);
976 *preg++ = stm32_hash_read(hdev, HASH_STR);
977 *preg++ = stm32_hash_read(hdev, HASH_CR);
979 *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
981 pm_runtime_mark_last_busy(hdev->dev);
982 pm_runtime_put_autosuspend(hdev->dev);
993 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1002 pm_runtime_get_sync(hdev->dev);
1004 stm32_hash_write(hdev, HASH_IMR, *preg++);
1005 stm32_hash_write(hdev, HASH_STR, *preg++);
1006 stm32_hash_write(hdev, HASH_CR, *preg);
1008 stm32_hash_write(hdev, HASH_CR, reg);
1011 stm32_hash_write(hdev, HASH_CSR(i), *preg++);
1013 pm_runtime_mark_last_busy(hdev->dev);
1014 pm_runtime_put_autosuspend(hdev->dev);
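The export/import pair gives software save/restore of a partially hashed state: export spins until HASH_SR_BUSY clears, then snapshots HASH_IMR, HASH_STR, HASH_CR and the HASH_CSR(i) context registers; import writes them back in the same order. The save side, roughly; the hw_context field name and the HASH_CSR_REGISTER_NUMBER count are assumptions based on the driver's conventions:

static void example_save_context(struct stm32_hash_dev *hdev,
				 struct stm32_hash_request_ctx *rctx)
{
	u32 *preg = rctx->hw_context;	/* assumed save-area name */
	int i;

	/* wait for the core to go idle before sampling its state */
	while (stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY)
		cpu_relax();

	*preg++ = stm32_hash_read(hdev, HASH_IMR);
	*preg++ = stm32_hash_read(hdev, HASH_STR);
	*preg++ = stm32_hash_read(hdev, HASH_CR);
	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
}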
1082 struct stm32_hash_dev *hdev = dev_id;
1084 if (HASH_FLAGS_CPU & hdev->flags) {
1085 if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
1086 hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
1089 } else if (HASH_FLAGS_DMA_READY & hdev->flags) {
1090 if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
1091 hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
1100 stm32_hash_finish_req(hdev->req, 0);
1107 struct stm32_hash_dev *hdev = dev_id;
1110 reg = stm32_hash_read(hdev, HASH_SR);
1113 stm32_hash_write(hdev, HASH_SR, reg);
1114 hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1116 stm32_hash_write(hdev, HASH_IMR, 0);
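Interrupt handling is split the usual two-stage way: the hard handler checks HASH_SR for digest-complete, acks it, masks further interrupts via HASH_IMR, and wakes the thread; the threaded handler (1082-1100) then finishes the request in sleepable context. The hard half, sketched; the HASH_SR_DCIS bit name follows the driver's macro convention:

#include <linux/interrupt.h>

static irqreturn_t example_hash_irq_handler(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_SR);
	if (reg & HASH_SR_DCIS) {
		reg &= ~HASH_SR_DCIS;
		stm32_hash_write(hdev, HASH_SR, reg);	/* ack */
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		/* stop interrupt generation until the thread has run */
		stm32_hash_write(hdev, HASH_IMR, 0);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}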
1333 static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
1338 for (i = 0; i < hdev->pdata->algs_info_size; i++) {
1339 for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
1341 &hdev->pdata->algs_info[i].algs_list[j]);
1349 dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
1353 &hdev->pdata->algs_info[i].algs_list[j]);
1359 static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
1363 for (i = 0; i < hdev->pdata->algs_info_size; i++) {
1364 for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
1366 &hdev->pdata->algs_info[i].algs_list[j]);
1414 static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
1417 hdev->pdata = of_device_get_match_data(dev);
1418 if (!hdev->pdata) {
1424 &hdev->dma_maxburst)) {
1426 hdev->dma_maxburst = 0;
1434 struct stm32_hash_dev *hdev;
1439 hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
1440 if (!hdev)
1444 hdev->io_base = devm_ioremap_resource(dev, res);
1445 if (IS_ERR(hdev->io_base))
1446 return PTR_ERR(hdev->io_base);
1448 hdev->phys_base = res->start;
1450 ret = stm32_hash_get_of_match(hdev, dev);
1460 dev_name(dev), hdev);
1466 hdev->clk = devm_clk_get(&pdev->dev, NULL);
1467 if (IS_ERR(hdev->clk))
1468 return dev_err_probe(dev, PTR_ERR(hdev->clk),
1471 ret = clk_prepare_enable(hdev->clk);
1484 hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
1485 if (IS_ERR(hdev->rst)) {
1486 if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
1491 reset_control_assert(hdev->rst);
1493 reset_control_deassert(hdev->rst);
1496 hdev->dev = dev;
1498 platform_set_drvdata(pdev, hdev);
1500 ret = stm32_hash_dma_init(hdev);
1512 list_add_tail(&hdev->list, &stm32_hash.dev_list);
1516 hdev->engine = crypto_engine_alloc_init(dev, 1);
1517 if (!hdev->engine) {
1522 ret = crypto_engine_start(hdev->engine);
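A note on the engine bring-up above: in crypto_engine_alloc_init(dev, 1) of this kernel vintage the second argument is the bool rt flag (run the engine kthread at realtime priority), not a queue depth. crypto_engine_start() then launches the kthread that pulls requests handed over via crypto_transfer_hash_request_to_engine() (1834, see stm32_hash_handle_queue above).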
1526 hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
1529 ret = stm32_hash_register_algs(hdev);
1534 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
1542 crypto_engine_exit(hdev->engine);
1545 list_del(&hdev->list);
1548 if (hdev->dma_lch)
1549 dma_release_channel(hdev->dma_lch);
1554 clk_disable_unprepare(hdev->clk);
1561 struct stm32_hash_dev *hdev;
1564 hdev = platform_get_drvdata(pdev);
1565 if (!hdev)
1568 ret = pm_runtime_get_sync(hdev->dev);
1570 stm32_hash_unregister_algs(hdev);
1572 crypto_engine_exit(hdev->engine);
1575 list_del(&hdev->list);
1578 if (hdev->dma_lch)
1579 dma_release_channel(hdev->dma_lch);
1581 pm_runtime_disable(hdev->dev);
1582 pm_runtime_put_noidle(hdev->dev);
1585 clk_disable_unprepare(hdev->clk);
1593 struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
1595 clk_disable_unprepare(hdev->clk);
1602 struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
1605 ret = clk_prepare_enable(hdev->clk);
1607 dev_err(hdev->dev, "Failed to prepare_enable clock\n");
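The runtime-PM hooks matched at the end of the listing reduce to gating the bus clock. A sketch, reusing the calls shown above: suspend disables the clock, resume re-enables it and propagates failure to the PM core:

#include <linux/clk.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}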