Lines matching references to dev (SAHARA security accelerator driver, drivers/crypto/sahara.c)
230 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
232 writel(data, dev->regs_base + reg);
235 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
237 return readl(dev->regs_base + reg);
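
The two helpers above are thin wrappers around writel()/readl() on the device's mapped register window; every other register access in this listing goes through them. A minimal usage sketch of that pattern follows; DEMO_REG_CTRL is a made-up offset, not part of the SAHARA register map.

	/* Sketch only: how the accessors above are meant to be used.
	 * DEMO_REG_CTRL is a hypothetical register offset. */
	#define DEMO_REG_CTRL	0x00

	static void demo_kick(struct sahara_dev *dev)
	{
		u32 val;

		sahara_write(dev, 0x1, DEMO_REG_CTRL);	/* writel(data, regs_base + reg) */
		val = sahara_read(dev, DEMO_REG_CTRL);	/* readl(regs_base + reg) */
		dev_dbg(dev->device, "ctrl = 0x%08x\n", val);
	}
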
240 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
246 if (dev->flags & FLAGS_CBC) {
251 if (dev->flags & FLAGS_ENCRYPT) {
259 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
319 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
324 dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
326 dev_err(dev->device, " - %s.\n", sahara_err_src[source]);
330 dev_err(dev->device, " * DMA read.\n");
332 dev_err(dev->device, " * DMA write.\n");
334 dev_err(dev->device, " * %s.\n",
336 dev_err(dev->device, " * %s.\n",
339 dev_err(dev->device, " * %s.\n",
341 dev_err(dev->device, " * %s.\n",
344 dev_err(dev->device, "\n");
349 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
358 dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
361 dev_dbg(dev->device, " - State = %d:\n", state);
363 dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");
365 dev_dbg(dev->device, " * %s.\n",
369 dev_dbg(dev->device, " - DAR Full.\n");
371 dev_dbg(dev->device, " - Error.\n");
373 dev_dbg(dev->device, " - Secure.\n");
375 dev_dbg(dev->device, " - Fail.\n");
377 dev_dbg(dev->device, " - RNG Reseed Request.\n");
379 dev_dbg(dev->device, " - RNG Active.\n");
381 dev_dbg(dev->device, " - MDHA Active.\n");
383 dev_dbg(dev->device, " - SKHA Active.\n");
386 dev_dbg(dev->device, " - Batch Mode.\n");
388 dev_dbg(dev->device, " - Dedicated Mode.\n");
390 dev_dbg(dev->device, " - Debug Mode.\n");
392 dev_dbg(dev->device, " - Internal state = 0x%02x\n",
395 dev_dbg(dev->device, "Current DAR: 0x%08x\n",
396 sahara_read(dev, SAHARA_REG_CDAR));
397 dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
398 sahara_read(dev, SAHARA_REG_IDAR));
401 static void sahara_dump_descriptors(struct sahara_dev *dev)
409 dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
410 i, &dev->hw_phys_desc[i]);
411 dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
412 dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
413 dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
414 dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
415 dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
416 dev_dbg(dev->device, "\tnext = 0x%08x\n",
417 dev->hw_desc[i]->next);
419 dev_dbg(dev->device, "\n");
422 static void sahara_dump_links(struct sahara_dev *dev)
430 dev_dbg(dev->device, "Link (%d) (%pad):\n",
431 i, &dev->hw_phys_link[i]);
432 dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
433 dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
434 dev_dbg(dev->device, "\tnext = 0x%08x\n",
435 dev->hw_link[i]->next);
437 dev_dbg(dev->device, "\n");
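
The two dump helpers above print the in-memory descriptors and link entries that the driver shares with the SAHARA DMA engine. Inferred only from the fields they print, the structures have roughly the following shape; exact names and field widths are assumptions based on this listing, not copied from the driver's definitions.

	/* Approximate layout, inferred from sahara_dump_descriptors()/_links(). */
	struct sahara_hw_desc {
		u32 hdr;	/* operation header (key load, data link, hash, ...) */
		u32 len1;	/* length of the chain reachable through p1 */
		u32 p1;		/* physical address of the first input link */
		u32 len2;	/* length of the chain reachable through p2 */
		u32 p2;		/* physical address of the first output link */
		u32 next;	/* physical address of the next descriptor, 0 = last */
	};

	struct sahara_hw_link {
		u32 len;	/* length of this buffer fragment */
		u32 p;		/* DMA address of the buffer fragment */
		u32 next;	/* physical address of the next link, 0 = end of chain */
	};
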
440 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
442 struct sahara_ctx *ctx = dev->ctx;
449 memcpy(dev->key_base, ctx->key, ctx->keylen);
451 if (dev->flags & FLAGS_CBC) {
452 dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
453 dev->hw_desc[idx]->p1 = dev->iv_phys_base;
455 dev->hw_desc[idx]->len1 = 0;
456 dev->hw_desc[idx]->p1 = 0;
458 dev->hw_desc[idx]->len2 = ctx->keylen;
459 dev->hw_desc[idx]->p2 = dev->key_phys_base;
460 dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
461 dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
466 dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
467 if (dev->nb_in_sg < 0) {
468 dev_err(dev->device, "Invalid numbers of src SG.\n");
469 return dev->nb_in_sg;
471 dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
472 if (dev->nb_out_sg < 0) {
473 dev_err(dev->device, "Invalid numbers of dst SG.\n");
474 return dev->nb_out_sg;
476 if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
477 dev_err(dev->device, "not enough hw links (%d)\n",
478 dev->nb_in_sg + dev->nb_out_sg);
482 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
484 if (ret != dev->nb_in_sg) {
485 dev_err(dev->device, "couldn't map in sg\n");
489 ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
491 if (ret != dev->nb_out_sg) {
492 dev_err(dev->device, "couldn't map out sg\n");
497 dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
498 sg = dev->in_sg;
499 len = dev->total;
500 for (i = 0; i < dev->nb_in_sg; i++) {
501 dev->hw_link[i]->len = min(len, sg->length);
502 dev->hw_link[i]->p = sg->dma_address;
503 if (i == (dev->nb_in_sg - 1)) {
504 dev->hw_link[i]->next = 0;
507 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
513 dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
514 sg = dev->out_sg;
515 len = dev->total;
516 for (j = i; j < dev->nb_out_sg + i; j++) {
517 dev->hw_link[j]->len = min(len, sg->length);
518 dev->hw_link[j]->p = sg->dma_address;
519 if (j == (dev->nb_out_sg + i - 1)) {
520 dev->hw_link[j]->next = 0;
523 dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
529 dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
530 dev->hw_desc[idx]->len1 = dev->total;
531 dev->hw_desc[idx]->len2 = dev->total;
532 dev->hw_desc[idx]->next = 0;
534 sahara_dump_descriptors(dev);
535 sahara_dump_links(dev);
537 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
542 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
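
Taken together, sahara_hw_descriptor_create() builds a fixed two-descriptor chain per AES request: descriptor 0 loads the key (and the IV when CBC is selected), descriptor 1 points at two link chains built from the source and destination scatterlists. A condensed summary of that layout, as a comment rather than new driver code:

	/*
	 * AES request chain built above (summary of the listing, not extra code):
	 *
	 *   hw_desc[0]                          hw_desc[1]
	 *     hdr  = sahara_aes_key_hdr()         hdr  = sahara_aes_data_link_hdr()
	 *     p1   = iv_phys_base (CBC only)      p1   = hw_phys_link[0] -> in_sg chain
	 *     p2   = key_phys_base                p2   = hw_phys_link[i] -> out_sg chain
	 *     next = hw_phys_desc[1]              len1 = len2 = dev->total
	 *                                         next = 0 (end of chain)
	 *
	 * The engine is started by writing hw_phys_desc[0] to SAHARA_REG_DAR.
	 */
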
566 struct sahara_dev *dev = dev_ptr;
573 dev_dbg(dev->device,
578 dev->total = req->cryptlen;
579 dev->in_sg = req->src;
580 dev->out_sg = req->dst;
585 dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
587 if ((dev->flags & FLAGS_CBC) && req->iv) {
590 memcpy(dev->iv_base, req->iv, ivsize);
592 if (!(dev->flags & FLAGS_ENCRYPT)) {
600 dev->ctx = ctx;
602 reinit_completion(&dev->dma_completion);
604 ret = sahara_hw_descriptor_create(dev);
608 timeout = wait_for_completion_timeout(&dev->dma_completion,
611 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
613 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
617 dev_err(dev->device, "AES timeout\n");
621 if ((dev->flags & FLAGS_CBC) && req->iv)
677 struct sahara_dev *dev = dev_ptr;
686 dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
690 dev_err(dev->device,
697 spin_lock_bh(&dev->queue_spinlock);
698 err = crypto_enqueue_request(&dev->queue, &req->base);
699 spin_unlock_bh(&dev->queue_spinlock);
701 wake_up_process(dev->kthread);
751 static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
774 static int sahara_sha_hw_links_create(struct sahara_dev *dev,
783 dev->in_sg = rctx->in_sg;
785 dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
786 if (dev->nb_in_sg < 0) {
787 dev_err(dev->device, "Invalid numbers of src SG.\n");
788 return dev->nb_in_sg;
790 if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
791 dev_err(dev->device, "not enough hw links (%d)\n",
792 dev->nb_in_sg + dev->nb_out_sg);
796 sg = dev->in_sg;
797 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
802 for (i = start; i < dev->nb_in_sg + start; i++) {
803 dev->hw_link[i]->len = min(len, sg->length);
804 dev->hw_link[i]->p = sg->dma_address;
805 if (i == (dev->nb_in_sg + start - 1)) {
806 dev->hw_link[i]->next = 0;
809 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
817 static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
827 dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
830 dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
832 dev->hw_desc[index]->len1 = rctx->total;
833 if (dev->hw_desc[index]->len1 == 0) {
835 dev->hw_desc[index]->p1 = 0;
839 dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
840 i = sahara_sha_hw_links_create(dev, rctx, index);
847 dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
851 dev->hw_link[i]->p = dev->context_phys_base;
853 dev->hw_link[i]->len = result_len;
854 dev->hw_desc[index]->len2 = result_len;
856 dev->hw_link[i]->next = 0;
870 static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
875 dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
877 dev->hw_desc[index]->len1 = rctx->context_size;
878 dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
879 dev->hw_desc[index]->len2 = 0;
880 dev->hw_desc[index]->p2 = 0;
882 dev->hw_link[index]->len = rctx->context_size;
883 dev->hw_link[index]->p = dev->context_phys_base;
884 dev->hw_link[index]->next = 0;
949 struct sahara_dev *dev = dev_ptr;
959 ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
963 dev->hw_desc[0]->next = 0;
966 memcpy(dev->context_base, rctx->context, rctx->context_size);
968 sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
969 dev->hw_desc[0]->next = dev->hw_phys_desc[1];
970 ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
974 dev->hw_desc[1]->next = 0;
977 sahara_dump_descriptors(dev);
978 sahara_dump_links(dev);
980 reinit_completion(&dev->dma_completion);
982 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
984 timeout = wait_for_completion_timeout(&dev->dma_completion,
988 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
992 dev_err(dev->device, "SHA timeout\n");
996 memcpy(rctx->context, dev->context_base, rctx->context_size);
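
The SHA path uses the same descriptor machinery, but switches between one and two descriptors depending on whether an intermediate context already exists. The comment below condenses what the listing shows; it is a summary, not additional driver code.

	/*
	 * SHA descriptor chains (condensed from the listing above):
	 *
	 *   first update:    hw_desc[0] = data descriptor, hw_desc[0]->next = 0
	 *   later updates:   hw_desc[0] = context-load descriptor
	 *                    hw_desc[0]->next = hw_phys_desc[1]
	 *                    hw_desc[1] = data descriptor, hw_desc[1]->next = 0
	 *
	 * The intermediate digest lives in dev->context_base and is copied back
	 * into rctx->context once the operation completes.
	 */
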
1006 struct sahara_dev *dev = (struct sahara_dev *)data;
1014 spin_lock_bh(&dev->queue_spinlock);
1015 backlog = crypto_get_backlog(&dev->queue);
1016 async_req = crypto_dequeue_request(&dev->queue);
1017 spin_unlock_bh(&dev->queue_spinlock);
1050 struct sahara_dev *dev = dev_ptr;
1063 spin_lock_bh(&dev->queue_spinlock);
1064 ret = crypto_enqueue_request(&dev->queue, &req->base);
1065 spin_unlock_bh(&dev->queue_spinlock);
1067 wake_up_process(dev->kthread);
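
Both the AES and SHA entry points feed the same software queue: they enqueue the request under queue_spinlock and wake the driver's kthread, which dequeues and processes one request at a time. The sketch below shows that producer/consumer pattern; demo_queue_manage() is an illustrative stand-in for the driver's kthread function, with dispatch and backlog notification elided.

	/* Sketch of the queue handling visible above (illustrative, not the
	 * driver's actual kthread function). */
	static int demo_queue_manage(void *data)
	{
		struct sahara_dev *dev = data;

		do {
			struct crypto_async_request *async_req;

			__set_current_state(TASK_INTERRUPTIBLE);

			spin_lock_bh(&dev->queue_spinlock);
			async_req = crypto_dequeue_request(&dev->queue);
			spin_unlock_bh(&dev->queue_spinlock);

			if (async_req) {
				__set_current_state(TASK_RUNNING);
				/* dispatch to the AES or SHA processing routine,
				 * then complete the request with its return code */
			} else {
				schedule();	/* woken by wake_up_process(dev->kthread) */
			}
		} while (!kthread_should_stop());

		return 0;
	}
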
1240 struct sahara_dev *dev = (struct sahara_dev *)data;
1241 unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1242 unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1244 sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1247 sahara_decode_status(dev, stat);
1252 dev->error = 0;
1254 sahara_decode_error(dev, err);
1255 dev->error = -EINVAL;
1258 complete(&dev->dma_completion);
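
The interrupt handler above is the other half of the completion that the AES and SHA process paths block on: the submitting side re-arms dma_completion, writes the first descriptor address to SAHARA_REG_DAR and waits; the handler clears the interrupt, decodes the status and error registers, sets dev->error and calls complete(). A condensed view of the submitting side follows; the timeout value is illustrative, not the driver's constant.

	/* Submission/wait side of the handshake (fragment of the process paths). */
	reinit_completion(&dev->dma_completion);
	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	if (!wait_for_completion_timeout(&dev->dma_completion,
					 msecs_to_jiffies(1000)))	/* illustrative value */
		dev_err(dev->device, "timeout waiting for the SAHARA engine\n");

	/* on completion, dev->error holds 0 or -EINVAL as set by the IRQ handler */
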
1264 static int sahara_register_algs(struct sahara_dev *dev)
1281 if (dev->version > SAHARA_VERSION_3)
1305 static void sahara_unregister_algs(struct sahara_dev *dev)
1315 if (dev->version > SAHARA_VERSION_3)
1335 struct sahara_dev *dev;
1341 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1342 if (!dev)
1345 dev->device = &pdev->dev;
1346 platform_set_drvdata(pdev, dev);
1349 dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1350 if (IS_ERR(dev->regs_base))
1351 return PTR_ERR(dev->regs_base);
1358 err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1359 0, dev_name(&pdev->dev), dev);
1361 dev_err(&pdev->dev, "failed to request irq\n");
1366 dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1367 if (IS_ERR(dev->clk_ipg)) {
1368 dev_err(&pdev->dev, "Could not get ipg clock\n");
1369 return PTR_ERR(dev->clk_ipg);
1372 dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1373 if (IS_ERR(dev->clk_ahb)) {
1374 dev_err(&pdev->dev, "Could not get ahb clock\n");
1375 return PTR_ERR(dev->clk_ahb);
1379 dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1381 &dev->hw_phys_desc[0], GFP_KERNEL);
1382 if (!dev->hw_desc[0]) {
1383 dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1386 dev->hw_desc[1] = dev->hw_desc[0] + 1;
1387 dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1391 dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1392 &dev->key_phys_base, GFP_KERNEL);
1393 if (!dev->key_base) {
1394 dev_err(&pdev->dev, "Could not allocate memory for key\n");
1397 dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1398 dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1401 dev->context_base = dmam_alloc_coherent(&pdev->dev,
1403 &dev->context_phys_base, GFP_KERNEL);
1404 if (!dev->context_base) {
1405 dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1410 dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1412 &dev->hw_phys_link[0], GFP_KERNEL);
1413 if (!dev->hw_link[0]) {
1414 dev_err(&pdev->dev, "Could not allocate hw links\n");
1418 dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1420 dev->hw_link[i] = dev->hw_link[i - 1] + 1;
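
Everything allocated in this part of probe() comes from dmam_alloc_coherent(), so CPU pointers and device-visible physical addresses are kept side by side. The comment below summarizes the layout implied by the listing; sizes not visible here are not guessed at.

	/*
	 * DMA-coherent buffers set up in probe (summary of the listing above):
	 *
	 *   hw_desc[0], hw_desc[1]     two consecutive hardware descriptors;
	 *                              hw_phys_desc[1] follows hw_phys_desc[0]
	 *   key_base / key_phys_base   AES key area (2 * AES_KEYSIZE_128 total)
	 *   iv_base  / iv_phys_base    IV, placed AES_KEYSIZE_128 bytes after the key
	 *   context_base               MDHA (hash) context buffer
	 *   hw_link[]                  array of link entries; hw_phys_link[i] is
	 *                              derived from hw_phys_link[i - 1]
	 */
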
1423 crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1425 spin_lock_init(&dev->queue_spinlock);
1427 dev_ptr = dev;
1429 dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1430 if (IS_ERR(dev->kthread)) {
1431 return PTR_ERR(dev->kthread);
1434 init_completion(&dev->dma_completion);
1436 err = clk_prepare_enable(dev->clk_ipg);
1439 err = clk_prepare_enable(dev->clk_ahb);
1443 version = sahara_read(dev, SAHARA_REG_VERSION);
1444 if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1447 } else if (of_device_is_compatible(pdev->dev.of_node,
1454 dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1459 dev->version = version;
1461 sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1463 sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1469 err = sahara_register_algs(dev);
1473 dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1478 kthread_stop(dev->kthread);
1480 clk_disable_unprepare(dev->clk_ahb);
1482 clk_disable_unprepare(dev->clk_ipg);
1489 struct sahara_dev *dev = platform_get_drvdata(pdev);
1491 kthread_stop(dev->kthread);
1493 sahara_unregister_algs(dev);
1495 clk_disable_unprepare(dev->clk_ipg);
1496 clk_disable_unprepare(dev->clk_ahb);