Lines Matching defs:queue

227 static int sec_queue_map_io(struct sec_queue *queue)
229 struct device *dev = queue->dev_info->dev;
234 2 + queue->queue_id);
236 dev_err(dev, "Failed to get queue %d memory resource\n",
237 queue->queue_id);
240 queue->regs = ioremap(res->start, resource_size(res));
241 if (!queue->regs)
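From the matched lines, sec_queue_map_io() looks up the queue's MMIO window on the parent platform device (resource index 2 + queue_id, line 234) and ioremap()s it. A minimal sketch of the whole function under that reading; the -ENOMEM error codes are assumptions, not taken from the match:

    static int sec_queue_map_io(struct sec_queue *queue)
    {
            struct device *dev = queue->dev_info->dev;
            struct resource *res;

            /* Per-queue MMIO regions start at platform resource index 2. */
            res = platform_get_resource(to_platform_device(dev),
                                        IORESOURCE_MEM,
                                        2 + queue->queue_id);
            if (!res) {
                    dev_err(dev, "Failed to get queue %d memory resource\n",
                            queue->queue_id);
                    return -ENOMEM;
            }

            queue->regs = ioremap(res->start, resource_size(res));
            if (!queue->regs)
                    return -ENOMEM;

            return 0;
    }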
247 static void sec_queue_unmap_io(struct sec_queue *queue)
249 iounmap(queue->regs);
252 static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg)
254 void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
267 static int sec_queue_aw_pkgattr(struct sec_queue *queue, u32 aw_pkg)
269 void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
519 static void sec_queue_ar_alloc(struct sec_queue *queue, u32 alloc)
521 void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
536 static void sec_queue_aw_alloc(struct sec_queue *queue, u32 alloc)
538 void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
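Lines 252-269 and 519-538 all compute an ARUSER/AWUSER config address, but the bodies fall outside the match. The usual shape for such helpers is a read-modify-write of that register; a hedged sketch, where SEC_QUEUE_AR_CFG_MASK is a hypothetical mask name for illustration only:

    static void sec_queue_ar_alloc(struct sec_queue *queue, u32 alloc)
    {
            void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
            u32 regval = readl_relaxed(addr);

            regval &= ~SEC_QUEUE_AR_CFG_MASK; /* hypothetical: clear the allocate-hint field */
            regval |= alloc;                  /* install the caller's policy */
            writel_relaxed(regval, addr);
    }

sec_queue_aw_alloc() and the two pkgattr helpers presumably follow the same pattern against their own registers.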
553 static void sec_queue_reorder(struct sec_queue *queue, bool reorder)
555 void __iomem *base = queue->regs;
566 static void sec_queue_depth(struct sec_queue *queue, u32 depth)
568 void __iomem *addr = queue->regs + SEC_Q_DEPTH_CFG_REG;
578 static void sec_queue_cmdbase_addr(struct sec_queue *queue, u64 addr)
580 writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_BASE_HADDR_REG);
581 writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_BASE_LADDR_REG);
584 static void sec_queue_outorder_addr(struct sec_queue *queue, u64 addr)
587 queue->regs + SEC_Q_OUTORDER_BASE_HADDR_REG);
589 queue->regs + SEC_Q_OUTORDER_BASE_LADDR_REG);
592 static void sec_queue_errbase_addr(struct sec_queue *queue, u64 addr)
595 queue->regs + SEC_Q_ERR_BASE_HADDR_REG);
597 queue->regs + SEC_Q_ERR_BASE_LADDR_REG);
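sec_queue_cmdbase_addr(), sec_queue_outorder_addr() and sec_queue_errbase_addr() (lines 578-597) share one pattern: split a 64-bit ring base address into a high/low register pair with upper_32_bits()/lower_32_bits(). Shown here as a hypothetical helper that does not exist in the driver itself:

    /* Hypothetical helper illustrating the split-write pattern of lines 580-597. */
    static void sec_write_addr_pair(void __iomem *hi, void __iomem *lo, u64 addr)
    {
            writel_relaxed(upper_32_bits(addr), hi);
            writel_relaxed(lower_32_bits(addr), lo);
    }

    /* sec_queue_errbase_addr() would then reduce to: */
    sec_write_addr_pair(queue->regs + SEC_Q_ERR_BASE_HADDR_REG,
                        queue->regs + SEC_Q_ERR_BASE_LADDR_REG, addr);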
600 static void sec_queue_irq_disable(struct sec_queue *queue)
602 writel_relaxed((u32)~0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
605 static void sec_queue_irq_enable(struct sec_queue *queue)
607 writel_relaxed(0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
610 static void sec_queue_abn_irq_disable(struct sec_queue *queue)
612 writel_relaxed((u32)~0, queue->regs + SEC_Q_FAIL_INT_MSK_REG);
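Interrupt control (lines 600-612) is a plain mask register: writing all ones masks every flow-interrupt source, writing zero unmasks them. Assembled directly from the matched lines:

    static void sec_queue_irq_disable(struct sec_queue *queue)
    {
            writel_relaxed((u32)~0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
    }

    static void sec_queue_irq_enable(struct sec_queue *queue)
    {
            writel_relaxed(0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
    }

The abnormal-event mask (SEC_Q_FAIL_INT_MSK_REG) is only ever written with ~0 in the match, so those events stay masked.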
615 static void sec_queue_stop(struct sec_queue *queue)
617 disable_irq(queue->task_irq);
618 sec_queue_irq_disable(queue);
619 writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG);
622 static void sec_queue_start(struct sec_queue *queue)
624 sec_queue_irq_enable(queue);
625 enable_irq(queue->task_irq);
626 queue->expected = 0;
627 writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
628 writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG);
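Lines 615-628 give the stop/start pair almost in full. Note the ordering: stop quiesces the Linux IRQ before masking the hardware and clearing the enable bit, while start re-arms interrupts and resets the expected-completion index before flipping the enable bit on:

    static void sec_queue_stop(struct sec_queue *queue)
    {
            disable_irq(queue->task_irq);
            sec_queue_irq_disable(queue);
            writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG);
    }

    static void sec_queue_start(struct sec_queue *queue)
    {
            sec_queue_irq_enable(queue);
            enable_irq(queue->task_irq);
            queue->expected = 0;
            writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR,
                           queue->regs + SEC_Q_INIT_REG);
            writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG);
    }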
637 /* Get the first idle queue in SEC device */
651 static int sec_queue_free(struct sec_queue *queue)
653 struct sec_dev_info *info = queue->dev_info;
655 if (queue->queue_id >= SEC_Q_NUM) {
656 dev_err(info->dev, "No queue %d\n", queue->queue_id);
660 if (!queue->in_use) {
661 dev_err(info->dev, "Queue %d is idle\n", queue->queue_id);
666 queue->in_use = false;
681 struct sec_queue *queue = q;
682 struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
683 struct sec_queue_ring_cq *cq_ring = &queue->ring_cq;
687 void __iomem *base = queue->regs;
701 set_bit(q_id, queue->unprocessed);
702 if (q_id == queue->expected)
703 while (test_bit(queue->expected, queue->unprocessed)) {
704 clear_bit(queue->expected, queue->unprocessed);
705 msg = msg_ring->vaddr + queue->expected;
708 queue->shadow[queue->expected]);
709 queue->shadow[queue->expected] = NULL;
710 queue->expected = (queue->expected + 1) %
723 sec_queue_irq_enable(queue);
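The handler (lines 681-723) tolerates out-of-order completions: each finished slot is flagged in the unprocessed bitmap, and descriptors are handed back strictly in order starting from queue->expected. A hedged reconstruction of the drain loop; the callback field name (msg_ring->callback) and the SEC_QUEUE_LEN modulus are assumptions filled in around the matched lines:

    /* Body fragment: q_id is the slot index the hardware reported complete,
     * msg is a struct sec_bd_info pointer into the command ring. */
    set_bit(q_id, queue->unprocessed);
    if (q_id == queue->expected)
            while (test_bit(queue->expected, queue->unprocessed)) {
                    clear_bit(queue->expected, queue->unprocessed);
                    msg = msg_ring->vaddr + queue->expected;
                    /* Hand the descriptor back with the ctx saved at send time;
                     * the callback name is an assumption. */
                    msg_ring->callback(msg,
                                       queue->shadow[queue->expected]);
                    queue->shadow[queue->expected] = NULL;
                    queue->expected = (queue->expected + 1) %
                            SEC_QUEUE_LEN;
            }
    sec_queue_irq_enable(queue);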
728 static int sec_queue_irq_init(struct sec_queue *queue)
730 struct sec_dev_info *info = queue->dev_info;
731 int irq = queue->task_irq;
735 IRQF_TRIGGER_RISING, queue->name, queue);
745 static int sec_queue_irq_uninit(struct sec_queue *queue)
747 free_irq(queue->task_irq, queue);
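IRQ setup (lines 728-747) requests the per-queue interrupt with the queue itself as the cookie, so the handler receives its struct sec_queue directly. A sketch, assuming a plain request_irq() (the matched lines do not show whether a threaded variant is used) and a hypothetical handler name sec_isr_handle:

    static int sec_queue_irq_init(struct sec_queue *queue)
    {
            struct sec_dev_info *info = queue->dev_info;
            int irq = queue->task_irq;
            int ret;

            ret = request_irq(irq, sec_isr_handle, IRQF_TRIGGER_RISING,
                              queue->name, queue);
            if (ret)
                    dev_err(info->dev, "request irq %d failed: %d\n", irq, ret);

            return ret;
    }

    static int sec_queue_irq_uninit(struct sec_queue *queue)
    {
            free_irq(queue->task_irq, queue);

            return 0;
    }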
774 struct sec_queue *queue;
776 queue = sec_alloc_queue(info);
777 if (IS_ERR(queue)) {
778 dev_err(info->dev, "alloc sec queue failed! %ld\n",
779 PTR_ERR(queue));
780 return queue;
783 sec_queue_start(queue);
785 return queue;
789 * sec_queue_alloc_start_safe - get a hw queue from appropriate instance
793 * queue. Future work may focus on optimizing this in order to improve full
799 struct sec_queue *queue = ERR_PTR(-ENODEV);
806 queue = sec_queue_alloc_start(info);
811 return queue;
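Per its kernel-doc, sec_queue_alloc_start_safe() does only simplistic load balancing across device instances. A hedged sketch of that shape; sec_devices, its list linkage, and sec_mutex are hypothetical names, not taken from the driver:

    struct sec_queue *sec_queue_alloc_start_safe(void)
    {
            struct sec_queue *queue = ERR_PTR(-ENODEV);
            struct sec_dev_info *info;

            mutex_lock(&sec_mutex);
            /* Take the first instance that can hand out a started queue. */
            list_for_each_entry(info, &sec_devices, list) {
                    queue = sec_queue_alloc_start(info);
                    if (!IS_ERR(queue))
                            break;
            }
            mutex_unlock(&sec_mutex);

            return queue;
    }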
815 * sec_queue_stop_release() - free up a hw queue for reuse
816 * @queue: The queue we are done with.
818 This will stop the current queue, terminating any transactions
821 int sec_queue_stop_release(struct sec_queue *queue)
823 struct device *dev = queue->dev_info->dev;
826 sec_queue_stop(queue);
828 ret = sec_queue_free(queue);
830 dev_err(dev, "Releasing queue failed %d\n", ret);
836 * sec_queue_empty() - Is this hardware queue currently empty?
838 We need to know if we have an empty queue for some of the chaining modes,
839 because, if it is not empty, we may need to hold the message in a software queue
840 until the hw queue is drained.
842 bool sec_queue_empty(struct sec_queue *queue)
844 struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
850 * sec_queue_send() - queue up a single operation in the hw queue
851 * @queue: The queue in which to put the message
855 * This function will return -EAGAIN if the queue is currently full.
857 int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx)
859 struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
860 void __iomem *base = queue->regs;
871 queue->shadow[write] = ctx;
874 /* Ensure content updated before queue advance */
884 bool sec_queue_can_enqueue(struct sec_queue *queue, int num)
886 struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
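The enqueue path (lines 857-874) copies the descriptor into the ring, parks the caller's ctx in the shadow array at the same index, and only publishes the new write pointer after a write barrier, so the device never observes a half-written descriptor. A hedged reconstruction; the ring lock, the read/write pointer register names, and the used counter are assumptions around the matched lines:

    int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx)
    {
            struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
            void __iomem *base = queue->regs;
            u32 write, read;

            mutex_lock(&msg_ring->lock);
            read = readl(base + SEC_Q_RD_PTR_REG);  /* assumed register name */
            write = readl(base + SEC_Q_WR_PTR_REG); /* assumed register name */
            if (write == read &&
                atomic_read(&msg_ring->used) == SEC_QUEUE_LEN) {
                    mutex_unlock(&msg_ring->lock);
                    return -EAGAIN; /* full, as line 855 documents */
            }

            memcpy(msg_ring->vaddr + write, msg, sizeof(*msg));
            queue->shadow[write] = ctx;
            write = (write + 1) % SEC_QUEUE_LEN;

            /* Ensure content updated before queue advance */
            wmb();
            writel(write, base + SEC_Q_WR_PTR_REG);

            atomic_inc(&msg_ring->used);
            mutex_unlock(&msg_ring->lock);

            return 0;
    }

sec_queue_can_enqueue() (line 884) presumably answers the same fullness question from msg_ring->used without touching the hardware.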
891 static void sec_queue_hw_init(struct sec_queue *queue)
893 sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC);
894 sec_queue_aw_alloc(queue, SEC_QUEUE_AW_FROCE_NOALLOC);
895 sec_queue_ar_pkgattr(queue, 1);
896 sec_queue_aw_pkgattr(queue, 1);
898 /* Enable out of order queue */
899 sec_queue_reorder(queue, true);
902 writel_relaxed(1, queue->regs + SEC_Q_PROC_NUM_CFG_REG);
904 sec_queue_depth(queue, SEC_QUEUE_LEN - 1);
906 sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr);
908 sec_queue_outorder_addr(queue, queue->ring_cq.paddr);
910 sec_queue_errbase_addr(queue, queue->ring_db.paddr);
912 writel_relaxed(0x100, queue->regs + SEC_Q_OT_TH_REG);
914 sec_queue_abn_irq_disable(queue);
915 sec_queue_irq_disable(queue);
916 writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
1001 struct sec_queue *queue, int queue_id)
1003 queue->dev_info = info;
1004 queue->queue_id = queue_id;
1005 snprintf(queue->name, sizeof(queue->name),
1006 "%s_%d", dev_name(info->dev), queue->queue_id);
1077 static int sec_queue_res_cfg(struct sec_queue *queue)
1079 struct device *dev = queue->dev_info->dev;
1080 struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd;
1081 struct sec_queue_ring_cq *ring_cq = &queue->ring_cq;
1082 struct sec_queue_ring_db *ring_db = &queue->ring_db;
1107 queue->task_irq = platform_get_irq(to_platform_device(dev),
1108 queue->queue_id * 2 + 1);
1109 if (queue->task_irq <= 0) {
1117 dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
1118 queue->ring_db.paddr);
1120 dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
1121 queue->ring_cq.paddr);
1123 dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
1124 queue->ring_cmd.paddr);
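sec_queue_res_cfg() (lines 1077-1124) allocates three coherent ring buffers plus the task IRQ, and the matched lines at 1117-1124 are its reverse-order unwind. A sketch of the goto structure; the label names and error codes are assumptions:

    static int sec_queue_res_cfg(struct sec_queue *queue)
    {
            struct device *dev = queue->dev_info->dev;
            struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd;
            struct sec_queue_ring_cq *ring_cq = &queue->ring_cq;
            struct sec_queue_ring_db *ring_db = &queue->ring_db;
            int ret;

            ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
                                                 &ring_cmd->paddr, GFP_KERNEL);
            if (!ring_cmd->vaddr)
                    return -ENOMEM;

            ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
                                                &ring_cq->paddr, GFP_KERNEL);
            if (!ring_cq->vaddr) {
                    ret = -ENOMEM;
                    goto err_free_ring_cmd;
            }

            ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
                                                &ring_db->paddr, GFP_KERNEL);
            if (!ring_db->vaddr) {
                    ret = -ENOMEM;
                    goto err_free_ring_cq;
            }

            /* Task IRQs are the odd-numbered entries per queue. */
            queue->task_irq = platform_get_irq(to_platform_device(dev),
                                               queue->queue_id * 2 + 1);
            if (queue->task_irq <= 0) {
                    ret = -EINVAL;
                    goto err_free_ring_db;
            }

            return 0;

    err_free_ring_db:
            dma_free_coherent(dev, SEC_Q_DB_SIZE, ring_db->vaddr,
                              ring_db->paddr);
    err_free_ring_cq:
            dma_free_coherent(dev, SEC_Q_CQ_SIZE, ring_cq->vaddr,
                              ring_cq->paddr);
    err_free_ring_cmd:
            dma_free_coherent(dev, SEC_Q_CMD_SIZE, ring_cmd->vaddr,
                              ring_cmd->paddr);
            return ret;
    }

sec_queue_free_ring_pages() (lines 1129-1138) repeats the same three frees unconditionally for the teardown path.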
1129 static void sec_queue_free_ring_pages(struct sec_queue *queue)
1131 struct device *dev = queue->dev_info->dev;
1133 dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
1134 queue->ring_db.paddr);
1135 dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
1136 queue->ring_cq.paddr);
1137 dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
1138 queue->ring_cmd.paddr);
1141 static int sec_queue_config(struct sec_dev_info *info, struct sec_queue *queue,
1146 sec_queue_base_init(info, queue, queue_id);
1148 ret = sec_queue_res_cfg(queue);
1152 ret = sec_queue_map_io(queue);
1155 sec_queue_free_ring_pages(queue);
1159 sec_queue_hw_init(queue);
1165 struct sec_queue *queue)
1167 sec_queue_unmap_io(queue);
1168 sec_queue_free_ring_pages(queue);