Lines matching refs: aq
123 struct efa_com_admin_queue *aq = &edev->aq;
124 struct efa_com_admin_sq *sq = &aq->sq;
125 u16 size = aq->depth * sizeof(*sq->entries);
131 dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);
149 EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_DEPTH, aq->depth);
160 struct efa_com_admin_queue *aq = &edev->aq;
161 struct efa_com_admin_cq *cq = &aq->cq;
162 u16 size = aq->depth * sizeof(*cq->entries);
168 dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL);
183 EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_DEPTH, aq->depth);
187 aq->msix_vector_idx);
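Lines 123-149 and 160-187 above come from the admin SQ/CQ setup: each ring is sized as depth * sizeof(entry), allocated as a DMA-coherent buffer, and its depth is programmed into the AQ/ACQ capability registers. Below is a minimal userspace sketch of that sizing and allocation pattern, with calloc() standing in for dma_alloc_coherent() and placeholder entry types that are not the driver's efa_admin_* structs.

/* Sketch only: models the "depth * sizeof(entry)" ring allocation seen above.
 * calloc() stands in for dma_alloc_coherent(); the entry structs are placeholders. */
#include <stdint.h>
#include <stdlib.h>

struct sq_entry { uint8_t raw[64]; };    /* placeholder descriptor        */
struct cq_entry { uint8_t raw[16]; };    /* placeholder completion entry  */

struct admin_ring {
	uint16_t depth;                  /* must be a power of two        */
	struct sq_entry *sq_entries;
	struct cq_entry *cq_entries;
};

static int admin_ring_alloc(struct admin_ring *r, uint16_t depth)
{
	r->depth = depth;
	r->sq_entries = calloc(depth, sizeof(*r->sq_entries));
	r->cq_entries = calloc(depth, sizeof(*r->cq_entries));
	if (!r->sq_entries || !r->cq_entries) {
		free(r->sq_entries);
		free(r->cq_entries);
		return -1;
	}
	return 0;
}

Keeping the depth a power of two is what makes the pc & (depth - 1) indexing in the submission and completion paths further down valid.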
241 static u16 efa_com_alloc_ctx_id(struct efa_com_admin_queue *aq)
245 spin_lock(&aq->comp_ctx_lock);
246 ctx_id = aq->comp_ctx_pool[aq->comp_ctx_pool_next];
247 aq->comp_ctx_pool_next++;
248 spin_unlock(&aq->comp_ctx_lock);
253 static void efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq,
256 spin_lock(&aq->comp_ctx_lock);
257 aq->comp_ctx_pool_next--;
258 aq->comp_ctx_pool[aq->comp_ctx_pool_next] = ctx_id;
259 spin_unlock(&aq->comp_ctx_lock);
262 static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *aq,
267 u16 ctx_id = cmd_id & (aq->depth - 1);
269 ibdev_dbg(aq->efa_dev, "Put completion command_id %#x\n", cmd_id);
271 efa_com_dealloc_ctx_id(aq, ctx_id);
274 static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq,
277 u16 ctx_id = cmd_id & (aq->depth - 1);
279 if (aq->comp_ctx[ctx_id].occupied && capture) {
281 aq->efa_dev,
288 aq->comp_ctx[ctx_id].occupied = 1;
289 ibdev_dbg(aq->efa_dev,
293 return &aq->comp_ctx[ctx_id];
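Lines 241-293 manage the completion contexts: free slot IDs live in a LIFO array guarded by a spinlock, and a command ID maps back to its slot through cmd_id & (aq->depth - 1). Below is a sketch of that pattern, assuming a pthread mutex in place of comp_ctx_lock and illustrative struct names that are not the driver's.

/* Sketch only: LIFO free-ID pool plus the cmd_id -> slot mapping used by the
 * get/put helpers above. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define DEPTH 16                            /* power of two, like aq->depth */

struct comp_ctx { int occupied; };

struct ctx_pool {
	pthread_mutex_t lock;
	uint16_t free_ids[DEPTH];           /* LIFO stack of free slot IDs  */
	uint16_t next;                      /* index of the next free entry */
	struct comp_ctx ctx[DEPTH];
};

/* free_ids must first be seeded 0..DEPTH-1; see the init sketch further down. */
static struct ctx_pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };

static uint16_t ctx_id_alloc(struct ctx_pool *p)
{
	uint16_t id;

	pthread_mutex_lock(&p->lock);
	id = p->free_ids[p->next++];        /* pop from the LIFO pool */
	pthread_mutex_unlock(&p->lock);
	return id;
}

static void ctx_id_free(struct ctx_pool *p, uint16_t id)
{
	pthread_mutex_lock(&p->lock);
	p->free_ids[--p->next] = id;        /* push the ID back */
	pthread_mutex_unlock(&p->lock);
}

static struct comp_ctx *get_comp_ctx(struct ctx_pool *p, uint16_t cmd_id)
{
	uint16_t slot = cmd_id & (DEPTH - 1);   /* low bits select the slot */

	if (p->ctx[slot].occupied)              /* double claim, as logged at 279-288 */
		fprintf(stderr, "completion context %u already occupied\n", slot);
	p->ctx[slot].occupied = 1;
	return &p->ctx[slot];
}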
296 static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
309 queue_size_mask = aq->depth - 1;
310 pi = aq->sq.pc & queue_size_mask;
312 ctx_id = efa_com_alloc_ctx_id(aq);
316 cmd_id |= aq->sq.pc & ~queue_size_mask;
321 EFA_ADMIN_AQ_COMMON_DESC_PHASE, aq->sq.phase);
323 comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, true);
325 efa_com_dealloc_ctx_id(aq, ctx_id);
336 aqe = &aq->sq.entries[pi];
340 aq->sq.pc++;
341 atomic64_inc(&aq->stats.submitted_cmd);
343 if ((aq->sq.pc & queue_size_mask) == 0)
344 aq->sq.phase = !aq->sq.phase;
347 writel(aq->sq.pc, aq->sq.db_addr);
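Lines 296-347 are the submission path: the producer slot is pc & (depth - 1), the upper bits of pc are folded into the command ID, the phase bit flips each time the producer counter wraps, and the updated pc is written to the doorbell. Below is a simplified sketch; in the driver the low bits of cmd_id come from the separately allocated ctx_id rather than the slot index, and ring_doorbell() stands in for the MMIO writel().

/* Sketch only: producer-side bookkeeping mirrored from the lines above. */
#include <stdint.h>

#define DEPTH 16                             /* power of two */

struct aq_desc {
	uint16_t cmd_id;                     /* slot/ctx ID | upper pc bits */
	uint8_t  phase;                      /* producer phase bit          */
};

struct admin_sq {
	struct aq_desc entries[DEPTH];
	uint32_t pc;                         /* producer counter, never masked */
	uint8_t  phase;
};

static void ring_doorbell(uint32_t pc) { (void)pc; /* MMIO write in the driver */ }

static uint16_t submit_cmd(struct admin_sq *sq)
{
	uint32_t mask = DEPTH - 1;
	uint32_t pi = sq->pc & mask;         /* producer slot               */
	uint16_t cmd_id = pi;                /* driver uses the pooled ctx_id here */

	cmd_id |= sq->pc & ~mask;            /* fold in the pc "epoch" bits */

	sq->entries[pi].cmd_id = cmd_id;
	sq->entries[pi].phase = sq->phase;

	sq->pc++;
	if ((sq->pc & mask) == 0)            /* wrapped: flip the phase bit */
		sq->phase = !sq->phase;

	ring_doorbell(sq->pc);
	return cmd_id;
}

Folding the upper bits of pc into cmd_id gives the completion side a way to reject a stale or corrupted command ID, which is what the error path around line 417 below complains about.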
352 static inline int efa_com_init_comp_ctxt(struct efa_com_admin_queue *aq)
354 size_t pool_size = aq->depth * sizeof(*aq->comp_ctx_pool);
355 size_t size = aq->depth * sizeof(struct efa_comp_ctx);
359 aq->comp_ctx = devm_kzalloc(aq->dmadev, size, GFP_KERNEL);
360 aq->comp_ctx_pool = devm_kzalloc(aq->dmadev, pool_size, GFP_KERNEL);
361 if (!aq->comp_ctx || !aq->comp_ctx_pool) {
362 devm_kfree(aq->dmadev, aq->comp_ctx_pool);
363 devm_kfree(aq->dmadev, aq->comp_ctx);
367 for (i = 0; i < aq->depth; i++) {
368 comp_ctx = efa_com_get_comp_ctx(aq, i, false);
372 aq->comp_ctx_pool[i] = i;
375 spin_lock_init(&aq->comp_ctx_lock);
377 aq->comp_ctx_pool_next = 0;
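Lines 352-377 allocate the context array and the free-ID pool and seed the pool with the identity permutation, so the first depth allocations hand out IDs 0..depth-1 in order. The seeding step in isolation, as a self-contained sketch:

/* Sketch only: seeding a free-ID pool with the identity permutation,
 * as the init loop above does for comp_ctx_pool. */
#include <stdint.h>

static void seed_free_ids(uint16_t *free_ids, uint16_t depth, uint16_t *next)
{
	uint16_t i;

	for (i = 0; i < depth; i++)
		free_ids[i] = i;    /* pool starts as 0, 1, ..., depth-1 */
	*next = 0;                  /* the next allocation takes ID 0    */
}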
382 static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
390 spin_lock(&aq->sq.lock);
391 if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) {
392 ibdev_err_ratelimited(aq->efa_dev, "Admin queue is closed\n");
393 spin_unlock(&aq->sq.lock);
397 comp_ctx = __efa_com_submit_admin_cmd(aq, cmd, cmd_size_in_bytes, comp,
399 spin_unlock(&aq->sq.lock);
401 clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
406 static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
415 comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
417 ibdev_err(aq->efa_dev,
419 clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
428 if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
432 static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
440 queue_size_mask = aq->depth - 1;
442 ci = aq->cq.cc & queue_size_mask;
443 phase = aq->cq.phase;
445 cqe = &aq->cq.entries[ci];
455 efa_com_handle_single_admin_completion(aq, cqe);
459 if (ci == aq->depth) {
464 cqe = &aq->cq.entries[ci];
467 aq->cq.cc += comp_num;
468 aq->cq.phase = phase;
469 aq->sq.cc += comp_num;
470 atomic64_add(comp_num, &aq->stats.completed_cmd);
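Lines 406-470 consume the completion queue by phase: an entry belongs to the current pass only if its phase bit matches the expected phase, the consumer index wraps at depth and flips the expected phase, and cq.cc/sq.cc advance by the number of completions reaped. Below is a sketch of that loop with a placeholder completion layout; the driver additionally issues a dma_rmb() after validating the phase bit and before reading the rest of the entry.

/* Sketch only: phase-bit based CQ consumption modelled on the loop above. */
#include <stdint.h>

#define DEPTH 16

struct cq_entry {
	uint8_t  phase;                      /* written by the "device" */
	uint16_t cmd_id;
};

struct admin_cq {
	struct cq_entry entries[DEPTH];
	uint32_t cc;                         /* consumer counter        */
	uint8_t  phase;                      /* expected phase          */
};

static void handle_one(struct cq_entry *cqe) { (void)cqe; /* placeholder */ }

static unsigned int reap_completions(struct admin_cq *cq)
{
	uint32_t mask = DEPTH - 1;
	uint32_t ci = cq->cc & mask;
	uint8_t phase = cq->phase;
	unsigned int comp_num = 0;
	struct cq_entry *cqe = &cq->entries[ci];

	/* An entry is valid only while its phase matches the expected phase. */
	while (cqe->phase == phase) {
		handle_one(cqe);
		comp_num++;
		ci++;
		if (ci == DEPTH) {           /* wrap: restart at 0, flip phase */
			ci = 0;
			phase = !phase;
		}
		cqe = &cq->entries[ci];
	}

	cq->cc += comp_num;
	cq->phase = phase;
	return comp_num;
}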
493 struct efa_com_admin_queue *aq)
499 timeout = jiffies + usecs_to_jiffies(aq->completion_timeout);
502 spin_lock_irqsave(&aq->cq.lock, flags);
503 efa_com_handle_admin_completion(aq);
504 spin_unlock_irqrestore(&aq->cq.lock, flags);
511 aq->efa_dev,
514 atomic64_inc(&aq->stats.no_completion);
516 clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
521 msleep(aq->poll_interval);
526 efa_com_put_comp_ctx(aq, comp_ctx);
531 struct efa_com_admin_queue *aq)
537 usecs_to_jiffies(aq->completion_timeout));
546 spin_lock_irqsave(&aq->cq.lock, flags);
547 efa_com_handle_admin_completion(aq);
548 spin_unlock_irqrestore(&aq->cq.lock, flags);
550 atomic64_inc(&aq->stats.no_completion);
554 aq->efa_dev,
558 comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
561 aq->efa_dev,
565 comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
567 clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
574 efa_com_put_comp_ctx(aq, comp_ctx);
587 struct efa_com_admin_queue *aq)
589 if (test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
590 return efa_com_wait_and_process_admin_cq_polling(comp_ctx, aq);
592 return efa_com_wait_and_process_admin_cq_interrupts(comp_ctx, aq);
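Lines 493-592 implement the two wait strategies and the dispatcher that chooses between them on the POLLING state bit: the polling path repeatedly reaps the CQ and sleeps poll_interval between attempts until the context completes or completion_timeout expires, while the interrupt path sleeps on a completion object for the same timeout. Below is the shape of the polling loop, with usleep()/clock_gettime() standing in for msleep()/jiffies and placeholder poll_once()/ctx_done() helpers.

/* Sketch only: the polling wait.  poll_once() models reaping the CQ under the
 * cq lock; ctx_done() models checking the command's completion context. */
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

static bool poll_once(void) { return false; }   /* placeholder */
static bool ctx_done(void)  { return false; }   /* placeholder */

static int wait_polling(unsigned int timeout_us, unsigned int interval_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		poll_once();                     /* like efa_com_handle_admin_completion() */
		if (ctx_done())
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000000L +
		    (now.tv_nsec - start.tv_nsec) / 1000L >= (long)timeout_us)
			return -1;               /* timed out, no completion */

		usleep(interval_ms * 1000);       /* poll_interval backoff   */
	}
}

On timeout both paths bump the no_completion counter and clear the RUNNING bit (lines 514-516 and 550-567 above), which is why later submissions fail with "Admin queue is closed" at line 392.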
597 * @aq: admin queue.
608 int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
620 down(&aq->avail_cmds);
622 ibdev_dbg(aq->efa_dev, "%s (opcode %d)\n",
625 comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size);
628 aq->efa_dev,
633 up(&aq->avail_cmds);
634 atomic64_inc(&aq->stats.cmd_err);
638 err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
641 aq->efa_dev,
646 atomic64_inc(&aq->stats.cmd_err);
649 up(&aq->avail_cmds);
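Lines 608-649 show efa_com_cmd_exec(): the avail_cmds semaphore is initialised to the queue depth (line 743 below), so at most depth commands are in flight; it is taken before submission and released after the wait finishes, or immediately if submission fails. Below is a POSIX-semaphore sketch of that flow control, with submit() and wait_done() as placeholders.

/* Sketch only: semaphore-bounded submit + wait, as in efa_com_cmd_exec(). */
#include <semaphore.h>

static int submit(void)    { return 0; }   /* placeholder for the admin submit */
static int wait_done(void) { return 0; }   /* placeholder for the CQ wait      */

static sem_t avail_cmds;

static int cmd_exec_setup(unsigned int depth)
{
	return sem_init(&avail_cmds, 0, depth); /* at most `depth` commands in flight */
}

static int cmd_exec(void)
{
	int err;

	sem_wait(&avail_cmds);             /* block while depth commands are pending */

	err = submit();
	if (err) {
		sem_post(&avail_cmds);     /* release the slot on submit failure */
		return err;
	}

	err = wait_done();                 /* slot stays held until completion  */
	sem_post(&avail_cmds);
	return err;
}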
660 struct efa_com_admin_queue *aq = &edev->aq;
662 struct efa_com_admin_cq *cq = &aq->cq;
663 struct efa_com_admin_sq *sq = &aq->sq;
666 clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
668 devm_kfree(edev->dmadev, aq->comp_ctx_pool);
669 devm_kfree(edev->dmadev, aq->comp_ctx);
671 size = aq->depth * sizeof(*sq->entries);
674 size = aq->depth * sizeof(*cq->entries);
697 set_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
699 clear_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
704 atomic64_t *s = (atomic64_t *)&edev->aq.stats;
707 for (i = 0; i < sizeof(edev->aq.stats) / sizeof(*s); i++, s++)
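Lines 660-707 cover teardown, toggling the polling mode bit, and resetting the per-queue statistics by walking the stats struct as a flat array of 64-bit atomics. Below is a userspace sketch of that reset using the counter names visible in this listing; like the driver's loop, it assumes the struct holds only same-typed counters with no padding.

/* Sketch only: zeroing every counter by treating the stats struct as an
 * array of atomics, mirroring the loop over edev->aq.stats above. */
#include <stdatomic.h>
#include <stddef.h>

struct admin_stats {
	_Atomic long submitted_cmd;
	_Atomic long completed_cmd;
	_Atomic long no_completion;
	_Atomic long cmd_err;
};

static void stats_reset(struct admin_stats *stats)
{
	_Atomic long *s = (_Atomic long *)stats;
	size_t i;

	for (i = 0; i < sizeof(*stats) / sizeof(*s); i++, s++)
		atomic_store(s, 0);
}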
724 struct efa_com_admin_queue *aq = &edev->aq;
737 aq->depth = EFA_ADMIN_QUEUE_DEPTH;
739 aq->dmadev = edev->dmadev;
740 aq->efa_dev = edev->efa_dev;
741 set_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state);
743 sema_init(&aq->avail_cmds, aq->depth);
747 err = efa_com_init_comp_ctxt(aq);
769 aq->completion_timeout = timeout * 100000;
771 aq->completion_timeout = ADMIN_CMD_TIMEOUT_US;
773 aq->poll_interval = EFA_POLL_INTERVAL_MS;
775 set_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
780 dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->cq.entries),
781 aq->cq.entries, aq->cq.dma_addr);
783 dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->sq.entries),
784 aq->sq.entries, aq->sq.dma_addr);
786 devm_kfree(edev->dmadev, aq->comp_ctx);
804 spin_lock_irqsave(&edev->aq.cq.lock, flags);
805 efa_com_handle_admin_completion(&edev->aq);
806 spin_unlock_irqrestore(&edev->aq.cq.lock, flags);
1085 edev->aq.completion_timeout = timeout * 100000;
1087 edev->aq.completion_timeout = ADMIN_CMD_TIMEOUT_US;
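Lines 724-786 initialise and, on the error path, tear down the admin queue; lines 804-806 are the completion handler that reaps the CQ under the cq lock; and lines 769-771 (repeated at 1085-1087, apparently after a device reset) pick the admin-command completion timeout: a non-zero device-reported value is scaled by 100000 to microseconds, which reads as the register having 100 ms resolution, otherwise the ADMIN_CMD_TIMEOUT_US default applies. A sketch of that selection; the 30 s default below is assumed for illustration, not taken from this listing.

/* Sketch only: timeout selection as in lines 769-771 / 1085-1087 above. */
#include <stdint.h>

#define ADMIN_CMD_TIMEOUT_US 30000000u      /* assumed default, for illustration */

static uint32_t pick_completion_timeout(uint32_t dev_timeout_100ms)
{
	if (dev_timeout_100ms)
		return dev_timeout_100ms * 100000u;  /* 100 ms units -> microseconds */
	return ADMIN_CMD_TIMEOUT_US;                 /* fall back to the driver default */
}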