Lines matching defs:cmdq
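/*
 * Annotation: erdma's command queue (cmdq) is the driver-to-device command
 * path. Requests are pushed onto a submission queue (sq), completed through
 * a completion queue (cq), and, once the device is up, signaled through an
 * event queue (eq). Only the file lines matching "cmdq" appear below, so
 * the line numbers are discontiguous.
 */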
9 static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
11 struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
12 u64 db_data = FIELD_PREP(ERDMA_CQDB_CI_MASK, cmdq->cq.ci) |
14 FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) |
15 FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn);
17 *cmdq->cq.db_record = db_data;
20 atomic64_inc(&cmdq->cq.armed_num);
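/*
 * arm_cmdq_cq() packs the CQ consumer index (ci) and command sequence
 * number (cmdsn) into a doorbell value, mirrors it into the CQ doorbell
 * record, and rings the hardware CQ doorbell; the arm flag and the actual
 * register write sit on lines that do not match "cmdq". armed_num merely
 * counts how many times the CQ has been re-armed.
 */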
23 static void kick_cmdq_db(struct erdma_cmdq *cmdq)
25 struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
26 u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi);
28 *cmdq->sq.db_record = db_data;
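/*
 * kick_cmdq_db() publishes the new sq producer index (pi): the value is
 * mirrored into the sq doorbell record and then written to the sq doorbell
 * register (the register write is on an unmatched line).
 */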
32 static struct erdma_comp_wait *get_comp_wait(struct erdma_cmdq *cmdq)
36 spin_lock(&cmdq->lock);
37 comp_idx = find_first_zero_bit(cmdq->comp_wait_bitmap,
38 cmdq->max_outstandings);
39 if (comp_idx == cmdq->max_outstandings) {
40 spin_unlock(&cmdq->lock);
44 __set_bit(comp_idx, cmdq->comp_wait_bitmap);
45 spin_unlock(&cmdq->lock);
47 return &cmdq->wait_pool[comp_idx];
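/*
 * get_comp_wait() hands out one per-command wait context: under cmdq->lock
 * it claims the first free bit in comp_wait_bitmap and returns the matching
 * wait_pool entry; when all max_outstandings contexts are busy it returns
 * an error pointer (on the unmatched line after the unlock).
 */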
50 static void put_comp_wait(struct erdma_cmdq *cmdq,
55 cmdq->wait_pool[comp_wait->ctx_id].cmd_status = ERDMA_CMD_STATUS_INIT;
56 spin_lock(&cmdq->lock);
57 used = __test_and_clear_bit(comp_wait->ctx_id, cmdq->comp_wait_bitmap);
58 spin_unlock(&cmdq->lock);
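/*
 * put_comp_wait() is the inverse: it resets the context's cmd_status to
 * ERDMA_CMD_STATUS_INIT and clears its bitmap bit under cmdq->lock; the
 * "used" result lets the caller warn if the bit was not actually set
 * (a double free of the context).
 */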
64 struct erdma_cmdq *cmdq)
68 cmdq->wait_pool =
69 devm_kcalloc(&dev->pdev->dev, cmdq->max_outstandings,
71 if (!cmdq->wait_pool)
74 spin_lock_init(&cmdq->lock);
75 cmdq->comp_wait_bitmap = devm_bitmap_zalloc(
76 &dev->pdev->dev, cmdq->max_outstandings, GFP_KERNEL);
77 if (!cmdq->comp_wait_bitmap)
80 for (i = 0; i < cmdq->max_outstandings; i++) {
81 init_completion(&cmdq->wait_pool[i].wait_event);
82 cmdq->wait_pool[i].ctx_id = i;
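/*
 * erdma_cmdq_wait_res_init() sets up the waiter pool: a devm-managed array
 * of max_outstandings wait contexts plus a bitmap tracking which are in
 * use. Each context gets an initialized completion and is stamped with its
 * own ctx_id, so a completion's context cookie maps straight back to it.
 */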
90 struct erdma_cmdq *cmdq = &dev->cmdq;
91 struct erdma_cmdq_sq *sq = &cmdq->sq;
95 sq->depth = cmdq->max_outstandings * sq->wqebb_cnt;
122 struct erdma_cmdq *cmdq = &dev->cmdq;
123 struct erdma_cmdq_cq *cq = &cmdq->cq;
126 cq->depth = cmdq->sq.depth;
153 struct erdma_cmdq *cmdq = &dev->cmdq;
154 struct erdma_eq *eq = &cmdq->eq;
157 eq->depth = cmdq->max_outstandings;
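/*
 * Queue sizing: each command occupies sq.wqebb_cnt basic building blocks,
 * so an sq depth of max_outstandings * wqebb_cnt holds exactly
 * max_outstandings commands; the cq simply mirrors the sq depth, and the
 * eq needs one slot per possible outstanding completion event.
 */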
185 struct erdma_cmdq *cmdq = &dev->cmdq;
188 cmdq->max_outstandings = ERDMA_CMDQ_MAX_OUTSTANDING;
189 cmdq->use_event = false;
191 sema_init(&cmdq->credits, cmdq->max_outstandings);
193 err = erdma_cmdq_wait_res_init(dev, cmdq);
209 set_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
215 (cmdq->cq.depth << CQE_SHIFT) +
217 cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
221 (cmdq->sq.depth << SQEBB_SHIFT) +
223 cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
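/*
 * erdma_cmdq_init() brings the cmdq up in polling mode (use_event = false)
 * since interrupts are not yet available this early in probe, and the
 * credits semaphore caps concurrent commands at max_outstandings. The
 * error path above unwinds by freeing the cq and sq DMA buffers; each size
 * expression continues onto an unmatched line adding extra space for the
 * queue's doorbell record.
 */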
230 /* After device init succeeds, switch the cmdq to event mode. */
231 dev->cmdq.use_event = true;
232 arm_cmdq_cq(&dev->cmdq);
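/*
 * Once device initialization succeeds, erdma_finish_cmdq_init() flips the
 * cmdq into event mode and arms the CQ, so subsequent commands sleep on
 * their completion instead of busy-polling.
 */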
237 struct erdma_cmdq *cmdq = &dev->cmdq;
239 clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
242 (cmdq->eq.depth << EQE_SHIFT) +
244 cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
246 (cmdq->sq.depth << SQEBB_SHIFT) +
248 cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
250 (cmdq->cq.depth << CQE_SHIFT) +
252 cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
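/*
 * erdma_cmdq_destroy() first clears the OK bit so new submissions fail
 * fast, then releases the eq, sq and cq DMA buffers (the dma_free_coherent
 * calls themselves are on unmatched lines; only their size and buffer
 * arguments appear here).
 */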
255 static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)
257 __be32 *cqe = get_queue_entry(cmdq->cq.qbuf, cmdq->cq.ci,
258 cmdq->cq.depth, CQE_SHIFT);
262 return owner ^ !!(cmdq->cq.ci & cmdq->cq.depth) ? cqe : NULL;
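/*
 * CQE validity is decided by an owner/phase scheme: cq.depth is a power of
 * two and cq.ci only ever increases, so (ci & depth) flips on every wrap
 * of the ring, and a CQE is fresh exactly while its owner bit differs from
 * the consumer's current wrap phase. A minimal standalone sketch of the
 * same test (names here are illustrative, not the driver's):
 */
static inline bool cqe_is_fresh(u32 owner_bit, u32 ci, u32 depth)
{
	u32 phase = !!(ci & depth);	/* toggles 0/1 on each ring wrap */

	return owner_bit ^ phase;	/* fresh while owner != consumed phase */
}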
265 static void push_cmdq_sqe(struct erdma_cmdq *cmdq, u64 *req, size_t req_len,
273 comp_wait->sq_pi = cmdq->sq.pi;
275 wqe = get_queue_entry(cmdq->sq.qbuf, cmdq->sq.pi, cmdq->sq.depth,
279 cmdq->sq.pi += cmdq->sq.wqebb_cnt;
280 hdr |= FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi) |
283 FIELD_PREP(ERDMA_CMD_HDR_WQEBB_CNT_MASK, cmdq->sq.wqebb_cnt - 1);
286 kick_cmdq_db(cmdq);
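/*
 * push_cmdq_sqe() copies the request into the slot at the current producer
 * index, advances pi by wqebb_cnt, and then rewrites the request's first
 * u64 as a header carrying the new pi, the waiter's ctx_id (the context
 * cookie, set on an unmatched line), and wqebb_cnt - 1, before ringing
 * the sq doorbell.
 */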
289 static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
297 cqe = get_next_valid_cmdq_cqe(cmdq);
301 cmdq->cq.ci++;
307 sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth,
310 comp_wait = &cmdq->wait_pool[ctx_id];
316 cmdq->sq.ci += cmdq->sq.wqebb_cnt;
320 if (cmdq->use_event)
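/*
 * erdma_poll_single_cmd_completion() consumes one valid CQE (returning
 * nonzero when the ring is empty): it reads back the originating sqe's
 * header to recover the context cookie, copies the completion payload into
 * that waiter, retires the sqe by advancing sq.ci, and in event mode wakes
 * the sleeper through its completion.
 */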
326 static void erdma_polling_cmd_completions(struct erdma_cmdq *cmdq)
331 spin_lock_irqsave(&cmdq->cq.lock, flags);
336 for (comp_num = 0; comp_num < cmdq->max_outstandings; comp_num++)
337 if (erdma_poll_single_cmd_completion(cmdq))
340 if (comp_num && cmdq->use_event)
341 arm_cmdq_cq(cmdq);
343 spin_unlock_irqrestore(&cmdq->cq.lock, flags);
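/*
 * The drain loop runs under cq.lock and is bounded by max_outstandings,
 * since there can never be more completions pending than commands
 * outstanding. The CQ is re-armed only in event mode, and only when at
 * least one CQE was actually consumed.
 */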
346 void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
350 if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state) ||
351 !cmdq->use_event)
354 while (get_next_valid_eqe(&cmdq->eq)) {
355 cmdq->eq.ci++;
360 cmdq->cq.cmdsn++;
361 erdma_polling_cmd_completions(cmdq);
364 notify_eq(&cmdq->eq);
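/*
 * The EQ interrupt path: all pending EQEs are consumed first; if any
 * arrived, cq.cmdsn is bumped (feeding the next arming doorbell) and the
 * CQ is drained. notify_eq() then reports the new EQ consumer index to the
 * device. The handler bails out early while the cmdq is not OK or is
 * still in polling mode.
 */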
368 struct erdma_cmdq *cmdq, u32 timeout)
373 erdma_polling_cmd_completions(cmdq);
387 struct erdma_cmdq *cmdq, u32 timeout)
395 spin_lock_irqsave(&cmdq->cq.lock, flags);
397 spin_unlock_irqrestore(&cmdq->cq.lock, flags);
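/*
 * Two wait strategies share one timeout: erdma_poll_cmd_completion()
 * busy-polls the CQ itself (polling mode), while erdma_wait_cmd_completion()
 * sleeps on the waiter's completion and, on timeout, marks the context
 * under cq.lock so the update cannot race a late completion. Either
 * returns an error that makes the caller declare the cmdq timed out.
 */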
410 int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
416 if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
419 down(&cmdq->credits);
421 comp_wait = get_comp_wait(cmdq);
423 clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
424 set_bit(ERDMA_CMDQ_STATE_CTX_ERR_BIT, &cmdq->state);
425 up(&cmdq->credits);
429 spin_lock(&cmdq->sq.lock);
430 push_cmdq_sqe(cmdq, req, req_size, comp_wait);
431 spin_unlock(&cmdq->sq.lock);
433 if (cmdq->use_event)
434 ret = erdma_wait_cmd_completion(comp_wait, cmdq,
437 ret = erdma_poll_cmd_completion(comp_wait, cmdq,
441 set_bit(ERDMA_CMDQ_STATE_TIMEOUT_BIT, &cmdq->state);
442 clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
453 put_comp_wait(cmdq, comp_wait);
456 up(&cmdq->credits);
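/*
 * erdma_post_cmd_wait() is the single submission entry point: take a
 * credit, claim a wait context, push the sqe under sq.lock, then wait by
 * event or by polling. On timeout the cmdq is poisoned (TIMEOUT set, OK
 * cleared) and the flow jumps past put_comp_wait(), deliberately never
 * recycling a context the device may still write to. A minimal caller
 * sketch follows; the request layout, the opcode value, and the
 * resp0/resp1 out-parameters (from the signature's unmatched continuation
 * line) are assumptions, not the driver's actual definitions:
 */
static int erdma_cmd_example(struct erdma_dev *dev, u32 cqn)
{
	struct {
		u64 hdr;	/* push_cmdq_sqe() requires the header first */
		u32 cqn;
		u32 rsvd;
	} req = {};
	u64 resp0, resp1;

	/* ERDMA_CMD_HDR_OPCODE_MASK is assumed from the header-field naming
	 * seen above; 0 stands in for a real opcode.
	 */
	req.hdr = FIELD_PREP(ERDMA_CMD_HDR_OPCODE_MASK, 0);
	req.cqn = cqn;

	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req),
				   &resp0, &resp1);
}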