Lines Matching defs:queue
451 * the rcd wait queue to be fetched under the exp_lock and
469 struct tid_queue *queue)
475 priv = list_first_entry_or_null(&queue->queue_head,
503 struct tid_queue *queue, struct rvt_qp *qp)
511 fqp = first_qp(rcd, queue);
534 struct tid_queue *queue, struct rvt_qp *qp)
545 queue->dequeue++;
555 * wait queue and the HFI1_S_WAIT_TID_SPACE s_flag is set.
560 struct tid_queue *queue, struct rvt_qp *qp)
569 list_add_tail(&priv->tid_wait, &queue->queue_head);
570 priv->tid_enqueue = ++queue->enqueue;
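A minimal sketch of the enqueue/dequeue pattern the fragments above point at: QPs are appended to a list-based wait queue and stamped with a monotonically increasing ticket, while a matching dequeue counter advances as waiters are released. The struct layout, helper names, and includes below are simplified assumptions for illustration, not the driver's actual definitions.

/* Hypothetical, simplified illustration of the tid_queue counter pattern. */
#include <linux/list.h>
#include <linux/types.h>

struct tid_queue {
	struct list_head queue_head;	/* FIFO of waiting QP privs */
	u32 enqueue;			/* total QPs ever enqueued */
	u32 dequeue;			/* total QPs ever dequeued */
};

struct qp_priv {
	struct list_head tid_wait;	/* link into queue_head */
	u32 tid_enqueue;		/* ticket taken at enqueue time */
};

/* Append a QP to the wait queue and stamp it with its enqueue ticket. */
static void tid_enqueue_qp(struct tid_queue *queue, struct qp_priv *priv)
{
	list_add_tail(&priv->tid_wait, &queue->queue_head);
	priv->tid_enqueue = ++queue->enqueue;
}

/* Peek at the first waiter, if any, without removing it. */
static struct qp_priv *tid_first_waiter(struct tid_queue *queue)
{
	return list_first_entry_or_null(&queue->queue_head,
					struct qp_priv, tid_wait);
}

/* Remove the head waiter and account for the dequeue. */
static void tid_dequeue_qp(struct tid_queue *queue, struct qp_priv *priv)
{
	list_del_init(&priv->tid_wait);
	queue->dequeue++;
}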
664 static void _tid_rdma_flush_wait(struct rvt_qp *qp, struct tid_queue *queue)
678 queue->dequeue++;
789 goto queue;
793 goto queue;
809 queue:
1489 goto queue;
1497 goto queue;
1529 queue:
1611 * @qp: the queue pair
2265 /* We've verified the request, insert it into the ack queue. */
2306 * queue, qpriv->r_tid_alloc follows qp->r_head_ack_queue. It is ok to
2559 * state. However, if the wqe queue is empty (qp->s_acked == qp->s_tail
3394 * queue. Rather than a computationally expensive exact estimate of when
3396 * the flow queue it has to wait approximately (N + 1) * (number of
3404 struct tid_queue *queue)
3406 return qpriv->tid_enqueue - queue->dequeue;
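A hedged reading of the fragment at line 3406 (standalone sketch; names are illustrative, not the driver's): the position estimate is simply the distance between the ticket a QP took when it joined the queue and the queue's running dequeue count, which stays correct across u32 wraparound.

#include <linux/types.h>

/*
 * Illustrative only: a QP that took ticket 12 while 9 waiters have been
 * dequeued sees 12 - 9 = 3 entries (tickets 10..12) still queued, i.e.
 * it is third in line.
 */
static u32 position_in_queue_sketch(u32 tid_enqueue, u32 dequeue)
{
	return tid_enqueue - dequeue;
}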
3697 /* We've verified the request, insert it into the ack queue. */
3988 * Go through the entire ack queue and clear any outstanding
4163 * they might not appear sequentially in the queue. Therefore, the
4346 * (of any kind) in the queue.
5271 * req could be pointing at the previous ack queue entry
5440 * queue and false if the qp is already on the queue before
5441 * this call. Either way, the qp will be on the queue when the