Lines Matching refs:pq (cross-reference hits for the pq packet-queue pointer in the hfi1 user SDMA driver)
40 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
49 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
71 struct hfi1_user_sdma_pkt_q *pq =
75 trace_hfi1_usdma_defer(pq, sde, &pq->busy);
83 xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
84 if (list_empty(&pq->busy.list)) {
85 pq->busy.lock = &sde->waitlock;
86 iowait_get_priority(&pq->busy);
87 iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
98 struct hfi1_user_sdma_pkt_q *pq =
101 trace_hfi1_usdma_activate(pq, wait, reason);
102 xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
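
The hits at lines 71-102 are the two iowait callbacks that throttle user SDMA submission: defer_packet_queue() flips pq->state to SDMA_PKT_Q_DEFERRED with xchg() and parks pq->busy on the engine's dmawait list when descriptors run out, and activate_packet_queue() flips the state back to SDMA_PKT_Q_ACTIVE so the submitter sleeping on pq->busy.wait_dma (see lines 542-543 further down) can continue. Below is a minimal sketch of that defer/activate handshake using only core kernel primitives (xchg plus a wait queue); the struct and function names are invented for illustration and the hfi1 iowait/waitlist bookkeeping is omitted.

#include <linux/atomic.h>
#include <linux/wait.h>

#define DEMO_PKT_Q_ACTIVE   1
#define DEMO_PKT_Q_DEFERRED 2

struct demo_pkt_q_state {
        unsigned long state;              /* DEMO_PKT_Q_ACTIVE or DEMO_PKT_Q_DEFERRED */
        wait_queue_head_t wait_dma;       /* submitters sleep here while deferred */
};

static void demo_state_init(struct demo_pkt_q_state *q)
{
        q->state = DEMO_PKT_Q_ACTIVE;
        init_waitqueue_head(&q->wait_dma);
}

/* Engine out of descriptors: mark the queue deferred (cf. defer_packet_queue). */
static void demo_defer(struct demo_pkt_q_state *q)
{
        xchg(&q->state, DEMO_PKT_Q_DEFERRED);
        /* the real callback also parks pq->busy on the engine's dmawait list */
}

/* Descriptors available again: reactivate and wake submitters
 * (cf. activate_packet_queue).
 */
static void demo_activate(struct demo_pkt_q_state *q)
{
        xchg(&q->state, DEMO_PKT_Q_ACTIVE);
        wake_up(&q->wait_dma);
}

/* Submit side: block until the queue has been reactivated. */
static int demo_wait_active(struct demo_pkt_q_state *q)
{
        return wait_event_interruptible(q->wait_dma,
                                        q->state == DEMO_PKT_Q_ACTIVE);
}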
113 struct hfi1_user_sdma_pkt_q *pq;
123 pq = kzalloc(sizeof(*pq), GFP_KERNEL);
124 if (!pq)
126 pq->dd = dd;
127 pq->ctxt = uctxt->ctxt;
128 pq->subctxt = fd->subctxt;
129 pq->n_max_reqs = hfi1_sdma_comp_ring_size;
130 atomic_set(&pq->n_reqs, 0);
131 init_waitqueue_head(&pq->wait);
132 atomic_set(&pq->n_locked, 0);
134 iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
136 pq->reqidx = 0;
138 pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
139 sizeof(*pq->reqs),
141 if (!pq->reqs)
144 pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL);
145 if (!pq->req_in_use)
150 pq->txreq_cache = kmem_cache_create(buf,
155 if (!pq->txreq_cache) {
172 ret = hfi1_init_system_pinning(pq);
176 rcu_assign_pointer(fd->pq, pq);
186 kmem_cache_destroy(pq->txreq_cache);
188 bitmap_free(pq->req_in_use);
190 kfree(pq->reqs);
192 kfree(pq);
197 static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
200 seqlock_t *lock = pq->busy.lock;
205 if (!list_empty(&pq->busy.list)) {
206 list_del_init(&pq->busy.list);
207 pq->busy.lock = NULL;
215 struct hfi1_user_sdma_pkt_q *pq;
220 pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
222 if (pq) {
223 rcu_assign_pointer(fd->pq, NULL);
227 iowait_sdma_drain(&pq->busy);
230 pq->wait,
231 !atomic_read(&pq->n_reqs));
232 kfree(pq->reqs);
233 hfi1_free_system_pinning(pq);
234 bitmap_free(pq->req_in_use);
235 kmem_cache_destroy(pq->txreq_cache);
236 flush_pq_iowait(pq);
237 kfree(pq);
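
Lines 113-192 and 197-237 cover the packet-queue lifecycle: hfi1_user_sdma_alloc_queues() builds the pq (request array, req_in_use bitmap, per-queue txreq slab cache) and unwinds the allocations in reverse order on failure, while hfi1_user_sdma_free_queues() unpublishes the pq pointer, drains outstanding SDMA work, waits for n_reqs to reach zero and only then frees everything. A condensed sketch of that allocate-with-unwind / drain-then-free shape follows; the names are invented, and the SRCU publication, iowait drain and system-pinning pieces are deliberately left out.

#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

struct demo_req {
        u16 comp_idx;                     /* slot in the completion ring */
};

struct demo_txreq {
        struct list_head list;            /* placeholder payload for the slab cache */
};

struct demo_pkt_q {
        atomic_t n_reqs;                  /* requests still in flight */
        wait_queue_head_t wait;           /* teardown sleeps here until n_reqs == 0 */
        struct demo_req *reqs;            /* fixed-size request ring */
        unsigned long *req_in_use;        /* one bit per ring slot */
        struct kmem_cache *txreq_cache;   /* per-queue tx descriptor cache */
};

static struct demo_pkt_q *demo_alloc_pq(unsigned int ring_size)
{
        struct demo_pkt_q *q = kzalloc(sizeof(*q), GFP_KERNEL);

        if (!q)
                return NULL;
        atomic_set(&q->n_reqs, 0);
        init_waitqueue_head(&q->wait);

        q->reqs = kcalloc(ring_size, sizeof(*q->reqs), GFP_KERNEL);
        if (!q->reqs)
                goto free_q;
        q->req_in_use = bitmap_zalloc(ring_size, GFP_KERNEL);
        if (!q->req_in_use)
                goto free_reqs;
        q->txreq_cache = kmem_cache_create("demo_txreq",
                                           sizeof(struct demo_txreq),
                                           0, SLAB_HWCACHE_ALIGN, NULL);
        if (!q->txreq_cache)
                goto free_bitmap;
        return q;

free_bitmap:
        bitmap_free(q->req_in_use);
free_reqs:
        kfree(q->reqs);
free_q:
        kfree(q);
        return NULL;
}

static void demo_free_pq(struct demo_pkt_q *q)
{
        /* Do not free anything until every outstanding request has completed. */
        wait_event(q->wait, !atomic_read(&q->n_reqs));
        kfree(q->reqs);
        bitmap_free(q->req_in_use);
        kmem_cache_destroy(q->txreq_cache);
        kfree(q);
}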
283 struct hfi1_user_sdma_pkt_q *pq =
284 srcu_dereference(fd->pq, &fd->pq_srcu);
286 struct hfi1_devdata *dd = pq->dd;
341 if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
352 req = pq->reqs + info.comp_idx;
355 req->pq = pq;
370 atomic_inc(&pq->n_reqs);
525 set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
526 pq->state = SDMA_PKT_Q_ACTIVE;
542 pq->busy.wait_dma,
543 pq->state == SDMA_PKT_Q_ACTIVE,
546 trace_hfi1_usdma_we(pq, we_ret);
548 flush_pq_iowait(pq);
561 wait_event(pq->busy.wait_dma,
564 pq_update(pq);
565 set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
618 trace_hfi1_sdma_user_compute_length(req->pq->dd,
619 req->pq->ctxt,
620 req->pq->subctxt,
646 struct hfi1_user_sdma_pkt_q *pq = req->pq;
669 ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
671 sdma_txclean(pq->dd, &tx->txreq);
681 struct hfi1_user_sdma_pkt_q *pq = NULL;
684 if (!req->pq)
687 pq = req->pq;
716 tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
821 iowait_get_ib_work(&pq->busy),
837 sdma_txclean(pq->dd, &tx->txreq);
839 kmem_cache_free(pq->txreq_cache, tx);
916 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1013 pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx,
1022 trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
1024 return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
1033 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1132 trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
1159 struct hfi1_user_sdma_pkt_q *pq;
1167 pq = req->pq;
1178 kmem_cache_free(pq->txreq_cache, tx);
1185 set_comp_state(pq, cq, req->info.comp_idx, state, status);
1186 pq_update(pq);
1189 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
1191 if (atomic_dec_and_test(&pq->n_reqs))
1192 wake_up(&pq->wait);
1204 sdma_txclean(req->pq->dd, t);
1205 kmem_cache_free(req->pq->txreq_cache, tx);
1210 clear_bit(req->info.comp_idx, req->pq->req_in_use);
1213 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
1222 trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
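
The remaining hits (lines 283-565 and 916-1222) follow an individual request: the submit path claims a completion-ring slot with test_and_set_bit() on pq->req_in_use (line 341) and bumps pq->n_reqs (line 370); the completion path posts the final state via set_comp_state() and drops the count in pq_update() (lines 1185-1192), which wakes the teardown waiter, and the ring slot is released with clear_bit() (line 1210). A hedged sketch of that submit/complete accounting is below; names are invented, and the real completion-queue write and txreq recycling are only indicated by comments.

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/wait.h>
#include <linux/errno.h>

struct demo_pkt_q_acct {
        atomic_t n_reqs;                  /* in-flight requests */
        wait_queue_head_t wait;           /* queue teardown waits here */
        unsigned long *req_in_use;        /* one bit per completion-ring slot */
};

/* Submit path: claim the caller-chosen slot, then account for the request. */
static int demo_submit(struct demo_pkt_q_acct *q, unsigned int comp_idx)
{
        if (test_and_set_bit(comp_idx, q->req_in_use))
                return -EBADSLT;          /* slot still owned by an earlier request */
        atomic_inc(&q->n_reqs);
        /* ... build the txreqs and hand them to the SDMA engine ... */
        return 0;
}

/* Completion path: mirrors the set_comp_state() / clear_bit() / pq_update() trio. */
static void demo_complete(struct demo_pkt_q_acct *q, unsigned int comp_idx)
{
        /* ... write COMPLETE or ERROR into the user-visible completion ring ... */
        clear_bit(comp_idx, q->req_in_use);
        if (atomic_dec_and_test(&q->n_reqs))
                wake_up(&q->wait);        /* lets a pending queue teardown proceed */
}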