Lines matching refs:pq (references to the packet-queue pointer pq, struct hfi1_user_sdma_pkt_q, in the hfi1 user SDMA code), listed with their source line numbers.

81 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
90 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
128 struct hfi1_user_sdma_pkt_q *pq =
132 trace_hfi1_usdma_defer(pq, sde, &pq->busy);
140 xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
141 if (list_empty(&pq->busy.list)) {
142 pq->busy.lock = &sde->waitlock;
143 iowait_get_priority(&pq->busy);
144 iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
155 struct hfi1_user_sdma_pkt_q *pq =
158 trace_hfi1_usdma_activate(pq, wait, reason);
159 xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
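
The two callbacks above (defer_packet_queue at lines 128-144 and activate_packet_queue at lines 155-159) are the iowait back-pressure pair for a packet queue: when an SDMA engine runs out of descriptors the queue is marked deferred and parked on that engine's dmawait list, and when descriptors free up the engine flips it back to active. A condensed sketch of the pattern, reconstructed from the matched lines; the waitlock locking, the sdma_progress() recheck, and the final wake_up() are assumptions about the unmatched surrounding lines, not facts shown here.

    /* Sketch: the engine has no free descriptors, so park this pq's iowait. */
    static int defer_packet_queue(struct sdma_engine *sde, struct iowait_work *wait,
                                  struct sdma_txreq *txreq, uint seq, bool pkts_sent)
    {
            struct hfi1_user_sdma_pkt_q *pq =
                    container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);

            write_seqlock(&sde->waitlock);                  /* assumed: guards sde->dmawait */
            trace_hfi1_usdma_defer(pq, sde, &pq->busy);
            if (sdma_progress(sde, seq, txreq)) {           /* assumed recheck: a slot freed up */
                    write_sequnlock(&sde->waitlock);
                    return -EAGAIN;                         /* caller retries immediately */
            }
            xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
            if (list_empty(&pq->busy.list)) {               /* not already on a waitlist */
                    pq->busy.lock = &sde->waitlock;
                    iowait_get_priority(&pq->busy);
                    iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
            }
            write_sequnlock(&sde->waitlock);
            return -EBUSY;                                  /* tells the submitter to wait */
    }

    /* Sketch: the engine's wake-up path flips the queue back to active. */
    static void activate_packet_queue(struct iowait *wait, int reason)
    {
            struct hfi1_user_sdma_pkt_q *pq =
                    container_of(wait, struct hfi1_user_sdma_pkt_q, busy);

            trace_hfi1_usdma_activate(pq, wait, reason);
            xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
            wake_up(&wait->wait_dma);                       /* assumed: unblocks the wait at line 603 */
    }

The -EBUSY return is what sends the submit loop (lines 586-609 below) into its wait on pq->busy.wait_dma.
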
170 struct hfi1_user_sdma_pkt_q *pq;
180 pq = kzalloc(sizeof(*pq), GFP_KERNEL);
181 if (!pq)
183 pq->dd = dd;
184 pq->ctxt = uctxt->ctxt;
185 pq->subctxt = fd->subctxt;
186 pq->n_max_reqs = hfi1_sdma_comp_ring_size;
187 atomic_set(&pq->n_reqs, 0);
188 init_waitqueue_head(&pq->wait);
189 atomic_set(&pq->n_locked, 0);
191 iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
193 pq->reqidx = 0;
195 pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
196 sizeof(*pq->reqs),
198 if (!pq->reqs)
201 pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL);
202 if (!pq->req_in_use)
207 pq->txreq_cache = kmem_cache_create(buf,
212 if (!pq->txreq_cache) {
229 ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
230 &pq->handler);
236 rcu_assign_pointer(fd->pq, pq);
246 kmem_cache_destroy(pq->txreq_cache);
248 bitmap_free(pq->req_in_use);
250 kfree(pq->reqs);
252 kfree(pq);
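
Lines 180-236 are the queue allocator's happy path and lines 246-252 its unwind, freeing in reverse allocation order. A trimmed sketch of that sequence; the function and label names, the txreq-cache name format, the iowait_init() trailing arguments, and the kmem_cache_create() flags are assumptions filled in around the matched lines.

    /* Sketch of the allocation path; declarations reduced to what the matches need. */
    static int alloc_pkt_q_sketch(struct hfi1_filedata *fd, struct hfi1_ctxtdata *uctxt,
                                  struct hfi1_devdata *dd)
    {
            struct hfi1_user_sdma_pkt_q *pq;
            char buf[64];                               /* cache-name scratch, size assumed */
            int ret = -ENOMEM;

            pq = kzalloc(sizeof(*pq), GFP_KERNEL);
            if (!pq)
                    return -ENOMEM;
            pq->dd = dd;
            pq->ctxt = uctxt->ctxt;
            pq->subctxt = fd->subctxt;
            pq->n_max_reqs = hfi1_sdma_comp_ring_size;
            atomic_set(&pq->n_reqs, 0);                 /* outstanding requests, see pq_update()   */
            init_waitqueue_head(&pq->wait);             /* teardown sleeps here until n_reqs == 0  */
            atomic_set(&pq->n_locked, 0);               /* pinned-page accounting                  */
            iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
                        activate_packet_queue, NULL, NULL);   /* trailing args assumed */
            pq->reqidx = 0;

            /* One request slot and one in-use bit per completion-ring entry. */
            pq->reqs = kcalloc(hfi1_sdma_comp_ring_size, sizeof(*pq->reqs), GFP_KERNEL);
            if (!pq->reqs)
                    goto bail_pq;
            pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL);
            if (!pq->req_in_use)
                    goto bail_reqs;

            snprintf(buf, sizeof(buf), "sdma-txreq-%u-%u-%u",     /* name format assumed */
                     dd->unit, uctxt->ctxt, fd->subctxt);
            pq->txreq_cache = kmem_cache_create(buf, sizeof(struct user_sdma_txreq),
                                                L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
            if (!pq->txreq_cache)
                    goto bail_bitmap;

            ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq, &pq->handler);
            if (ret)
                    goto bail_cache;

            rcu_assign_pointer(fd->pq, pq);             /* publish only once fully initialized */
            return 0;

    bail_cache:
            kmem_cache_destroy(pq->txreq_cache);
    bail_bitmap:
            bitmap_free(pq->req_in_use);
    bail_reqs:
            kfree(pq->reqs);
    bail_pq:
            kfree(pq);
            return ret;
    }
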
257 static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
260 seqlock_t *lock = pq->busy.lock;
265 if (!list_empty(&pq->busy.list)) {
266 list_del_init(&pq->busy.list);
267 pq->busy.lock = NULL;
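
flush_pq_iowait() (lines 257-267) undoes what defer_packet_queue() did: it takes the engine waitlock recorded in pq->busy.lock and unlinks the iowait if it is still parked there. A short sketch; the NULL check and the irqsave variant of the seqlock are assumptions around the matched lines.

    static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
    {
            unsigned long flags;
            seqlock_t *lock = pq->busy.lock;        /* set when the pq was parked on an engine */

            if (!lock)                              /* assumed: never deferred, nothing to do */
                    return;
            write_seqlock_irqsave(lock, flags);
            if (!list_empty(&pq->busy.list)) {
                    list_del_init(&pq->busy.list);  /* off the engine's dmawait list */
                    pq->busy.lock = NULL;           /* no longer tied to any engine  */
            }
            write_sequnlock_irqrestore(lock, flags);
    }
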
275 struct hfi1_user_sdma_pkt_q *pq;
280 pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
282 if (pq) {
283 rcu_assign_pointer(fd->pq, NULL);
287 iowait_sdma_drain(&pq->busy);
290 pq->wait,
291 !atomic_read(&pq->n_reqs));
292 kfree(pq->reqs);
293 if (pq->handler)
294 hfi1_mmu_rb_unregister(pq->handler);
295 bitmap_free(pq->req_in_use);
296 kmem_cache_destroy(pq->txreq_cache);
297 flush_pq_iowait(pq);
298 kfree(pq);
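
The teardown path (lines 275-298) depends on a strict ordering: unpublish fd->pq so no new submitter can find it, drain what the engines still hold, wait for n_reqs to reach zero, and only then free the backing structures. A condensed sketch of that ordering, with declarations omitted; the SRCU grace-period call and the lockdep expression are assumptions inferred from the srcu_dereference_check() at line 280.

    pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
                                lockdep_is_held(&fd->pq_rcu_lock)); /* lock name assumed */
    if (pq) {
            rcu_assign_pointer(fd->pq, NULL);       /* 1. new submitters can no longer see pq  */
            synchronize_srcu(&fd->pq_srcu);         /* 2. wait out in-flight readers (assumed) */
            iowait_sdma_drain(&pq->busy);           /* 3. drain descriptors the engines hold   */
            wait_event_interruptible(pq->wait,      /* 4. every request retired via pq_update  */
                                     !atomic_read(&pq->n_reqs));
            kfree(pq->reqs);
            if (pq->handler)
                    hfi1_mmu_rb_unregister(pq->handler);
            bitmap_free(pq->req_in_use);
            kmem_cache_destroy(pq->txreq_cache);    /* also catches any leaked txreq */
            flush_pq_iowait(pq);                    /* drop any stale waitlist linkage */
            kfree(pq);
    }
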
344 struct hfi1_user_sdma_pkt_q *pq =
345 srcu_dereference(fd->pq, &fd->pq_srcu);
347 struct hfi1_devdata *dd = pq->dd;
402 if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
413 req = pq->reqs + info.comp_idx;
416 req->pq = pq;
431 atomic_inc(&pq->n_reqs);
586 set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
587 pq->state = SDMA_PKT_Q_ACTIVE;
603 pq->busy.wait_dma,
604 pq->state == SDMA_PKT_Q_ACTIVE,
607 trace_hfi1_usdma_we(pq, we_ret);
609 flush_pq_iowait(pq);
622 wait_event(pq->busy.wait_dma,
625 pq_update(pq);
626 set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
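
On the submit side (lines 344-626), each user request claims a completion-ring slot by comp_idx before anything is built, takes one n_reqs reference, and then rides the ACTIVE/DEFERRED state that the two callbacks above toggle. A pared-down sketch of that flow, declarations omitted; the error numbers, the send-loop helper, the timeout constant, and the free_req condition are assumptions around the matched lines.

    /* Claim the completion-ring slot named by user space (one bit per slot). */
    if (test_and_set_bit(info.comp_idx, pq->req_in_use))
            return -EBADSLT;                        /* errno assumed: slot already in use */

    req = pq->reqs + info.comp_idx;                 /* slot index doubles as request index */
    req->pq = pq;
    atomic_inc(&pq->n_reqs);                        /* dropped again by pq_update()        */

    set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
    pq->state = SDMA_PKT_Q_ACTIVE;

    while (req->seqsubmitted != req->info.npkts) {  /* loop condition assumed */
            ret = user_sdma_send_pkts(req, pcount); /* helper and pcount assumed */
            if (ret < 0) {
                    if (ret != -EBUSY)
                            goto free_req;
                    /* Engine deferred us: sleep until activate_packet_queue() flips the state. */
                    we_ret = wait_event_interruptible_timeout(
                                    pq->busy.wait_dma,
                                    pq->state == SDMA_PKT_Q_ACTIVE,
                                    msecs_to_jiffies(SDMA_IOWAIT_TIMEOUT)); /* timeout assumed */
                    trace_hfi1_usdma_we(pq, we_ret);
                    if (we_ret <= 0)
                            flush_pq_iowait(pq);    /* interrupted or timed out: unpark ourselves */
            }
    }
    return 0;

    free_req:
            /* Let already-submitted packets finish, then drop this request's
             * n_reqs reference and record the error in the completion ring. */
            if (req->seqsubmitted)
                    wait_event(pq->busy.wait_dma,
                               req->seqcomp == req->seqsubmitted - 1); /* condition assumed */
            pq_update(pq);
            set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
            return ret;
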
679 trace_hfi1_sdma_user_compute_length(req->pq->dd,
680 req->pq->ctxt,
681 req->pq->subctxt,
707 struct hfi1_user_sdma_pkt_q *pq = req->pq;
730 ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
732 sdma_txclean(pq->dd, &tx->txreq);
742 struct hfi1_user_sdma_pkt_q *pq = NULL;
745 if (!req->pq)
748 pq = req->pq;
777 tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
882 iowait_get_ib_work(&pq->busy),
898 sdma_txclean(pq->dd, &tx->txreq);
900 kmem_cache_free(pq->txreq_cache, tx);
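
Each packet's transmit descriptor comes from the per-queue slab created at line 207, not a global pool, so the kmem_cache_destroy() at line 296 doubles as a leak check. A small sketch of the allocate/fill/unwind pattern visible around lines 730-900; the surrounding packet loop and the payload additions are omitted, and the error path is assembled from the matched free/clean calls.

    tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
    if (!tx)
            return -ENOMEM;

    /* First descriptor of every packet is the kernel-resident header copy. */
    ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
    if (ret)
            goto free_tx;

    /* ... payload pages are added next, then the txreq is handed to the engine
     *     through the queue's iowait work (line 882: iowait_get_ib_work(&pq->busy)) ... */
    return 0;

    free_tx:
            sdma_txclean(pq->dd, &tx->txreq);       /* unmap anything already attached  */
            kmem_cache_free(pq->txreq_cache, tx);   /* descriptor back to the per-pq slab */
            return ret;
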
904 static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
907 struct mmu_rb_handler *handler = pq->handler;
988 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1085 pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx,
1094 trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
1096 return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
1105 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1204 trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
1231 struct hfi1_user_sdma_pkt_q *pq;
1239 pq = req->pq;
1250 kmem_cache_free(pq->txreq_cache, tx);
1257 set_comp_state(pq, cq, req->info.comp_idx, state, status);
1258 pq_update(pq);
1261 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
1263 if (atomic_dec_and_test(&pq->n_reqs))
1264 wake_up(&pq->wait);
1276 sdma_txclean(req->pq->dd, t);
1277 kmem_cache_free(req->pq->txreq_cache, tx);
1282 clear_bit(req->info.comp_idx, req->pq->req_in_use);
1285 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
1294 trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
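
The completion side (lines 1231-1294) is what lets the teardown wait at lines 290-291 finish: when the last packet of a request completes, the completion-ring entry is written, the request's n_reqs reference is dropped, and the req_in_use bit is cleared so the slot can be reused. pq_update() is fully visible in the matches; set_comp_state() below is a sketch with the comp-ring field names and the barrier assumed.

    static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
                                      struct hfi1_user_sdma_comp_q *cq,
                                      u16 idx, enum hfi1_sdma_comp_state state, int ret)
    {
            if (state == ERROR)
                    cq->comps[idx].errcode = -ret;  /* field names assumed */
            smp_wmb();                              /* errcode must be visible before status */
            cq->comps[idx].status = state;
            trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
                                            idx, state, ret);
    }

    static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
    {
            /* Last outstanding request wakes the teardown sleeping on pq->wait. */
            if (atomic_dec_and_test(&pq->n_reqs))
                    wake_up(&pq->wait);
    }

Per the matched lines, the txreq itself goes back to pq->txreq_cache in the completion callback (line 1250) before the ring entry is finalized at line 1257.
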
1310 atomic_sub(node->npages, &node->pq->n_locked);
1346 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1355 if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked),
1358 atomic_read(&pq->n_locked), npages);
1359 cleared = sdma_cache_evict(pq, npages);
1383 atomic_add(pinned, &pq->n_locked);
1400 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1414 node->pq = pq;
1417 ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
1434 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1437 struct mmu_rb_handler *handler = pq->handler;
1517 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1554 ret = sdma_txadd_page(pq->dd, &tx->txreq,
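
The final group (lines 1310-1554) is the pinned-page accounting behind pq->n_locked: the pin path checks the per-user lock limit, evicts older cached pinnings if the limit would be exceeded, credits the counter for what it pinned, and records the pq in the rb-tree node so the node's release hook can debit it again. A sketch of the pin side; the actual pin call, the retry policy, and the errno are assumptions around the matched lines.

    /* Pin-side accounting around lines 1346-1417; npages, node, pinned, cleared,
     * and ret are assumed to be set up by the caller, and the real pin call is elided. */
    struct hfi1_user_sdma_pkt_q *pq = req->pq;

    if (!hfi1_can_pin_pages(pq->dd, current->mm,
                            atomic_read(&pq->n_locked), npages)) {
            /* Over the limit: push older cached pinnings out of the MMU rb-tree. */
            cleared = sdma_cache_evict(pq, npages);
            if (cleared < npages)
                    return -ENOMEM;                 /* errno assumed */
    }

    /* ... pin the user pages here; 'pinned' is how many actually stuck ... */

    atomic_add(pinned, &pq->n_locked);              /* credit the counter (line 1383)       */
    node->pq = pq;                                  /* node remembers its queue (line 1414) */
    ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
    /* ... so the release hook can debit it later (line 1310):
     *     atomic_sub(node->npages, &node->pq->n_locked);
     */
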