Lines Matching refs:wq
43 struct idxd_wq *wq;
55 static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid);
101 struct idxd_wq *wq = ctx->wq;
103 if (!wq_pasid_enabled(wq))
122 struct idxd_wq *wq = ctx->wq;
123 struct idxd_device *idxd = wq->idxd;
131 if (wq_shared(wq)) {
135 /* The wq disable in the disable pasid function will drain the wq */
136 rc = idxd_wq_disable_pasid(wq);
138 dev_err(dev, "wq disable pasid failed.\n");
140 idxd_wq_drain(wq);
145 idxd_cdev_evl_drain_pasid(wq, ctx->pasid);
150 mutex_lock(&wq->wq_lock);
151 idxd_wq_put(wq);
152 mutex_unlock(&wq->wq_lock);
165 struct idxd_wq *wq = idxd_cdev->wq;
167 cdev_ctx = &ictx[wq->idxd->data->type];
188 return idxd_cdev->wq;
193 struct idxd_wq *wq = ctx->wq;
196 mutex_lock(&wq->uc_lock);
197 ptr = xa_cmpxchg(&wq->upasid_xa, ctx->pasid, ctx, NULL, GFP_KERNEL);
199 dev_warn(&wq->idxd->pdev->dev, "xarray cmpxchg failed for pasid %u\n",
201 mutex_unlock(&wq->uc_lock);
204 void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index)
211 mutex_lock(&wq->uc_lock);
212 ctx = xa_load(&wq->upasid_xa, pasid);
214 mutex_unlock(&wq->uc_lock);
218 mutex_unlock(&wq->uc_lock);
225 struct idxd_wq *wq;
232 wq = inode_wq(inode);
233 idxd = wq->idxd;
236 dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
242 mutex_lock(&wq->wq_lock);
244 if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
249 ctx->wq = wq;
271 mutex_lock(&wq->uc_lock);
272 rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL);
273 mutex_unlock(&wq->uc_lock);
277 if (wq_dedicated(wq)) {
278 rc = idxd_wq_set_pasid(wq, pasid);
280 dev_err(dev, "wq set pasid failed: %d\n", rc);
286 idxd_cdev = wq->idxd_cdev;
313 idxd_wq_get(wq);
314 mutex_unlock(&wq->wq_lock);
328 mutex_unlock(&wq->wq_lock);
333 static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
335 struct idxd_device *idxd = wq->idxd;
353 if (entry_head->pasid == pasid && entry_head->wq_idx == wq->id)
359 drain_workqueue(wq->wq);
365 struct idxd_wq *wq = ctx->wq;
366 struct idxd_device *idxd = wq->idxd;
377 static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
380 struct device *dev = &wq->idxd->pdev->dev;
396 struct idxd_wq *wq = ctx->wq;
397 struct idxd_device *idxd = wq->idxd;
404 rc = check_vma(wq, vma, __func__);
409 pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
422 struct idxd_wq *wq = ctx->wq;
423 struct idxd_device *idxd = wq->idxd;
426 poll_wait(filp, &wq->err_queue, wait);
448 int idxd_wq_add_cdev(struct idxd_wq *wq)
450 struct idxd_device *idxd = wq->idxd;
462 idxd_cdev->wq = wq;
465 cdev_ctx = &ictx[wq->idxd->data->type];
474 dev->parent = wq_confdev(wq);
479 rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id);
483 wq->idxd_cdev = idxd_cdev;
487 dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
495 wq->idxd_cdev = NULL;
499 void idxd_wq_del_cdev(struct idxd_wq *wq)
503 idxd_cdev = wq->idxd_cdev;
505 wq->idxd_cdev = NULL;
512 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
513 struct idxd_device *idxd = wq->idxd;
537 mutex_lock(&wq->wq_lock);
539 wq->wq = create_workqueue(dev_name(wq_confdev(wq)));
540 if (!wq->wq) {
545 wq->type = IDXD_WQT_USER;
546 rc = drv_enable_wq(wq);
550 rc = idxd_wq_add_cdev(wq);
557 mutex_unlock(&wq->wq_lock);
561 drv_disable_wq(wq);
563 destroy_workqueue(wq->wq);
564 wq->type = IDXD_WQT_NONE;
566 mutex_unlock(&wq->wq_lock);
572 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
574 mutex_lock(&wq->wq_lock);
575 idxd_wq_del_cdev(wq);
576 drv_disable_wq(wq);
577 wq->type = IDXD_WQT_NONE;
578 destroy_workqueue(wq->wq);
579 wq->wq = NULL;
580 mutex_unlock(&wq->wq_lock);
628 * idxd_copy_cr - copy completion record to user address space found by wq and
630 * @wq: work queue
640 int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
643 struct device *dev = &wq->idxd->pdev->dev;
648 mutex_lock(&wq->uc_lock);
650 ctx = xa_load(&wq->upasid_xa, pasid);
689 mutex_unlock(&wq->uc_lock);
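
Taken together, the matches above show one recurring pattern: each open user context is published in the per-wq xarray `wq->upasid_xa`, keyed by PASID and guarded by `wq->uc_lock` (insert at open, line 272; lookup in the counter and completion-record paths, lines 212 and 650; conditional removal at release, line 197). The sketch below is a minimal, hypothetical illustration of that pattern using the same kernel primitives; the `demo_*` structures and helpers are stand-ins invented for this example and are not the driver's actual definitions.

```c
/*
 * Hypothetical sketch of the PASID -> context tracking pattern visible in
 * the matches above (xa_insert on open, xa_load on lookup, xa_cmpxchg on
 * release). Names are illustrative only; they do not exist in the driver.
 */
#include <linux/mutex.h>
#include <linux/xarray.h>

struct demo_ctx {
	u32 pasid;
};

struct demo_wq {
	struct mutex uc_lock;		/* guards upasid_xa, like wq->uc_lock */
	struct xarray upasid_xa;	/* pasid -> struct demo_ctx * */
};

/* Open path: publish the context under its PASID (cf. line 272). */
static int demo_track_pasid(struct demo_wq *wq, struct demo_ctx *ctx, u32 pasid)
{
	int rc;

	mutex_lock(&wq->uc_lock);
	rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL);
	mutex_unlock(&wq->uc_lock);
	return rc;	/* -EBUSY if this PASID is already tracked */
}

/* Lookup path: find the context for a PASID (cf. lines 212 and 650). */
static struct demo_ctx *demo_find_pasid(struct demo_wq *wq, u32 pasid)
{
	struct demo_ctx *ctx;

	mutex_lock(&wq->uc_lock);
	ctx = xa_load(&wq->upasid_xa, pasid);
	mutex_unlock(&wq->uc_lock);
	return ctx;
}

/* Release path: drop the entry only if it still maps to this ctx (cf. line 197). */
static void demo_untrack_pasid(struct demo_wq *wq, struct demo_ctx *ctx)
{
	void *ptr;

	mutex_lock(&wq->uc_lock);
	ptr = xa_cmpxchg(&wq->upasid_xa, ctx->pasid, ctx, NULL, GFP_KERNEL);
	WARN_ON(xa_err(ptr));
	mutex_unlock(&wq->uc_lock);
}
```

The compare-and-exchange on release mirrors the quoted code at lines 197-199: it removes the entry only if it still points at the releasing context, and a failure is reported rather than silently ignored.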