Lines Matching defs:idxd

12 #include <uapi/linux/idxd.h>
14 #include "idxd.h"
29 struct idxd_device *idxd;
34 struct idxd_device *idxd = container_of(work, struct idxd_device, work);
35 struct device *dev = &idxd->pdev->dev;
38 idxd_device_reset(idxd);
39 rc = idxd_device_config(idxd);
43 rc = idxd_device_enable(idxd);
47 for (i = 0; i < idxd->max_wqs; i++) {
48 if (test_bit(i, idxd->wq_enable_map)) {
49 struct idxd_wq *wq = idxd->wqs[i];
53 clear_bit(i, idxd->wq_enable_map);
63 idxd_device_clear_state(idxd);
74 struct idxd_device *idxd = wq->idxd;
75 struct device *dev = &idxd->pdev->dev;
134 struct idxd_device *idxd = revoke->idxd;
135 struct pci_dev *pdev = idxd->pdev;
139 if (!idxd->request_int_handles) {
154 for (i = 1; i < idxd->irq_cnt; i++) {
155 struct idxd_irq_entry *ie = idxd_get_ie(idxd, i);
161 rc = idxd_device_request_int_handle(idxd, i, &new_handle, IDXD_IRQ_MSIX);
226 struct idxd_device *idxd = wq->idxd;
227 struct device *dev = &idxd->pdev->dev;
228 struct idxd_evl *evl = idxd->evl;
230 void *cr = (void *)entry_head + idxd->data->evl_cr_off;
231 int cr_size = idxd->data->compl_size;
232 u8 *status = (u8 *)cr + idxd->data->cr_status_off;
233 u8 *result = (u8 *)cr + idxd->data->cr_result_off;
308 kmem_cache_free(idxd->evl_cache, fault);
311 static void process_evl_entry(struct idxd_device *idxd,
314 struct device *dev = &idxd->pdev->dev;
315 struct idxd_evl *evl = idxd->evl;
326 int ent_size = evl_ent_size(idxd);
334 fault = kmem_cache_alloc(idxd->evl_cache, GFP_ATOMIC);
336 struct idxd_wq *wq = idxd->wqs[entry_head->wq_idx];
354 static void process_evl_entries(struct idxd_device *idxd)
358 struct idxd_evl *evl = idxd->evl;
360 unsigned int ent_size = evl_ent_size(idxd);
369 idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
370 evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
373 size = idxd->evl->size;
377 process_evl_entry(idxd, entry_head, h);
382 iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
389 struct idxd_device *idxd = ie_to_idxd(irq_entry);
390 struct device *dev = &idxd->pdev->dev;
397 cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
401 iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
407 spin_lock(&idxd->dev_lock);
409 idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
412 iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
413 idxd->reg_base + IDXD_SWERR_OFFSET);
415 if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
416 int id = idxd->sw_err.wq_idx;
417 struct idxd_wq *wq = idxd->wqs[id];
424 for (i = 0; i < idxd->max_wqs; i++) {
425 struct idxd_wq *wq = idxd->wqs[i];
432 spin_unlock(&idxd->dev_lock);
437 i, idxd->sw_err.bits[i]);
448 revoke->idxd = idxd;
450 queue_work(idxd->wq, &revoke->work);
454 idxd_wqs_quiesce(idxd);
460 complete(idxd->cmd_done);
470 perfmon_counter_overflow(idxd);
475 process_evl_entries(idxd);
487 gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
489 idxd->state = IDXD_DEV_HALTED;
496 INIT_WORK(&idxd->work, idxd_device_reinit);
497 queue_work(idxd->wq, &idxd->work);
499 idxd->state = IDXD_DEV_HALTED;
500 idxd_wqs_quiesce(idxd);
501 idxd_wqs_unmap_portal(idxd);
502 idxd_device_clear_state(idxd);
503 dev_err(&idxd->pdev->dev,
504 "idxd halted, need %s.\n",
524 dev_dbg(&wq->idxd->pdev->dev, "Failed to resubmit desc %d to wq %d.\n",
546 struct idxd_device *idxd = wq->idxd;
555 queue_work(idxd->wq, &irw->work);
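
The process_evl_entries() matches above (file lines 354-382) read the event log status out of IDXD_EVLSTATUS_OFFSET, walk entries one at a time from the current head toward the hardware tail, and then write the updated head back. The core of that walk is an ordinary ring-buffer drain. Below is a minimal, stand-alone C sketch of just that head/tail loop; evl_drain(), process_one() and struct fake_entry are hypothetical names used only for illustration, and the MMIO accesses and locking of the real driver are omitted.

/*
 * Hedged sketch of the head/tail drain pattern seen in the
 * process_evl_entries() matches above.  Not driver code: the real
 * implementation reads head/tail from the event log status register
 * and the entries from the device event log buffer.
 */
#include <stdio.h>

struct fake_entry {
	unsigned int wq_idx;	/* stand-in for the real event log entry fields */
};

static void process_one(const struct fake_entry *e, unsigned int idx)
{
	printf("entry %u: wq_idx=%u\n", idx, e->wq_idx);
}

/* Walk from the software head to the hardware tail, wrapping at size. */
static unsigned int evl_drain(const struct fake_entry *log, unsigned int head,
			      unsigned int tail, unsigned int size)
{
	unsigned int h = head;

	while (h != tail) {
		process_one(&log[h], h);
		h = (h + 1) % size;
	}
	return h;	/* new head; the driver writes this back to the status register */
}

int main(void)
{
	struct fake_entry log[8] = { {0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} };

	/* hardware tail advanced to 3 while the software head was at 6: the walk wraps */
	unsigned int head = evl_drain(log, 6, 3, 8);

	printf("new head = %u\n", head);
	return 0;
}

The sketch deliberately keeps the modulo wrap (h = (h + 1) % size) visible, since that is the detail the listed lines 373-377 revolve around: the event log is a fixed-size circular buffer whose size comes from idxd->evl->size.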