Lines Matching defs:idxd
17 #include <uapi/linux/idxd.h>
21 #include "idxd.h"
37 #define DRV_NAME "idxd"
75 static int idxd_setup_interrupts(struct idxd_device *idxd)
77 struct pci_dev *pdev = idxd->pdev;
88 idxd->irq_cnt = msixcnt;
98 ie = idxd_get_ie(idxd, 0);
100 rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
105 dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);
107 for (i = 0; i < idxd->max_wqs; i++) {
110 ie = idxd_get_ie(idxd, msix_idx);
120 idxd_unmask_error_interrupts(idxd);
124 idxd_mask_error_interrupts(idxd);
130 static void idxd_cleanup_interrupts(struct idxd_device *idxd)
132 struct pci_dev *pdev = idxd->pdev;
140 ie = idxd_get_ie(idxd, 0);
141 idxd_mask_error_interrupts(idxd);
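Illustrative sketch (not part of the listing above): the two interrupt routines excerpted here follow the usual MSI-X pattern of allocating vectors, requesting a threaded handler for the misc/error vector, and tearing everything down in reverse order. The struct and handler names below are hypothetical stand-ins, not idxd code.

	#include <linux/pci.h>
	#include <linux/interrupt.h>

	struct my_dev {
		int max_msix;
		int misc_irq;
	};

	/* Threaded handler for device-level misc/error events (hypothetical). */
	static irqreturn_t my_misc_thread(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int my_setup_interrupts(struct pci_dev *pdev, struct my_dev *d)
	{
		int msixcnt, rc;

		msixcnt = pci_alloc_irq_vectors(pdev, 1, d->max_msix, PCI_IRQ_MSIX);
		if (msixcnt < 0)
			return msixcnt;

		/* Vector 0 carries misc/error interrupts, handled in a thread. */
		d->misc_irq = pci_irq_vector(pdev, 0);
		rc = request_threaded_irq(d->misc_irq, NULL, my_misc_thread,
					  IRQF_ONESHOT, "my-misc", d);
		if (rc) {
			pci_free_irq_vectors(pdev);
			return rc;
		}
		return 0;
	}

	static void my_cleanup_interrupts(struct pci_dev *pdev, struct my_dev *d)
	{
		/* Reverse order of setup: release the handler, then the vectors. */
		free_irq(d->misc_irq, d);
		pci_free_irq_vectors(pdev);
	}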
146 static int idxd_setup_wqs(struct idxd_device *idxd)
148 struct device *dev = &idxd->pdev->dev;
153 idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
155 if (!idxd->wqs)
158 idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
159 if (!idxd->wq_enable_map) {
160 kfree(idxd->wqs);
164 for (i = 0; i < idxd->max_wqs; i++) {
174 wq->idxd = idxd;
176 conf_dev->parent = idxd_confdev(idxd);
179 rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
190 idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
192 wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
199 if (idxd->hw.wq_cap.op_config) {
206 bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
210 idxd->wqs[i] = wq;
217 wq = idxd->wqs[i];
224 static int idxd_setup_engines(struct idxd_device *idxd)
227 struct device *dev = &idxd->pdev->dev;
231 idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
233 if (!idxd->engines)
236 for (i = 0; i < idxd->max_engines; i++) {
246 engine->idxd = idxd;
248 conf_dev->parent = idxd_confdev(idxd);
251 rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
257 idxd->engines[i] = engine;
264 engine = idxd->engines[i];
271 static int idxd_setup_groups(struct idxd_device *idxd)
273 struct device *dev = &idxd->pdev->dev;
278 idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
280 if (!idxd->groups)
283 for (i = 0; i < idxd->max_groups; i++) {
293 group->idxd = idxd;
295 conf_dev->parent = idxd_confdev(idxd);
298 rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
304 idxd->groups[i] = group;
305 if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
316 group->rdbufs_allowed = idxd->max_rdbufs;
323 group = idxd->groups[i];
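Illustrative sketch (not part of the listing): idxd_setup_wqs(), idxd_setup_engines() and idxd_setup_groups() above share one pattern, a NUMA-local pointer array, one named configuration device per entry, and put_device() on every already-initialized entry when a later iteration fails. The types and names below are hypothetical stand-ins, not idxd code.

	#include <linux/device.h>
	#include <linux/slab.h>

	struct my_group {
		int id;
		struct device conf_dev;
	};

	struct my_dev {
		int id;
		int max_groups;
		struct my_group **groups;
	};

	/* ->release() frees the container, so put_device() is the only cleanup path. */
	static void my_group_release(struct device *dev)
	{
		kfree(container_of(dev, struct my_group, conf_dev));
	}

	static const struct device_type my_group_type = {
		.name = "my_group",
		.release = my_group_release,
	};

	static int my_setup_groups(struct my_dev *d, struct device *dev)
	{
		struct my_group *group;
		int i, rc;

		d->groups = kcalloc_node(d->max_groups, sizeof(*d->groups),
					 GFP_KERNEL, dev_to_node(dev));
		if (!d->groups)
			return -ENOMEM;

		for (i = 0; i < d->max_groups; i++) {
			group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
			if (!group) {
				rc = -ENOMEM;
				goto err;
			}

			group->id = i;
			device_initialize(&group->conf_dev);
			group->conf_dev.parent = dev;
			group->conf_dev.type = &my_group_type;
			rc = dev_set_name(&group->conf_dev, "group%d.%d", d->id, group->id);
			if (rc < 0) {
				put_device(&group->conf_dev);	/* frees group via release */
				goto err;
			}
			d->groups[i] = group;
		}
		return 0;

	err:
		while (--i >= 0)
			put_device(&d->groups[i]->conf_dev);
		kfree(d->groups);
		return rc;
	}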
329 static void idxd_cleanup_internals(struct idxd_device *idxd)
333 for (i = 0; i < idxd->max_groups; i++)
334 put_device(group_confdev(idxd->groups[i]));
335 for (i = 0; i < idxd->max_engines; i++)
336 put_device(engine_confdev(idxd->engines[i]));
337 for (i = 0; i < idxd->max_wqs; i++)
338 put_device(wq_confdev(idxd->wqs[i]));
339 destroy_workqueue(idxd->wq);
342 static int idxd_init_evl(struct idxd_device *idxd)
344 struct device *dev = &idxd->pdev->dev;
349 if (idxd->hw.gen_cap.evl_support == 0)
359 idxd_name = dev_name(idxd_confdev(idxd));
360 evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
366 idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
369 if (!idxd->evl_cache) {
374 idxd->evl = evl;
378 static int idxd_setup_internals(struct idxd_device *idxd)
380 struct device *dev = &idxd->pdev->dev;
383 init_waitqueue_head(&idxd->cmd_waitq);
385 rc = idxd_setup_wqs(idxd);
389 rc = idxd_setup_engines(idxd);
393 rc = idxd_setup_groups(idxd);
397 idxd->wq = create_workqueue(dev_name(dev));
398 if (!idxd->wq) {
403 rc = idxd_init_evl(idxd);
410 destroy_workqueue(idxd->wq);
412 for (i = 0; i < idxd->max_groups; i++)
413 put_device(group_confdev(idxd->groups[i]));
415 for (i = 0; i < idxd->max_engines; i++)
416 put_device(engine_confdev(idxd->engines[i]));
418 for (i = 0; i < idxd->max_wqs; i++)
419 put_device(wq_confdev(idxd->wqs[i]));
424 static void idxd_read_table_offsets(struct idxd_device *idxd)
427 struct device *dev = &idxd->pdev->dev;
429 offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
430 offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
431 idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
432 dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
433 idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
434 dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
435 idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
436 dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
437 idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
438 dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
454 static void idxd_read_caps(struct idxd_device *idxd)
456 struct device *dev = &idxd->pdev->dev;
460 idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
461 dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
463 if (idxd->hw.gen_cap.cmd_cap) {
464 idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
465 dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
469 if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
470 idxd->request_int_handles = true;
472 idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
473 dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
474 idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
475 dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
476 if (idxd->hw.gen_cap.config_en)
477 set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
480 idxd->hw.group_cap.bits =
481 ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
482 dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
483 idxd->max_groups = idxd->hw.group_cap.num_groups;
484 dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
485 idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
486 dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
487 idxd->nr_rdbufs = idxd->max_rdbufs;
490 idxd->hw.engine_cap.bits =
491 ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
492 dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
493 idxd->max_engines = idxd->hw.engine_cap.num_engines;
494 dev_dbg(dev, "max engines: %u\n", idxd->max_engines);
497 idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
498 dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
499 idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
500 dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
501 idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
502 dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
503 idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
504 dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);
508 idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
510 dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
512 multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);
515 if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2)
516 idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
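Illustrative sketch (not part of the listing): idxd_read_caps() above derives device limits from shift-encoded fields in 64-bit capability registers. The union layout and the register offset below are hypothetical; only the decoding pattern follows the excerpt.

	#include <linux/types.h>
	#include <linux/bitops.h>
	#include <linux/io-64-nonatomic-lo-hi.h>	/* ioread64() on 32-bit builds */

	#define MY_GENCAP_OFFSET	0x10	/* hypothetical register offset */
	#define MY_FLAG_CONFIGURABLE	0	/* hypothetical flag bit */

	union my_gen_cap {
		struct {
			u64 max_xfer_shift:5;
			u64 max_batch_shift:4;
			u64 config_en:1;
			u64 rsvd:54;
		};
		u64 bits;
	};

	struct my_dev {
		u64 max_xfer_bytes;
		u32 max_batch_size;
		unsigned long flags;
	};

	static void my_read_caps(struct my_dev *d, void __iomem *reg_base)
	{
		union my_gen_cap cap;

		cap.bits = ioread64(reg_base + MY_GENCAP_OFFSET);
		/* Shift fields encode powers of two, as in the excerpt above. */
		d->max_xfer_bytes = 1ULL << cap.max_xfer_shift;
		d->max_batch_size = 1U << cap.max_batch_shift;
		if (cap.config_en)
			set_bit(MY_FLAG_CONFIGURABLE, &d->flags);
	}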
523 struct idxd_device *idxd;
526 idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
527 if (!idxd)
530 conf_dev = idxd_confdev(idxd);
531 idxd->pdev = pdev;
532 idxd->data = data;
533 idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
534 idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
535 if (idxd->id < 0)
538 idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
539 if (!idxd->opcap_bmap) {
540 ida_free(&idxd_ida, idxd->id);
547 conf_dev->type = idxd->data->dev_type;
548 rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
554 spin_lock_init(&idxd->dev_lock);
555 spin_lock_init(&idxd->cmd_lock);
557 return idxd;
560 static int idxd_enable_system_pasid(struct idxd_device *idxd)
562 struct pci_dev *pdev = idxd->pdev;
593 idxd_set_user_intr(idxd, 1);
594 idxd->pasid = pasid;
599 static void idxd_disable_system_pasid(struct idxd_device *idxd)
601 struct pci_dev *pdev = idxd->pdev;
609 iommu_detach_device_pasid(domain, dev, idxd->pasid);
610 iommu_free_global_pasid(idxd->pasid);
612 idxd_set_user_intr(idxd, 0);
613 idxd->sva = NULL;
614 idxd->pasid = IOMMU_PASID_INVALID;
638 static int idxd_probe(struct idxd_device *idxd)
640 struct pci_dev *pdev = idxd->pdev;
645 rc = idxd_device_init_reset(idxd);
655 set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
657 rc = idxd_enable_system_pasid(idxd);
661 set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
667 idxd_read_caps(idxd);
668 idxd_read_table_offsets(idxd);
670 rc = idxd_setup_internals(idxd);
675 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
677 rc = idxd_device_load_config(idxd);
682 rc = idxd_setup_interrupts(idxd);
686 idxd->major = idxd_cdev_get_major(idxd);
688 rc = perfmon_pmu_init(idxd);
692 dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
696 idxd_cleanup_internals(idxd);
698 if (device_pasid_enabled(idxd))
699 idxd_disable_system_pasid(idxd);
700 if (device_user_pasid_enabled(idxd))
705 static void idxd_cleanup(struct idxd_device *idxd)
707 perfmon_pmu_remove(idxd);
708 idxd_cleanup_interrupts(idxd);
709 idxd_cleanup_internals(idxd);
710 if (device_pasid_enabled(idxd))
711 idxd_disable_system_pasid(idxd);
712 if (device_user_pasid_enabled(idxd))
713 idxd_disable_sva(idxd->pdev);
719 struct idxd_device *idxd;
728 idxd = idxd_alloc(pdev, data);
729 if (!idxd) {
735 idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
736 if (!idxd->reg_base) {
748 pci_set_drvdata(pdev, idxd);
750 idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
751 rc = idxd_probe(idxd);
757 rc = idxd_register_devices(idxd);
763 rc = idxd_device_init_debugfs(idxd);
768 idxd->hw.version);
773 idxd_cleanup(idxd);
775 pci_iounmap(pdev, idxd->reg_base);
777 put_device(idxd_confdev(idxd));
783 void idxd_wqs_quiesce(struct idxd_device *idxd)
788 for (i = 0; i < idxd->max_wqs; i++) {
789 wq = idxd->wqs[i];
797 struct idxd_device *idxd = pci_get_drvdata(pdev);
801 rc = idxd_device_disable(idxd);
805 irq_entry = &idxd->ie;
807 idxd_mask_error_interrupts(idxd);
808 flush_workqueue(idxd->wq);
813 struct idxd_device *idxd = pci_get_drvdata(pdev);
816 idxd_unregister_devices(idxd);
818 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
819 * to the idxd context. The driver still needs those bits in order to do the rest of
820  * the cleanup. However, we do need to unbind the idxd sub-driver. So take a ref
821 * on the device here to hold off the freeing while allowing the idxd sub-driver
824 get_device(idxd_confdev(idxd));
825 device_unregister(idxd_confdev(idxd));
827 if (device_pasid_enabled(idxd))
828 idxd_disable_system_pasid(idxd);
829 idxd_device_remove_debugfs(idxd);
831 irq_entry = idxd_get_ie(idxd, 0);
834 pci_iounmap(pdev, idxd->reg_base);
835 if (device_user_pasid_enabled(idxd))
838 destroy_workqueue(idxd->wq);
839 perfmon_pmu_remove(idxd);
840 put_device(idxd_confdev(idxd));
860 pr_warn("idxd driver failed to load without MOVDIR64B.\n");
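The comment excerpted from idxd_remove() above explains why the driver pins the conf device before unregistering it: device_unregister() drops the initial reference, and ->release() would otherwise free the idxd context while the rest of the teardown still needs it. A minimal sketch of that ordering, with hypothetical names rather than the actual idxd code:

	#include <linux/pci.h>
	#include <linux/device.h>
	#include <linux/workqueue.h>

	struct my_dev {
		struct device conf_dev;		/* ->release() frees *d */
		void __iomem *reg_base;
		struct workqueue_struct *wq;
	};

	static void my_remove(struct pci_dev *pdev)
	{
		struct my_dev *d = pci_get_drvdata(pdev);

		/* Extra reference keeps ->release() from running yet. */
		get_device(&d->conf_dev);
		/* Unbind the sub-driver and drop the initial reference. */
		device_unregister(&d->conf_dev);

		/* The context is still valid here because of the extra reference. */
		pci_iounmap(pdev, d->reg_base);
		destroy_workqueue(d->wq);

		/* Final reference: ->release() now frees the context. */
		put_device(&d->conf_dev);
	}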