Lines matching defs:idxd — excerpts from the Linux idxd accelerator driver (drivers/dma/idxd), each line prefixed with its number in the source file.
10 #include <uapi/linux/idxd.h>
12 #include "idxd.h"
15 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
17 static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
21 void idxd_unmask_error_interrupts(struct idxd_device *idxd)
25 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
28 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
31 void idxd_mask_error_interrupts(struct idxd_device *idxd)
35 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
38 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
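The listing keeps only the GENCTRL read and write-back from idxd_unmask_error_interrupts() and idxd_mask_error_interrupts(); the elided middle is a read-modify-write of the error-interrupt enable bits. A minimal sketch of that pattern, assuming the genctrl_reg field names (softerr_int_en, halt_int_en) from the driver's registers.h:

void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
        union genctrl_reg genctrl;

        /* read-modify-write: enable software-error and halt interrupts */
        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 1;
        genctrl.halt_int_en = 1;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
        union genctrl_reg genctrl;

        /* same pattern with the enables cleared */
        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 0;
        genctrl.halt_int_en = 0;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}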
53 struct device *dev = &wq->idxd->pdev->dev;
86 struct device *dev = &wq->idxd->pdev->dev;
110 struct idxd_device *idxd = wq->idxd;
111 struct device *dev = &idxd->pdev->dev;
124 wq->compls_size = num_descs * idxd->data->compl_size;
144 if (idxd->data->type == IDXD_TYPE_DSA)
146 else if (idxd->data->type == IDXD_TYPE_IAX)
148 desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
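Lines 110-148 come from the WQ resource allocation path: one coherent DMA buffer holds every completion record, and each descriptor is pointed at its slice. A hedged sketch of that carving, using a hypothetical helper name and assuming the wq->compls/wq->iax_compls base pointers and wq->descs[] array from idxd.h:

/* Hypothetical helper: allocate the completion-record buffer and wire
 * each descriptor to its slice (DSA and IAX records differ in size). */
static int idxd_wq_alloc_compls(struct idxd_wq *wq, int num_descs)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        int i;

        wq->compls_size = num_descs * idxd->data->compl_size;
        wq->compls = dma_alloc_coherent(dev, wq->compls_size,
                                        &wq->compls_addr, GFP_KERNEL);
        if (!wq->compls)
                return -ENOMEM;

        for (i = 0; i < num_descs; i++) {
                struct idxd_desc *desc = wq->descs[i];

                if (idxd->data->type == IDXD_TYPE_DSA)
                        desc->completion = &wq->compls[i];
                else if (idxd->data->type == IDXD_TYPE_IAX)
                        desc->iax_completion = &wq->iax_compls[i];
                desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
        }
        return 0;
}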
167 struct device *dev = &wq->idxd->pdev->dev;
180 struct idxd_device *idxd = wq->idxd;
181 struct device *dev = &idxd->pdev->dev;
189 idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
198 set_bit(wq->id, idxd->wq_enable_map);
205 struct idxd_device *idxd = wq->idxd;
206 struct device *dev = &idxd->pdev->dev;
217 idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);
226 clear_bit(wq->id, idxd->wq_enable_map);
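Lines 180-226 are the WQ enable/disable pair: submit the command through idxd_cmd_exec(), check the returned command status, and keep idxd->wq_enable_map in sync with the hardware. A sketch of the enable side, assuming IDXD_CMDSTS_SUCCESS and the IDXD_WQ_ENABLED state value from the driver headers (the real function also tolerates an already-enabled status):

int idxd_wq_enable(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        u32 status;

        if (wq->state == IDXD_WQ_ENABLED)
                return 0;

        /* the operand for ENABLE_WQ is simply the WQ index */
        idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
        if (status != IDXD_CMDSTS_SUCCESS)
                return -ENXIO;

        wq->state = IDXD_WQ_ENABLED;
        set_bit(wq->id, idxd->wq_enable_map);
        return 0;
}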
234 struct idxd_device *idxd = wq->idxd;
235 struct device *dev = &idxd->pdev->dev;
245 idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
250 struct idxd_device *idxd = wq->idxd;
251 struct device *dev = &idxd->pdev->dev;
260 idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
266 struct idxd_device *idxd = wq->idxd;
267 struct pci_dev *pdev = idxd->pdev;
283 struct device *dev = &wq->idxd->pdev->dev;
290 void idxd_wqs_unmap_portal(struct idxd_device *idxd)
294 for (i = 0; i < idxd->max_wqs; i++) {
295 struct idxd_wq *wq = idxd->wqs[i];
304 struct idxd_device *idxd = wq->idxd;
308 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
309 spin_lock(&idxd->dev_lock);
310 wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
314 iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
315 spin_unlock(&idxd->dev_lock);
337 struct idxd_device *idxd = wq->idxd;
346 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
347 spin_lock(&idxd->dev_lock);
348 wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
351 iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
352 spin_unlock(&idxd->dev_lock);
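Lines 304-352 show two callers of the same idiom: only the 32-bit WQCFG word holding the PASID fields is read back, updated, and rewritten, with dev_lock held so concurrent configuration writes cannot interleave. A sketch with a hypothetical combined helper; the pasid_en/pasid field names are assumed from union wqcfg in registers.h:

/* Hypothetical helper: locked read-modify-write of the WQCFG PASID word. */
static void idxd_wq_update_pasid_locked(struct idxd_wq *wq, int pasid, bool enable)
{
        struct idxd_device *idxd = wq->idxd;
        union wqcfg wqcfg;
        unsigned int offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);

        spin_lock(&idxd->dev_lock);
        wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
        wqcfg.pasid_en = enable;
        wqcfg.pasid = enable ? pasid : 0;
        iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
        spin_unlock(&idxd->dev_lock);
}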
363 struct idxd_device *idxd = wq->idxd;
367 memset(wq->wqcfg, 0, idxd->wqcfg_size);
375 idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
377 bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
426 static inline bool idxd_is_enabled(struct idxd_device *idxd)
430 gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
437 static inline bool idxd_device_is_halted(struct idxd_device *idxd)
441 gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
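Lines 426-441 both decode the state field of the GENSTS register. A sketch of the two helpers, with the IDXD_DEVICE_STATE_* constants taken from registers.h:

static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
        union gensts_reg gensts;

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
        return gensts.state == IDXD_DEVICE_STATE_ENABLED;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
        union gensts_reg gensts;

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
        return gensts.state == IDXD_DEVICE_STATE_HALT;
}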
451 int idxd_device_init_reset(struct idxd_device *idxd)
453 struct device *dev = &idxd->pdev->dev;
456 if (idxd_device_is_halted(idxd)) {
457 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
464 spin_lock(&idxd->cmd_lock);
465 iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
467 while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
470 spin_unlock(&idxd->cmd_lock);
474 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
482 if (idxd_device_is_halted(idxd)) {
483 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
494 spin_lock_irqsave(&idxd->cmd_lock, flags);
495 wait_event_lock_irq(idxd->cmd_waitq,
496 !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
497 idxd->cmd_lock);
499 dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
502 idxd->cmd_status = 0;
503 __set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
504 idxd->cmd_done = &done;
505 iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
511 spin_unlock_irqrestore(&idxd->cmd_lock, flags);
513 stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
514 spin_lock(&idxd->cmd_lock);
517 idxd->cmd_status = stat & GENMASK(7, 0);
519 __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
521 wake_up(&idxd->cmd_waitq);
522 spin_unlock(&idxd->cmd_lock);
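Lines 474-522 sketch the administrative command protocol: commands are serialized on cmd_lock, the caller sleeps until no other command is running, the command register is written once, and the command-done interrupt completes idxd->cmd_done. A condensed sketch under those assumptions (the int_req field and union idxd_command_reg layout come from registers.h; the halt check and dev_dbg output are omitted):

static void idxd_cmd_exec_sketch(struct idxd_device *idxd, int cmd_code,
                                 u32 operand, u32 *status)
{
        union idxd_command_reg cmd;
        DECLARE_COMPLETION_ONSTACK(done);
        unsigned long flags;
        u32 stat;

        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = cmd_code;
        cmd.operand = operand;
        cmd.int_req = 1;                        /* request a command-done interrupt */

        spin_lock_irqsave(&idxd->cmd_lock, flags);
        wait_event_lock_irq(idxd->cmd_waitq,
                            !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
                            idxd->cmd_lock);
        __set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
        idxd->cmd_done = &done;
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
        spin_unlock_irqrestore(&idxd->cmd_lock, flags);

        wait_for_completion(&done);             /* signaled by the IRQ handler */

        stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
        spin_lock(&idxd->cmd_lock);
        if (status)
                *status = stat;
        idxd->cmd_status = stat & GENMASK(7, 0);
        __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
        wake_up(&idxd->cmd_waitq);              /* let the next queued command run */
        spin_unlock(&idxd->cmd_lock);
}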
525 int idxd_device_enable(struct idxd_device *idxd)
527 struct device *dev = &idxd->pdev->dev;
530 if (idxd_is_enabled(idxd)) {
535 idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);
544 idxd->state = IDXD_DEV_ENABLED;
548 int idxd_device_disable(struct idxd_device *idxd)
550 struct device *dev = &idxd->pdev->dev;
553 if (!idxd_is_enabled(idxd)) {
558 idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);
567 idxd_device_clear_state(idxd);
571 void idxd_device_reset(struct idxd_device *idxd)
573 idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
574 idxd_device_clear_state(idxd);
575 spin_lock(&idxd->dev_lock);
576 idxd_unmask_error_interrupts(idxd);
577 spin_unlock(&idxd->dev_lock);
580 void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
582 struct device *dev = &idxd->pdev->dev;
587 idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
591 int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
594 struct device *dev = &idxd->pdev->dev;
597 if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
608 idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);
621 int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
624 struct device *dev = &idxd->pdev->dev;
628 if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
644 spin_lock(&idxd->cmd_lock);
645 iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
647 while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
649 status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
650 spin_unlock(&idxd->cmd_lock);
662 static void idxd_engines_clear_state(struct idxd_device *idxd)
667 lockdep_assert_held(&idxd->dev_lock);
668 for (i = 0; i < idxd->max_engines; i++) {
669 engine = idxd->engines[i];
674 static void idxd_groups_clear_state(struct idxd_device *idxd)
679 lockdep_assert_held(&idxd->dev_lock);
680 for (i = 0; i < idxd->max_groups; i++) {
681 group = idxd->groups[i];
690 group->rdbufs_allowed = idxd->max_rdbufs;
692 if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
704 static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
708 for (i = 0; i < idxd->max_wqs; i++) {
709 struct idxd_wq *wq = idxd->wqs[i];
718 void idxd_device_clear_state(struct idxd_device *idxd)
721 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
726 idxd_device_wqs_clear_state(idxd);
728 spin_lock(&idxd->dev_lock);
729 idxd_groups_clear_state(idxd);
730 idxd_engines_clear_state(idxd);
732 spin_lock(&idxd->dev_lock);
735 idxd->state = IDXD_DEV_DISABLED;
736 spin_unlock(&idxd->dev_lock);
739 static int idxd_device_evl_setup(struct idxd_device *idxd)
744 struct device *dev = &idxd->pdev->dev;
748 struct idxd_evl *evl = idxd->evl;
755 size = evl_size(idxd);
783 iowrite64(evlcfg.bits[0], idxd->reg_base + IDXD_EVLCFG_OFFSET);
784 iowrite64(evlcfg.bits[1], idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
786 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
788 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
790 gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
792 iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
803 static void idxd_device_evl_free(struct idxd_device *idxd)
810 struct device *dev = &idxd->pdev->dev;
811 struct idxd_evl *evl = idxd->evl;
813 gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
819 iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
821 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
823 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
825 iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
826 iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
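Lines 739-826 cover event log (EVL) setup and teardown: the log base and size go into the two EVLCFG words, the EVL interrupt is enabled in GENCTRL, and the log itself is switched on in GENCFG (teardown reverses the order and zeroes EVLCFG). A sketch of the enable half; the evl_int_en/evl_en field names and the evlcfg layout are assumptions inferred from the registers touched in the listing, and the real path also allocates the log buffer first:

static void idxd_evl_enable_sketch(struct idxd_device *idxd,
                                   dma_addr_t dma_addr, unsigned int nr_entries)
{
        union evlcfg_reg evlcfg;
        union genctrl_reg genctrl;
        union gencfg_reg gencfg;

        memset(&evlcfg, 0, sizeof(evlcfg));
        evlcfg.bits[0] = dma_addr & GENMASK(63, 12);    /* 4K-aligned log base (assumed) */
        evlcfg.size = nr_entries;

        iowrite64(evlcfg.bits[0], idxd->reg_base + IDXD_EVLCFG_OFFSET);
        iowrite64(evlcfg.bits[1], idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.evl_int_en = 1;                         /* assumed field name */
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);

        gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
        gencfg.evl_en = 1;                              /* assumed field name */
        iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
}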
841 struct idxd_device *idxd = group->idxd;
842 struct device *dev = &idxd->pdev->dev;
850 grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
851 iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
854 ioread64(idxd->reg_base + grpcfg_offset));
858 grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
859 iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
861 grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));
864 grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
865 iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
868 ioread64(idxd->reg_base + grpcfg_offset));
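Lines 841-868 write one group's configuration: the WQ membership bitmap 64 bits at a time, then the engine bitmap, then the group flags, each followed by a debug readback. A sketch of that shape without the readbacks; GRPWQCFG_STRIDES and the grpcfg members are taken from the driver headers:

static void idxd_group_config_write_sketch(struct idxd_group *group)
{
        struct idxd_device *idxd = group->idxd;
        u32 grpcfg_offset;
        int i;

        /* WQ membership: one 64-bit word per 64 possible WQs */
        for (i = 0; i < GRPWQCFG_STRIDES; i++) {
                grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
                iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
        }

        /* engine membership bitmap */
        grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
        iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);

        /* group flags (traffic class, read-buffer settings, ...) */
        grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
        iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
}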
871 static int idxd_groups_config_write(struct idxd_device *idxd)
876 struct device *dev = &idxd->pdev->dev;
879 if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
880 reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
881 reg.rdbuf_limit = idxd->rdbuf_limit;
882 iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
886 ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
888 for (i = 0; i < idxd->max_groups; i++) {
889 struct idxd_group *group = idxd->groups[i];
897 static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
899 struct pci_dev *pdev = idxd->pdev;
908 struct idxd_device *idxd = wq->idxd;
909 struct device *dev = &idxd->pdev->dev;
920 for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
921 wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
922 wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
950 !idxd_device_pasid_priv_enabled(idxd) &&
952 idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
958 if (idxd->hw.gen_cap.block_on_fault &&
963 if (idxd->hw.wq_cap.wq_ats_support)
966 if (idxd->hw.wq_cap.wq_prs_support)
971 idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));
974 if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
985 for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
986 wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
987 iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
990 ioread32(idxd->reg_base + wq_offset));
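Lines 908-990 program one WQ: the live WQCFG contents are OR-ed into the shadow copy first (so hardware-owned bits survive), the software fields are filled in, and every 32-bit stride is written back. A sketch of the offset arithmetic behind the WQCFG_OFFSET()/WQCFG_STRIDES() macros used here, with the wqcfg_offset/wqcfg_size members assumed from idxd.h:

static inline unsigned int wqcfg_offset_sketch(struct idxd_device *idxd,
                                               int wq_id, int stride)
{
        /* each WQ owns wqcfg_size bytes starting at the WQCFG table base */
        return idxd->wqcfg_offset + wq_id * idxd->wqcfg_size +
               stride * sizeof(u32);
}

static inline unsigned int wqcfg_strides_sketch(struct idxd_device *idxd)
{
        /* number of 32-bit words written per WQ */
        return idxd->wqcfg_size / sizeof(u32);
}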
996 static int idxd_wqs_config_write(struct idxd_device *idxd)
1000 for (i = 0; i < idxd->max_wqs; i++) {
1001 struct idxd_wq *wq = idxd->wqs[i];
1011 static void idxd_group_flags_setup(struct idxd_device *idxd)
1016 for (i = 0; i < idxd->max_groups; i++) {
1017 struct idxd_group *group = idxd->groups[i];
1035 static int idxd_engines_setup(struct idxd_device *idxd)
1041 for (i = 0; i < idxd->max_groups; i++) {
1042 group = idxd->groups[i];
1046 for (i = 0; i < idxd->max_engines; i++) {
1047 eng = idxd->engines[i];
1063 static int idxd_wqs_setup(struct idxd_device *idxd)
1068 struct device *dev = &idxd->pdev->dev;
1070 for (i = 0; i < idxd->max_groups; i++) {
1071 group = idxd->groups[i];
1076 for (i = 0; i < idxd->max_wqs; i++) {
1077 wq = idxd->wqs[i];
1084 idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
1094 idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
1101 int idxd_device_config(struct idxd_device *idxd)
1105 lockdep_assert_held(&idxd->dev_lock);
1106 rc = idxd_wqs_setup(idxd);
1110 rc = idxd_engines_setup(idxd);
1114 idxd_group_flags_setup(idxd);
1116 rc = idxd_wqs_config_write(idxd);
1120 rc = idxd_groups_config_write(idxd);
1129 struct idxd_device *idxd = wq->idxd;
1130 struct device *dev = &idxd->pdev->dev;
1134 wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
1135 memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);
1149 idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);
1151 for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
1152 wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
1161 struct idxd_device *idxd = group->idxd;
1162 struct device *dev = &idxd->pdev->dev;
1172 grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
1173 group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
1177 if (i * 64 >= idxd->max_wqs)
1185 if (id >= idxd->max_wqs)
1190 wq = idxd->wqs[id];
1196 grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
1197 group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
1203 if (i >= idxd->max_engines)
1207 struct idxd_engine *engine = idxd->engines[i];
1213 grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
1214 group->grpcfg.flags.bits = ioread64(idxd->reg_base + grpcfg_offset);
1219 int idxd_device_load_config(struct idxd_device *idxd)
1224 reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
1225 idxd->rdbuf_limit = reg.rdbuf_limit;
1227 for (i = 0; i < idxd->max_groups; i++) {
1228 struct idxd_group *group = idxd->groups[i];
1233 for (i = 0; i < idxd->max_wqs; i++) {
1234 struct idxd_wq *wq = idxd->wqs[i];
1280 static void idxd_device_set_perm_entry(struct idxd_device *idxd,
1291 iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
1294 static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
1297 iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
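Lines 1280-1297 manage the per-vector MSI-X permission entries: each interrupt entry owns one 32-bit word at msix_perm_offset + id * 8, and setting it binds the vector to a PASID. A sketch of the set side; the union msix_perm field names (pasid, pasid_en) are assumed from registers.h:

static void idxd_device_set_perm_entry_sketch(struct idxd_device *idxd,
                                              struct idxd_irq_entry *ie)
{
        union msix_perm mperm;

        if (ie->pasid == IOMMU_PASID_INVALID)
                return;

        mperm.bits = 0;
        mperm.pasid = ie->pasid;
        mperm.pasid_en = 1;
        iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}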
1302 struct idxd_device *idxd = wq->idxd;
1310 if (idxd->request_int_handles)
1311 idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
1312 idxd_device_clear_perm_entry(idxd, ie);
1320 struct idxd_device *idxd = wq->idxd;
1321 struct pci_dev *pdev = idxd->pdev;
1331 ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : IOMMU_PASID_INVALID;
1332 idxd_device_set_perm_entry(idxd, ie);
1334 rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
1340 if (idxd->request_int_handles) {
1341 rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
1355 idxd_device_clear_perm_entry(idxd, ie);
1362 struct idxd_device *idxd = wq->idxd;
1363 struct device *dev = &idxd->pdev->dev;
1368 if (idxd->state != IDXD_DEV_ENABLED) {
1369 idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
1375 idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
1382 idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
1387 idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
1395 idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
1408 idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
1422 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
1425 u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
1433 spin_lock(&idxd->dev_lock);
1434 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1435 rc = idxd_device_config(idxd);
1436 spin_unlock(&idxd->dev_lock);
1450 idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
1459 idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
1466 idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
1473 idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
1495 struct idxd_device *idxd = wq->idxd;
1496 struct device *dev = &idxd->pdev->dev;
1516 struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
1524 if (idxd->state != IDXD_DEV_DISABLED) {
1525 idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
1530 spin_lock(&idxd->dev_lock);
1531 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1532 rc = idxd_device_config(idxd);
1533 spin_unlock(&idxd->dev_lock);
1543 if (idxd->pasid != IOMMU_PASID_INVALID)
1544 idxd_set_user_intr(idxd, 1);
1546 rc = idxd_device_evl_setup(idxd);
1548 idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
1553 rc = idxd_device_enable(idxd);
1555 idxd_device_evl_free(idxd);
1560 rc = idxd_register_dma_device(idxd);
1562 idxd_device_disable(idxd);
1563 idxd_device_evl_free(idxd);
1564 idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
1568 idxd->cmd_status = 0;
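Lines 1516-1568 give the device probe order: the device must still be disabled, its configuration is written under dev_lock when the configurable flag is set, the event log is set up, the device is enabled, and finally the dmaengine device is registered, with each failure unwinding the steps before it. A condensed sketch of that ordering (error labels and the cmd_status bookkeeping are simplified):

static int idxd_device_drv_probe_sketch(struct idxd_device *idxd)
{
        int rc = 0;

        if (idxd->state != IDXD_DEV_DISABLED)
                return -ENXIO;

        spin_lock(&idxd->dev_lock);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                rc = idxd_device_config(idxd);
        spin_unlock(&idxd->dev_lock);
        if (rc < 0)
                return rc;

        rc = idxd_device_evl_setup(idxd);
        if (rc < 0)
                return rc;

        rc = idxd_device_enable(idxd);
        if (rc < 0)
                goto err_enable;

        rc = idxd_register_dma_device(idxd);
        if (rc < 0)
                goto err_dma;

        return 0;

err_dma:
        idxd_device_disable(idxd);
err_enable:
        idxd_device_evl_free(idxd);
        return rc;
}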
1575 struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
1578 for (i = 0; i < idxd->max_wqs; i++) {
1579 struct idxd_wq *wq = idxd->wqs[i];
1588 idxd_unregister_dma_device(idxd);
1589 idxd_device_disable(idxd);
1590 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1591 idxd_device_reset(idxd);
1592 idxd_device_evl_free(idxd);
1605 .name = "idxd",