Lines Matching defs:idxd

11 #include <uapi/linux/idxd.h>
13 #include "idxd.h"
16 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
20 void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
22 struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
27 void idxd_mask_msix_vectors(struct idxd_device *idxd)
29 struct pci_dev *pdev = idxd->pdev;
34 idxd_mask_msix_vector(idxd, i);
37 void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
39 struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
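Lines 20-39 are thin wrappers over the generic MSI-X masking machinery. A plausible reconstruction of the bodies between the matched lines, assuming the standard kernel helpers pci_msi_mask_irq()/pci_msi_unmask_irq() and pci_msix_vec_count():

	/* Mask one MSI-X vector via its irq_data, through the MSI core. */
	void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
	{
		struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

		pci_msi_mask_irq(data);
	}

	/* Mask every vector the PCI function advertises. */
	void idxd_mask_msix_vectors(struct idxd_device *idxd)
	{
		struct pci_dev *pdev = idxd->pdev;
		int msixcnt = pci_msix_vec_count(pdev);
		int i;

		for (i = 0; i < msixcnt; i++)
			idxd_mask_msix_vector(idxd, i);
	}

	/* Unmask is the symmetric single-vector operation. */
	void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
	{
		struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

		pci_msi_unmask_irq(data);
	}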
44 void idxd_unmask_error_interrupts(struct idxd_device *idxd)
48 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
50 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
53 void idxd_mask_error_interrupts(struct idxd_device *idxd)
57 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
59 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
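Both error-interrupt helpers (lines 44-59) are read-modify-write cycles on GENCTRL. A minimal sketch of the unmask side, assuming the genctrl_reg union carries a softerr_int_en bit (the field name is an assumption about this driver's register header):

	void idxd_unmask_error_interrupts(struct idxd_device *idxd)
	{
		union genctrl_reg genctrl;

		genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
		genctrl.softerr_int_en = 1;	/* assumed field: software error interrupt enable */
		iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
	}

idxd_mask_error_interrupts() would be identical except that it clears the bit before writing GENCTRL back.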
74 struct device *dev = &wq->idxd->pdev->dev;
107 struct device *dev = &wq->idxd->pdev->dev;
131 struct idxd_device *idxd = wq->idxd;
132 struct device *dev = &idxd->pdev->dev;
187 struct device *dev = &wq->idxd->pdev->dev;
200 struct idxd_device *idxd = wq->idxd;
201 struct device *dev = &idxd->pdev->dev;
209 idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
224 struct idxd_device *idxd = wq->idxd;
225 struct device *dev = &idxd->pdev->dev;
236 idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);
250 struct idxd_device *idxd = wq->idxd;
251 struct device *dev = &idxd->pdev->dev;
261 idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
266 struct idxd_device *idxd = wq->idxd;
267 struct device *dev = &idxd->pdev->dev;
276 idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
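Lines 209-276 show the four per-WQ commands: ENABLE_WQ passes the raw wq->id as operand (line 209), while DISABLE_WQ, DRAIN_WQ and RESET_WQ pass a pre-encoded operand. A hedged sketch of the disable path, assuming the operand packs the WQ index as a bit position within a 16-wide bank plus a bank selector in the upper half (the encoding is inferred, not confirmed by the listing):

	int idxd_wq_disable(struct idxd_wq *wq)
	{
		struct idxd_device *idxd = wq->idxd;
		struct device *dev = &idxd->pdev->dev;
		u32 status, operand;

		dev_dbg(dev, "Disabling WQ %d\n", wq->id);

		/* Assumed encoding: bit within a 16-wide bank, bank index in bits 16+. */
		operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
		idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

		if (status != IDXD_CMDSTS_SUCCESS)
			return -ENXIO;

		wq->state = IDXD_WQ_DISABLED;
		return 0;
	}

Drain and reset pass NULL for status (lines 261 and 276), so they fire the command and return without inspecting CMDSTS.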
282 struct idxd_device *idxd = wq->idxd;
283 struct pci_dev *pdev = idxd->pdev;
300 struct device *dev = &wq->idxd->pdev->dev;
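Lines 282-300 belong to the WQ portal map/unmap pair. A hedged sketch, assuming the BAR index, the portal-offset helper, and the portal size constant (these names are inferred, not shown in the listing):

	int idxd_wq_map_portal(struct idxd_wq *wq)
	{
		struct idxd_device *idxd = wq->idxd;
		struct pci_dev *pdev = idxd->pdev;
		struct device *dev = &pdev->dev;
		resource_size_t start;

		/* The portal lives in its own BAR; the names below are assumptions. */
		start = pci_resource_start(pdev, IDXD_WQ_BAR);
		start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

		wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
		if (!wq->portal)
			return -ENOMEM;

		return 0;
	}

	void idxd_wq_unmap_portal(struct idxd_wq *wq)
	{
		struct device *dev = &wq->idxd->pdev->dev;

		devm_iounmap(dev, wq->portal);
	}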
307 struct idxd_device *idxd = wq->idxd;
309 lockdep_assert_held(&idxd->dev_lock);
310 memset(wq->wqcfg, 0, idxd->wqcfg_size);
321 static inline bool idxd_is_enabled(struct idxd_device *idxd)
325 gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
332 static inline bool idxd_device_is_halted(struct idxd_device *idxd)
336 gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
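Both predicates (lines 321-336) decode the same GENSTS register; only the state value compared differs. A minimal sketch, assuming a gensts_reg union with a state field and the IDXD_DEVICE_STATE_* enum used elsewhere in the driver:

	static inline bool idxd_is_enabled(struct idxd_device *idxd)
	{
		union gensts_reg gensts;

		gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
		return gensts.state == IDXD_DEVICE_STATE_ENABLED;
	}

	static inline bool idxd_device_is_halted(struct idxd_device *idxd)
	{
		union gensts_reg gensts;

		gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
		return gensts.state == IDXD_DEVICE_STATE_HALT;
	}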
346 int idxd_device_init_reset(struct idxd_device *idxd)
348 struct device *dev = &idxd->pdev->dev;
352 if (idxd_device_is_halted(idxd)) {
353 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
360 spin_lock_irqsave(&idxd->dev_lock, flags);
361 iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
363 while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
366 spin_unlock_irqrestore(&idxd->dev_lock, flags);
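idxd_device_init_reset() is the one command path that polls instead of sleeping: at probe time no interrupt handler is wired up yet, so it spins on CMDSTS under the device lock. A plausible reconstruction around the matched lines (IDXD_CMDSTS_ACTIVE as the busy bit is an assumption):

	int idxd_device_init_reset(struct idxd_device *idxd)
	{
		struct device *dev = &idxd->pdev->dev;
		union idxd_command_reg cmd;
		unsigned long flags;

		if (idxd_device_is_halted(idxd)) {
			dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
			return -ENXIO;
		}

		memset(&cmd, 0, sizeof(cmd));
		cmd.cmd = IDXD_CMD_RESET_DEVICE;
		dev_dbg(dev, "%s: sending reset for init.\n", __func__);
		spin_lock_irqsave(&idxd->dev_lock, flags);
		iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

		/* Busy-wait: interrupts are not available this early in probe. */
		while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
		       IDXD_CMDSTS_ACTIVE)
			cpu_relax();
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		return 0;
	}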
370 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
377 if (idxd_device_is_halted(idxd)) {
378 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
389 spin_lock_irqsave(&idxd->dev_lock, flags);
390 wait_event_lock_irq(idxd->cmd_waitq,
391 !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
392 idxd->dev_lock);
394 dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
397 idxd->cmd_status = 0;
398 __set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
399 idxd->cmd_done = &done;
400 iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
406 spin_unlock_irqrestore(&idxd->dev_lock, flags);
408 spin_lock_irqsave(&idxd->dev_lock, flags);
410 *status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
411 idxd->cmd_status = *status & GENMASK(7, 0);
414 __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
416 wake_up(&idxd->cmd_waitq);
417 spin_unlock_irqrestore(&idxd->dev_lock, flags);
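idxd_cmd_exec() (lines 370-417) serializes all post-probe device commands: a caller sleeps on cmd_waitq until no command is running, sets IDXD_FLAG_CMD_RUNNING, writes the command register, then sleeps on an on-stack completion that the command interrupt signals. A hedged reconstruction stitched from the matched lines (cmd.int_req and IDXD_CMDSTS_HW_ERR are assumptions):

	static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
				  u32 *status)
	{
		union idxd_command_reg cmd;
		DECLARE_COMPLETION_ONSTACK(done);
		unsigned long flags;

		if (idxd_device_is_halted(idxd)) {
			dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
			if (status)
				*status = IDXD_CMDSTS_HW_ERR;	/* assumed error code */
			return;
		}

		memset(&cmd, 0, sizeof(cmd));
		cmd.cmd = cmd_code;
		cmd.operand = operand;
		cmd.int_req = 1;	/* assumed: request a completion interrupt */

		spin_lock_irqsave(&idxd->dev_lock, flags);
		/* Only one command may be in flight; sleep until ours can go. */
		wait_event_lock_irq(idxd->cmd_waitq,
				    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
				    idxd->dev_lock);

		dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
			__func__, cmd_code, operand);

		idxd->cmd_status = 0;
		__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
		idxd->cmd_done = &done;
		iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

		/* Drop the lock and sleep; the IRQ handler completes cmd_done. */
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		wait_for_completion(&done);

		spin_lock_irqsave(&idxd->dev_lock, flags);
		if (status) {
			*status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
			idxd->cmd_status = *status & GENMASK(7, 0);
		}
		__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
		/* Let the next queued command through. */
		wake_up(&idxd->cmd_waitq);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
	}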
420 int idxd_device_enable(struct idxd_device *idxd)
422 struct device *dev = &idxd->pdev->dev;
425 if (idxd_is_enabled(idxd)) {
430 idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);
439 idxd->state = IDXD_DEV_ENABLED;
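Device enable (lines 420-439) is a thin wrapper over idxd_cmd_exec() plus state bookkeeping. A minimal sketch, assuming IDXD_CMDSTS_SUCCESS and an "already enabled" status code IDXD_CMDSTS_ERR_DEV_ENABLED:

	int idxd_device_enable(struct idxd_device *idxd)
	{
		struct device *dev = &idxd->pdev->dev;
		u32 status;

		if (idxd_is_enabled(idxd)) {
			dev_dbg(dev, "Device already enabled\n");
			return -ENXIO;
		}

		idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

		/* Treat "already enabled" as success too (assumed status code). */
		if (status != IDXD_CMDSTS_SUCCESS &&
		    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
			dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
			return -ENXIO;
		}

		idxd->state = IDXD_DEV_ENABLED;
		return 0;
	}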
443 void idxd_device_wqs_clear_state(struct idxd_device *idxd)
447 lockdep_assert_held(&idxd->dev_lock);
449 for (i = 0; i < idxd->max_wqs; i++) {
450 struct idxd_wq *wq = &idxd->wqs[i];
459 int idxd_device_disable(struct idxd_device *idxd)
461 struct device *dev = &idxd->pdev->dev;
465 if (!idxd_is_enabled(idxd)) {
470 idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);
479 spin_lock_irqsave(&idxd->dev_lock, flags);
480 idxd_device_wqs_clear_state(idxd);
481 idxd->state = IDXD_DEV_CONF_READY;
482 spin_unlock_irqrestore(&idxd->dev_lock, flags);
486 void idxd_device_reset(struct idxd_device *idxd)
490 idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
491 spin_lock_irqsave(&idxd->dev_lock, flags);
492 idxd_device_wqs_clear_state(idxd);
493 idxd->state = IDXD_DEV_CONF_READY;
494 spin_unlock_irqrestore(&idxd->dev_lock, flags);
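Both disable (lines 459-482) and reset (lines 486-494) funnel through the same teardown under dev_lock: every WQ's software state is swept back to disabled and the device drops to IDXD_DEV_CONF_READY. A hedged sketch of that sweep; the per-WQ cleanup helper name is an assumption tied to the memset of the shadow wqcfg at line 310:

	void idxd_device_wqs_clear_state(struct idxd_device *idxd)
	{
		int i;

		lockdep_assert_held(&idxd->dev_lock);

		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_ENABLED) {
				idxd_wq_disable_cleanup(wq);	/* assumed helper; clears wq->wqcfg */
				wq->state = IDXD_WQ_DISABLED;
			}
		}
	}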
500 struct idxd_device *idxd = group->idxd;
501 struct device *dev = &idxd->pdev->dev;
509 grpcfg_offset = idxd->grpcfg_offset +
512 idxd->reg_base + grpcfg_offset);
515 ioread64(idxd->reg_base + grpcfg_offset));
519 grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
520 iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
522 grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));
525 grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
526 iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
529 ioread32(idxd->reg_base + grpcfg_offset));
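The group config writes at lines 509-529 imply a 64-byte GRPCFG slot per group: the WQ membership bitmap first, the engine bitmap at byte offset 32, and a 32-bit flags word at byte offset 40. A sketch consistent with those offsets; the four-word width of the WQ bitmap is an assumption:

	/* GRPCFG slot layout (64 bytes per group, inferred from the listing):
	 *   +0  .. +31 : WQ membership bitmap, assumed four u64 words
	 *   +32        : engine membership bitmap, one u64
	 *   +40        : group flags, one u32
	 */
	static void idxd_group_config_write(struct idxd_group *group)
	{
		struct idxd_device *idxd = group->idxd;
		u32 grpcfg_offset;
		int i;

		for (i = 0; i < 4; i++) {
			grpcfg_offset = idxd->grpcfg_offset + group->id * 64 +
					i * sizeof(u64);
			iowrite64(group->grpcfg.wqs[i],
				  idxd->reg_base + grpcfg_offset);
		}

		grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
		iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);

		grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
		iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	}

The flags word is where the token accounting from idxd_group_flags_setup() lands (see tokens_allowed at line 646).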
532 static int idxd_groups_config_write(struct idxd_device *idxd)
537 struct device *dev = &idxd->pdev->dev;
540 if (idxd->token_limit) {
541 reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
542 reg.token_limit = idxd->token_limit;
543 iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
547 ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
549 for (i = 0; i < idxd->max_groups; i++) {
550 struct idxd_group *group = &idxd->groups[i];
560 struct idxd_device *idxd = wq->idxd;
561 struct device *dev = &idxd->pdev->dev;
572 for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
573 wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
574 wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
598 for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
599 wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
600 iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
603 ioread32(idxd->reg_base + wq_offset));
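WQ config space is moved 32 bits at a time: WQCFG_STRIDES() is the per-WQ config size in u32 words, and WQCFG_OFFSET() addresses one word of one WQ's slot. Simplified macro sketches consistent with that usage; the in-tree definitions in the register header may differ in detail:

	/* Number of 32-bit words in one WQ's config slot. */
	#define WQCFG_STRIDES(idxd)	((idxd)->wqcfg_size / sizeof(u32))

	/* Byte offset of word 'ofs' in WQ 'n's config slot. */
	#define WQCFG_OFFSET(idxd, n, ofs) \
		((idxd)->wqcfg_offset + (n) * (idxd)->wqcfg_size + sizeof(u32) * (ofs))

Note the matching memset at line 310, which clears exactly idxd->wqcfg_size bytes of the shadow copy before a WQ is torn down.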
609 static int idxd_wqs_config_write(struct idxd_device *idxd)
613 for (i = 0; i < idxd->max_wqs; i++) {
614 struct idxd_wq *wq = &idxd->wqs[i];
624 static void idxd_group_flags_setup(struct idxd_device *idxd)
629 for (i = 0; i < idxd->max_groups; i++) {
630 struct idxd_group *group = &idxd->groups[i];
646 group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
650 static int idxd_engines_setup(struct idxd_device *idxd)
656 for (i = 0; i < idxd->max_groups; i++) {
657 group = &idxd->groups[i];
661 for (i = 0; i < idxd->max_engines; i++) {
662 eng = &idxd->engines[i];
678 static int idxd_wqs_setup(struct idxd_device *idxd)
683 struct device *dev = &idxd->pdev->dev;
685 for (i = 0; i < idxd->max_groups; i++) {
686 group = &idxd->groups[i];
691 for (i = 0; i < idxd->max_wqs; i++) {
692 wq = &idxd->wqs[i];
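idxd_engines_setup() (lines 650-662) and idxd_wqs_setup() (lines 678-692) follow the same two-pass shape: zero every group's membership bitmap, then walk the engines/WQs and set one bit per member in its group's bitmap, counting how many were configured. A hedged sketch of the engine pass (the WQ pass is analogous but indexes a four-word bitmap, e.g. grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64)):

	static int idxd_engines_setup(struct idxd_device *idxd)
	{
		struct idxd_engine *eng;
		struct idxd_group *group;
		int i, configured = 0;

		for (i = 0; i < idxd->max_groups; i++) {
			group = &idxd->groups[i];
			group->grpcfg.engines = 0;
		}

		for (i = 0; i < idxd->max_engines; i++) {
			eng = &idxd->engines[i];
			if (!eng->group)
				continue;

			eng->group->grpcfg.engines |= BIT(eng->id);
			configured++;
		}

		return configured ? 0 : -EINVAL;
	}

Returning -EINVAL when nothing is configured is what lets idxd_device_config() bail out before touching MMIO.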
715 int idxd_device_config(struct idxd_device *idxd)
719 lockdep_assert_held(&idxd->dev_lock);
720 rc = idxd_wqs_setup(idxd);
724 rc = idxd_engines_setup(idxd);
728 idxd_group_flags_setup(idxd);
730 rc = idxd_wqs_config_write(idxd);
734 rc = idxd_groups_config_write(idxd);
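idxd_device_config() (lines 715-734) is the top-level sequencing point, called with dev_lock held: compute WQ and engine group membership, derive group flags, then flush the WQ configs and group configs to MMIO in that order. A plausible reconstruction of the control flow around the matched lines:

	int idxd_device_config(struct idxd_device *idxd)
	{
		int rc;

		lockdep_assert_held(&idxd->dev_lock);

		rc = idxd_wqs_setup(idxd);
		if (rc < 0)
			return rc;

		rc = idxd_engines_setup(idxd);
		if (rc < 0)
			return rc;

		idxd_group_flags_setup(idxd);

		/* Hardware only sees the new layout once these writes land. */
		rc = idxd_wqs_config_write(idxd);
		if (rc < 0)
			return rc;

		rc = idxd_groups_config_write(idxd);
		if (rc < 0)
			return rc;

		return 0;
	}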