Lines Matching defs:nhi

24 #include "nhi.h"
57 bit += ring->nhi->hop_count;
61 static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
63 if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
66 val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
67 iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
69 iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
73 static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
75 if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
76 ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring);
78 iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring);
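
The two helpers above are the quirk-dependent interrupt primitives used throughout the file. A minimal reconstruction assembled from the visible fragments; the lines the listing omits (locals, the else branches) are filled in as assumptions, and the register names come from the driver's own headers ("nhi.h"/"nhi_regs.h"):

	static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
	{
		if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
			u32 val;

			/* quirked hardware: clear the bits in the interrupt enable register */
			val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
			iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
		} else {
			/* otherwise: write the mask bits to the dedicated mask register */
			iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
		}
	}

	static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
	{
		if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
			ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring);	/* reading clears */
		else
			iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring);	/* write to clear */
	}
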
84 * ring->nhi->lock must be held.
103 index = ring->hop + ring->nhi->hop_count;
116 misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
117 if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
123 ring->nhi->iobase + REG_DMA_MISC);
125 ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
135 old = ioread32(ring->nhi->iobase + reg);
141 dev_dbg(&ring->nhi->pdev->dev,
146 dev_WARN(&ring->nhi->pdev->dev,
152 iowrite32(new, ring->nhi->iobase + reg);
154 nhi_mask_interrupt(ring->nhi, mask, index);
162 static void nhi_disable_interrupts(struct tb_nhi *nhi)
166 for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
167 nhi_mask_interrupt(nhi, ~0, 4 * i);
170 for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
171 nhi_clear_interrupt(nhi, 4 * i);
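
nhi_disable_interrupts() simply walks every ring interrupt register, masking and then clearing each one; the 4 * i offset advances one 32-bit register at a time. A sketch assembling the fragments (the register-count macros are defined earlier in the same file):

	static void nhi_disable_interrupts(struct tb_nhi *nhi)
	{
		int i;

		/* mask every ring interrupt register */
		for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
			nhi_mask_interrupt(nhi, ~0, 4 * i);

		/* then clear any already-pending status bits */
		for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
			nhi_clear_interrupt(nhi, 4 * i);
	}
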
178 void __iomem *io = ring->nhi->iobase;
186 void __iomem *io = ring->nhi->iobase;
388 val = ioread32(ring->nhi->iobase + reg);
393 iowrite32(val, ring->nhi->iobase + reg);
396 /* Both @nhi->lock and @ring->lock should be held */
421 spin_lock_irqsave(&ring->nhi->lock, flags);
426 spin_unlock_irqrestore(&ring->nhi->lock, flags);
434 if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
439 iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR);
441 iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR +
442 4 * (ring->nhi->hop_count / 32));
449 spin_lock(&ring->nhi->lock);
454 spin_unlock(&ring->nhi->lock);
461 struct tb_nhi *nhi = ring->nhi;
465 if (!nhi->pdev->msix_enabled)
468 ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
474 ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
488 ida_simple_remove(&nhi->msix_ida, ring->vector);
499 ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
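
The fragments above are the per-ring MSI-X plumbing: a vector index is reserved from nhi->msix_ida, translated to a Linux IRQ with pci_irq_vector(), and released again on teardown. A sketch under those assumptions; ring_msix (the per-ring handler) and the flag handling are not visible in the listing:

	static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
	{
		struct tb_nhi *nhi = ring->nhi;
		unsigned long irqflags;
		int ret;

		if (!nhi->pdev->msix_enabled)
			return 0;	/* fall back to the shared legacy/MSI interrupt */

		/* reserve a vector index ... */
		ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
		if (ret < 0)
			return ret;
		ring->vector = ret;

		/* ... and map it to a Linux IRQ number */
		ret = pci_irq_vector(nhi->pdev, ring->vector);
		if (ret < 0)
			goto err_ida_remove;
		ring->irq = ret;

		irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
		ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
		if (ret)
			goto err_ida_remove;

		return 0;

	err_ida_remove:
		ida_simple_remove(&nhi->msix_ida, ring->vector);
		return ret;
	}

ring_release_msix() (the last fragment above) undoes this: free the IRQ, then return the vector index with ida_simple_remove().
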
504 static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
509 if (nhi->quirks & QUIRK_E2E) {
512 dev_dbg(&nhi->pdev->dev, "quirking E2E TX HopID %u -> %u\n",
518 spin_lock_irq(&nhi->lock);
527 for (i = start_hop; i < nhi->hop_count; i++) {
529 if (!nhi->tx_rings[i]) {
534 if (!nhi->rx_rings[i]) {
543 dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
547 if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
548 dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
552 if (ring->is_tx && nhi->tx_rings[ring->hop]) {
553 dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
558 if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
559 dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
566 nhi->tx_rings[ring->hop] = ring;
568 nhi->rx_rings[ring->hop] = ring;
571 spin_unlock_irq(&nhi->lock);
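
nhi_alloc_hop() claims a HopID in the tx_rings[]/rx_rings[] tables under nhi->lock, auto-allocating one when the caller passes a negative hop. A condensed sketch of the bookkeeping visible above; the QUIRK_E2E handling is abbreviated, and treating HopID 0 as reserved is an assumption:

	static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
	{
		unsigned int start_hop = 1;	/* assume HopID 0 is reserved */
		int ret = 0;

		spin_lock_irq(&nhi->lock);

		if (ring->hop < 0) {
			unsigned int i;

			/* automatically allocate the first unused HopID */
			for (i = start_hop; i < nhi->hop_count; i++) {
				if (ring->is_tx ? !nhi->tx_rings[i] : !nhi->rx_rings[i]) {
					ring->hop = i;
					break;
				}
			}
		}

		if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
			dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
			ret = -EINVAL;
		} else if (ring->is_tx && nhi->tx_rings[ring->hop]) {
			dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n", ring->hop);
			ret = -EBUSY;
		} else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
			dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n", ring->hop);
			ret = -EBUSY;
		} else if (ring->is_tx) {
			nhi->tx_rings[ring->hop] = ring;
		} else {
			nhi->rx_rings[ring->hop] = ring;
		}

		spin_unlock_irq(&nhi->lock);
		return ret;
	}
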
576 static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
584 dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
596 ring->nhi = nhi;
610 ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
619 if (nhi_alloc_hop(nhi, ring))
627 dma_free_coherent(&ring->nhi->pdev->dev,
638 * @nhi: Pointer to the NHI the ring is to be allocated
643 struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
646 return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, 0, NULL, NULL);
652 * @nhi: Pointer to the NHI the ring is to be allocated
664 struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
669 return tb_ring_alloc(nhi, hop, size, false, flags, e2e_tx_hop, sof_mask, eof_mask,
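
Both public allocators funnel into tb_ring_alloc(); the TX variant hard-codes the RX-only E2E and frame-mask arguments to zero/NULL. A hypothetical usage sketch (the consumer function name is illustrative, not from the listing): allocate a TX ring on an auto-assigned HopID and start it with tb_ring_start(), whose fragments follow:

	static int example_open_tx_ring(struct tb_nhi *nhi, struct tb_ring **out)
	{
		struct tb_ring *ring;

		/* hop = -1 asks nhi_alloc_hop() to pick a free HopID */
		ring = tb_ring_alloc_tx(nhi, -1, 16, RING_FLAG_NO_SUSPEND);
		if (!ring)
			return -ENOMEM;

		tb_ring_start(ring);
		*out = ring;
		return 0;
	}

Teardown is the mirror image: tb_ring_stop() followed by tb_ring_free(), both of which appear in the fragments below.
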
685 spin_lock_irq(&ring->nhi->lock);
687 if (ring->nhi->going_away)
690 dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
693 dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
730 dev_dbg(&ring->nhi->pdev->dev,
734 dev_dbg(&ring->nhi->pdev->dev, "enabling E2E for %s %d\n",
746 spin_unlock_irq(&ring->nhi->lock);
766 spin_lock_irq(&ring->nhi->lock);
768 dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
770 if (ring->nhi->going_away)
773 dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
789 spin_unlock_irq(&ring->nhi->lock);
811 spin_lock_irq(&ring->nhi->lock);
817 ring->nhi->tx_rings[ring->hop] = NULL;
819 ring->nhi->rx_rings[ring->hop] = NULL;
822 dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
825 spin_unlock_irq(&ring->nhi->lock);
829 dma_free_coherent(&ring->nhi->pdev->dev,
837 dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
852 * @nhi: Pointer to the NHI structure
859 int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
864 iowrite32(data, nhi->iobase + REG_INMAIL_DATA);
866 val = ioread32(nhi->iobase + REG_INMAIL_CMD);
869 iowrite32(val, nhi->iobase + REG_INMAIL_CMD);
873 val = ioread32(nhi->iobase + REG_INMAIL_CMD);
889 * @nhi: Pointer to the NHI structure
894 enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
898 val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
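
The inbound mailbox fragments show a classic write-then-poll sequence: the payload goes to REG_INMAIL_DATA, the command plus a request bit to REG_INMAIL_CMD, and the register is re-read until firmware clears the request bit. A sketch under that reading; the REG_INMAIL_* flag names and the timeout constant come from nhi_regs.h/nhi.h and are assumptions as far as this listing goes:

	int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
	{
		ktime_t timeout;
		u32 val;

		iowrite32(data, nhi->iobase + REG_INMAIL_DATA);

		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
		val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
		val |= REG_INMAIL_OP_REQUEST | cmd;
		iowrite32(val, nhi->iobase + REG_INMAIL_CMD);

		/* poll until firmware acknowledges by clearing the request bit */
		timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
		do {
			val = ioread32(nhi->iobase + REG_INMAIL_CMD);
			if (!(val & REG_INMAIL_OP_REQUEST))
				break;
			usleep_range(10, 20);
		} while (ktime_before(ktime_get(), timeout));

		if (val & REG_INMAIL_OP_REQUEST)
			return -ETIMEDOUT;
		if (val & REG_INMAIL_ERROR)
			return -EIO;
		return 0;
	}

nhi_mailbox_mode() is the outbound counterpart: it reads REG_OUTMAIL_CMD and extracts the firmware operating-mode field.
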
907 struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
914 spin_lock_irq(&nhi->lock);
921 for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
923 value = ioread32(nhi->iobase
926 if (++hop == nhi->hop_count) {
933 dev_warn(&nhi->pdev->dev,
939 ring = nhi->tx_rings[hop];
941 ring = nhi->rx_rings[hop];
943 dev_warn(&nhi->pdev->dev,
954 spin_unlock_irq(&nhi->lock);
959 struct tb_nhi *nhi = data;
960 schedule_work(&nhi->interrupt_work);
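
The hard-IRQ handler itself touches no registers: it only schedules nhi->interrupt_work, and nhi_interrupt_work() (fragments above) scans the notification bits and dispatches to the per-hop rings under nhi->lock. A sketch of the handler:

	static irqreturn_t nhi_msi(int irq, void *data)
	{
		struct tb_nhi *nhi = data;

		schedule_work(&nhi->interrupt_work);
		return IRQ_HANDLED;
	}
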
968 struct tb_nhi *nhi = tb->nhi;
975 if (nhi->ops && nhi->ops->suspend_noirq) {
976 ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
1028 static void nhi_enable_int_throttling(struct tb_nhi *nhi)
1040 iowrite32(throttle, nhi->iobase + reg);
1048 struct tb_nhi *nhi = tb->nhi;
1057 nhi->going_away = true;
1059 if (nhi->ops && nhi->ops->resume_noirq) {
1060 ret = nhi->ops->resume_noirq(nhi);
1064 nhi_enable_int_throttling(tb->nhi);
1098 struct tb_nhi *nhi = tb->nhi;
1105 if (nhi->ops && nhi->ops->runtime_suspend) {
1106 ret = nhi->ops->runtime_suspend(tb->nhi);
1117 struct tb_nhi *nhi = tb->nhi;
1120 if (nhi->ops && nhi->ops->runtime_resume) {
1121 ret = nhi->ops->runtime_resume(nhi);
1126 nhi_enable_int_throttling(nhi);
1130 static void nhi_shutdown(struct tb_nhi *nhi)
1134 dev_dbg(&nhi->pdev->dev, "shutdown\n");
1136 for (i = 0; i < nhi->hop_count; i++) {
1137 if (nhi->tx_rings[i])
1138 dev_WARN(&nhi->pdev->dev,
1140 if (nhi->rx_rings[i])
1141 dev_WARN(&nhi->pdev->dev,
1144 nhi_disable_interrupts(nhi);
1149 if (!nhi->pdev->msix_enabled) {
1150 devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
1151 flush_work(&nhi->interrupt_work);
1153 ida_destroy(&nhi->msix_ida);
1155 if (nhi->ops && nhi->ops->shutdown)
1156 nhi->ops->shutdown(nhi);
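
nhi_shutdown() warns about rings that were never freed, masks and clears all interrupts, and, when running on a single shared interrupt, frees the IRQ before flushing the work item so a racing handler cannot re-queue it. A reconstruction from the fragments; the ordering comment is an assumption consistent with that race:

	static void nhi_shutdown(struct tb_nhi *nhi)
	{
		int i;

		dev_dbg(&nhi->pdev->dev, "shutdown\n");

		for (i = 0; i < nhi->hop_count; i++) {
			if (nhi->tx_rings[i])
				dev_WARN(&nhi->pdev->dev, "TX ring %d is still active\n", i);
			if (nhi->rx_rings[i])
				dev_WARN(&nhi->pdev->dev, "RX ring %d is still active\n", i);
		}
		nhi_disable_interrupts(nhi);

		/*
		 * Release the IRQ before flushing the work item; otherwise a
		 * handler still running could schedule the work again.
		 */
		if (!nhi->pdev->msix_enabled) {
			devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
			flush_work(&nhi->interrupt_work);
		}
		ida_destroy(&nhi->msix_ida);

		if (nhi->ops && nhi->ops->shutdown)
			nhi->ops->shutdown(nhi);
	}
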
1159 static void nhi_check_quirks(struct tb_nhi *nhi)
1161 if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) {
1167 nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
1169 switch (nhi->pdev->device) {
1177 nhi->quirks |= QUIRK_E2E;
1192 static void nhi_check_iommu(struct tb_nhi *nhi)
1194 struct pci_bus *bus = nhi->pdev->bus;
1219 nhi->iommu_dma_protection = port_ok;
1220 dev_dbg(&nhi->pdev->dev, "IOMMU DMA protection is %s\n",
1224 static void nhi_reset(struct tb_nhi *nhi)
1229 val = ioread32(nhi->iobase + REG_CAPS);
1235 dev_dbg(&nhi->pdev->dev, "skipping host router reset\n");
1239 iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET);
1244 val = ioread32(nhi->iobase + REG_RESET);
1246 dev_warn(&nhi->pdev->dev, "host router reset successful\n");
1252 dev_warn(&nhi->pdev->dev, "timeout resetting host router\n");
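
The reset fragments describe a write-then-poll pattern: request a host router reset via REG_RESET_HRR and poll REG_RESET until the bit clears or a timeout expires. A sketch of that pattern only; the REG_CAPS version gate, the opt-out path ("skipping host router reset"), and the exact delays are not recoverable from the listing and are assumptions here:

	static void nhi_reset(struct tb_nhi *nhi)
	{
		ktime_t timeout;
		u32 val;

		iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET);
		msleep(100);					/* assumed settle time */

		timeout = ktime_add_ms(ktime_get(), 500);	/* assumed timeout */
		do {
			val = ioread32(nhi->iobase + REG_RESET);
			if (!(val & REG_RESET_HRR)) {
				dev_warn(&nhi->pdev->dev, "host router reset successful\n");
				return;
			}
			usleep_range(10, 20);
		} while (ktime_before(ktime_get(), timeout));

		dev_warn(&nhi->pdev->dev, "timeout resetting host router\n");
	}
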
1255 static int nhi_init_msi(struct tb_nhi *nhi)
1257 struct pci_dev *pdev = nhi->pdev;
1262 nhi_disable_interrupts(nhi);
1264 nhi_enable_int_throttling(nhi);
1266 ida_init(&nhi->msix_ida);
1281 INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
1283 irq = pci_irq_vector(nhi->pdev, 0);
1288 IRQF_NO_SUSPEND, "thunderbolt", nhi);
1306 static struct tb *nhi_select_cm(struct tb_nhi *nhi)
1315 return tb_probe(nhi);
1322 tb = icm_probe(nhi);
1324 tb = tb_probe(nhi);
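
Connection-manager selection is a two-step fallback: the native software CM (tb_probe()) is used directly when possible; otherwise the firmware CM (icm_probe()) is tried first with tb_probe() as the fallback. The guard on the first return is not visible in the listing (in mainline it is an ACPI/native-USB4 check), so it appears below as a hypothetical placeholder:

	static struct tb *nhi_select_cm(struct tb_nhi *nhi)
	{
		struct tb *tb;

		if (use_native_cm(nhi))	/* placeholder for the guard not shown above */
			return tb_probe(nhi);

		/* try the firmware connection manager first, fall back to native */
		tb = icm_probe(nhi);
		if (!tb)
			tb = tb_probe(nhi);

		return tb;
	}
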
1332 struct tb_nhi *nhi;
1347 nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
1348 if (!nhi)
1351 nhi->pdev = pdev;
1352 nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
1354 nhi->iobase = pcim_iomap_table(pdev)[0];
1355 nhi->hop_count = ioread32(nhi->iobase + REG_CAPS) & 0x3ff;
1356 dev_dbg(dev, "total paths: %d\n", nhi->hop_count);
1358 nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
1359 sizeof(*nhi->tx_rings), GFP_KERNEL);
1360 nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
1361 sizeof(*nhi->rx_rings), GFP_KERNEL);
1362 if (!nhi->tx_rings || !nhi->rx_rings)
1365 nhi_check_quirks(nhi);
1366 nhi_check_iommu(nhi);
1368 nhi_reset(nhi);
1370 res = nhi_init_msi(nhi);
1374 spin_lock_init(&nhi->lock);
1382 if (nhi->ops && nhi->ops->init) {
1383 res = nhi->ops->init(nhi);
1388 tb = nhi_select_cm(nhi);
1402 nhi_shutdown(nhi);
1420 struct tb_nhi *nhi = tb->nhi;
1427 nhi_shutdown(nhi);