Lines matching defs:nhi — every reference to the NHI (Native Host Interface) handle in the Thunderbolt host controller driver. The numbered fragments below appear to be excerpts from the Linux kernel's drivers/thunderbolt/nhi.c; the left-hand numbers are positions in that file.

22 #include "nhi.h"
43 bit += ring->nhi->hop_count;
50 * ring->nhi->lock must be held.
68 index = ring->hop + ring->nhi->hop_count;
74 misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
77 iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
80 ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
90 old = ioread32(ring->nhi->iobase + reg);
96 dev_dbg(&ring->nhi->pdev->dev,
101 dev_WARN(&ring->nhi->pdev->dev,
105 iowrite32(new, ring->nhi->iobase + reg);
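
The fragments at 90-105 are the read-modify-write that enables or disables a single ring's interrupt. A sketch of how they fit together, assuming the ring_interrupt_index() helper visible at line 43 (TX rings occupy the low bits, RX rings the next hop_count bits); the REG_DMA_MISC/IVR writes at 74-80, which route a ring to its MSI-X vector, are omitted here:

    /* Sketch: toggle one ring's enable bit in REG_RING_INTERRUPT_BASE. */
    static void ring_interrupt_active(struct tb_ring *ring, bool active)
    {
        int index = ring_interrupt_index(ring); /* hop, + hop_count for RX */
        int reg = REG_RING_INTERRUPT_BASE + (index / 32) * 4;
        int mask = 1 << (index % 32);
        u32 old, new;

        old = ioread32(ring->nhi->iobase + reg);
        new = active ? (old | mask) : (old & ~mask);

        dev_dbg(&ring->nhi->pdev->dev,
                "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
                active ? "enabling" : "disabling", reg, index % 32, old, new);

        if (new == old)
            dev_WARN(&ring->nhi->pdev->dev,
                     "interrupt for %s %d is already %s\n",
                     RING_TYPE(ring), ring->hop,
                     active ? "enabled" : "disabled");

        iowrite32(new, ring->nhi->iobase + reg);
    }
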
113 static void nhi_disable_interrupts(struct tb_nhi *nhi)
117 for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
118 iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
121 for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
122 ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
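
Lines 117-122 are essentially the whole of nhi_disable_interrupts(). A commented reconstruction, on the assumption that the notify/status registers are clear-on-read (which is why the second loop only reads):

    static void nhi_disable_interrupts(struct tb_nhi *nhi)
    {
        int i = 0;

        /* Mask every ring interrupt (one enable bit per ring). */
        for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
            iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

        /* Reading the notify registers drops any already-latched
         * status, so no write-back is needed. */
        for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
            ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
    }
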
129 void __iomem *io = ring->nhi->iobase;
137 void __iomem *io = ring->nhi->iobase;
339 val = ioread32(ring->nhi->iobase + reg);
344 iowrite32(val, ring->nhi->iobase + reg);
347 /* Both @nhi->lock and @ring->lock should be held */
372 spin_lock_irqsave(&ring->nhi->lock, flags);
377 spin_unlock_irqrestore(&ring->nhi->lock, flags);
385 spin_lock(&ring->nhi->lock);
389 spin_unlock(&ring->nhi->lock);
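
The lock pairs at 372-389 encode the driver's lock ordering: nhi->lock is the outer lock (taken with IRQs disabled), and ring->lock nests inside it. A sketch of a poll-completion path that follows this order; ring->start_poll and __ring_interrupt_mask() are names taken from the surrounding file:

    /* Sketch: re-enable a polled ring's interrupt; nhi->lock outside,
     * ring->lock inside, matching the comment at line 347. */
    void tb_ring_poll_complete(struct tb_ring *ring)
    {
        unsigned long flags;

        spin_lock_irqsave(&ring->nhi->lock, flags);
        spin_lock(&ring->lock);
        if (ring->start_poll)           /* ring was opened in poll mode */
            __ring_interrupt_mask(ring, false);
        spin_unlock(&ring->lock);
        spin_unlock_irqrestore(&ring->nhi->lock, flags);
    }
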
396 struct tb_nhi *nhi = ring->nhi;
400 if (!nhi->pdev->msix_enabled)
403 ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
409 ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
423 ida_simple_remove(&nhi->msix_ida, ring->vector);
434 ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
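
Lines 396-434 split an MSI-X vector request into two steps: reserve a vector number in the per-controller IDA, then translate it into a Linux IRQ with pci_irq_vector(). A sketch of the request path; the ring_msix handler name and the error label are assumptions inferred from the release path shown above:

    static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
    {
        struct tb_nhi *nhi = ring->nhi;
        int ret;

        if (!nhi->pdev->msix_enabled)
            return 0;               /* fall back to the shared MSI */

        /* Reserve a free vector number. */
        ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
        if (ret < 0)
            return ret;
        ring->vector = ret;

        /* Map the vector number to a Linux IRQ number. */
        ret = pci_irq_vector(nhi->pdev, ring->vector);
        if (ret < 0)
            goto err_ida_remove;
        ring->irq = ret;

        ret = request_irq(ring->irq, ring_msix,
                          no_suspend ? IRQF_NO_SUSPEND : 0,
                          "thunderbolt", ring);
        if (ret)
            goto err_ida_remove;
        return 0;

    err_ida_remove:
        ida_simple_remove(&nhi->msix_ida, ring->vector);
        return ret;
    }
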
439 static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
443 spin_lock_irq(&nhi->lock);
452 for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
454 if (!nhi->tx_rings[i]) {
459 if (!nhi->rx_rings[i]) {
467 if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
468 dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
472 if (ring->is_tx && nhi->tx_rings[ring->hop]) {
473 dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
477 } else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
478 dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
485 nhi->tx_rings[ring->hop] = ring;
487 nhi->rx_rings[ring->hop] = ring;
490 spin_unlock_irq(&nhi->lock);
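
Assembled, lines 439-490 give nhi_alloc_hop() two modes: a negative hop requests automatic allocation of the first free HopID at or above RING_FIRST_USABLE_HOPID, while an explicit hop is range-checked and tested for double allocation, all under nhi->lock. A condensed reconstruction:

    static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
    {
        int ret = 0;

        spin_lock_irq(&nhi->lock);

        if (ring->hop < 0) {
            unsigned int i;

            /* Automatically allocate the first free HopID. */
            for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
                if (ring->is_tx ? !nhi->tx_rings[i] : !nhi->rx_rings[i]) {
                    ring->hop = i;
                    break;
                }
            }
        }

        if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
            dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
            ret = -EINVAL;
        } else if (ring->is_tx && nhi->tx_rings[ring->hop]) {
            dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
                     ring->hop);
            ret = -EBUSY;
        } else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
            dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
                     ring->hop);
            ret = -EBUSY;
        } else if (ring->is_tx) {
            nhi->tx_rings[ring->hop] = ring;
        } else {
            nhi->rx_rings[ring->hop] = ring;
        }

        spin_unlock_irq(&nhi->lock);
        return ret;
    }
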
495 static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
503 dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
515 ring->nhi = nhi;
528 ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
537 if (nhi_alloc_hop(nhi, ring))
545 dma_free_coherent(&ring->nhi->pdev->dev,
556 * @nhi: Pointer to the NHI the ring is to be allocated
561 struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
564 return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
570 * @nhi: Pointer to the NHI the ring is to be allocated
581 struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
585 return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
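
tb_ring_alloc_tx() and tb_ring_alloc_rx() are thin wrappers over tb_ring_alloc(): TX rings carry no frame-boundary masks, while RX rings pass sof_mask/eof_mask (and an optional poll callback) through. A hypothetical caller, loosely modeled on how the control channel sets up its ring pair; the hop, size, and mask values here are illustrative only:

    /* Illustrative only: a TX/RX pair on HopID 0, 10 descriptors each. */
    struct tb_ring *tx, *rx;

    tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
    if (!tx)
        return -ENOMEM;

    /* sof_mask/eof_mask select which PDF values delimit RX frames. */
    rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND,
                          0xffff, 0xffff, NULL, NULL);
    if (!rx) {
        tb_ring_free(tx);
        return -ENOMEM;
    }
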
600 spin_lock_irq(&ring->nhi->lock);
602 if (ring->nhi->going_away)
605 dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
608 dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
636 spin_unlock_irq(&ring->nhi->lock);
655 spin_lock_irq(&ring->nhi->lock);
657 dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
659 if (ring->nhi->going_away)
662 dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
678 spin_unlock_irq(&ring->nhi->lock);
700 spin_lock_irq(&ring->nhi->lock);
706 ring->nhi->tx_rings[ring->hop] = NULL;
708 ring->nhi->rx_rings[ring->hop] = NULL;
711 dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
714 spin_unlock_irq(&ring->nhi->lock);
718 dma_free_coherent(&ring->nhi->pdev->dev,
726 dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
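
Lines 600-726 cover the rest of the ring life cycle: tb_ring_start() programs the descriptors and unmasks the interrupt, tb_ring_stop() quiesces DMA under nhi->lock, and tb_ring_free() releases the HopID slot, the MSI-X vector, and the coherent descriptor buffer, warning if the ring is still running. A hypothetical end-to-end use:

    /* Hypothetical TX ring life cycle (error handling elided). */
    struct tb_ring *ring;

    ring = tb_ring_alloc_tx(nhi, 0, 16, 0);
    if (!ring)
        return -ENOMEM;

    tb_ring_start(ring);    /* program descriptors, enable the interrupt */

    /* ... enqueue frames with tb_ring_tx() and handle completions ... */

    tb_ring_stop(ring);     /* quiesce DMA, mask the interrupt */
    tb_ring_free(ring);     /* release HopID, vector and descriptors */
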
741 * @nhi: Pointer to the NHI structure
748 int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
753 iowrite32(data, nhi->iobase + REG_INMAIL_DATA);
755 val = ioread32(nhi->iobase + REG_INMAIL_CMD);
758 iowrite32(val, nhi->iobase + REG_INMAIL_CMD);
762 val = ioread32(nhi->iobase + REG_INMAIL_CMD);
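
Lines 748-762 outline the inmail handshake: write the payload, set the opcode plus a request bit, then poll until firmware clears the request bit. A fuller sketch; the REG_INMAIL_* field macros and NHI_MAILBOX_TIMEOUT are assumed to come from the driver's register headers:

    int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
    {
        ktime_t timeout;
        u32 val;

        iowrite32(data, nhi->iobase + REG_INMAIL_DATA);

        val = ioread32(nhi->iobase + REG_INMAIL_CMD);
        val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
        val |= REG_INMAIL_OP_REQUEST | cmd;
        iowrite32(val, nhi->iobase + REG_INMAIL_CMD);

        /* Firmware clears OP_REQUEST once it has consumed the command. */
        timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
        do {
            val = ioread32(nhi->iobase + REG_INMAIL_CMD);
            if (!(val & REG_INMAIL_OP_REQUEST))
                break;
            usleep_range(10, 20);
        } while (ktime_before(ktime_get(), timeout));

        if (val & REG_INMAIL_OP_REQUEST)
            return -ETIMEDOUT;
        if (val & REG_INMAIL_ERROR)
            return -EIO;
        return 0;
    }
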
778 * @nhi: Pointer to the NHI structure
783 enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
787 val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
796 struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
803 spin_lock_irq(&nhi->lock);
810 for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
812 value = ioread32(nhi->iobase
815 if (++hop == nhi->hop_count) {
822 dev_warn(&nhi->pdev->dev,
828 ring = nhi->tx_rings[hop];
830 ring = nhi->rx_rings[hop];
832 dev_warn(&nhi->pdev->dev,
843 spin_unlock_irq(&nhi->lock);
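
The 3 * nhi->hop_count loop at 810 walks three consecutive bitfields starting at REG_RING_NOTIFY_BASE: TX completions, then RX completions, then RX overflow ("frame not completed"). One status dword is read per 32 bits, and hop/type counters decode which ring and event each bit encodes. A condensed sketch of the dispatch (value, bit, hop, type and ring declared as in the listing; hop starts at -1, type at 0 = TX):

    for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
        /* Fetch the next status dword every 32 bits. */
        if (bit % 32 == 0)
            value = ioread32(nhi->iobase + REG_RING_NOTIFY_BASE
                             + 4 * (bit / 32));
        /* Advance to the next bitfield (TX -> RX -> overflow). */
        if (++hop == nhi->hop_count) {
            hop = 0;
            type++;
        }
        if (!(value & (1 << (bit % 32))))
            continue;
        if (type == 2) {        /* RX overflow has no ring callback */
            dev_warn(&nhi->pdev->dev, "RX overflow for ring %d\n", hop);
            continue;
        }
        ring = type == 0 ? nhi->tx_rings[hop] : nhi->rx_rings[hop];
        if (!ring) {
            dev_warn(&nhi->pdev->dev,
                     "got interrupt for inactive %s ring %d\n",
                     type ? "RX" : "TX", hop);
            continue;
        }
        /* ring->lock nests inside nhi->lock, as elsewhere in the file. */
        spin_lock(&ring->lock);
        __ring_interrupt(ring);
        spin_unlock(&ring->lock);
    }
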
848 struct tb_nhi *nhi = data;
849 schedule_work(&nhi->interrupt_work);
857 struct tb_nhi *nhi = tb->nhi;
864 if (nhi->ops && nhi->ops->suspend_noirq) {
865 ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
917 static void nhi_enable_int_throttling(struct tb_nhi *nhi)
929 iowrite32(throttle, nhi->iobase + reg);
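
nhi_enable_int_throttling() at 917-929 rate-limits interrupt delivery by programming one throttling register per MSI-X vector. A sketch, assuming the hardware counts in 256 ns units and the driver picks a 128 µs throttle:

    static void nhi_enable_int_throttling(struct tb_nhi *nhi)
    {
        int i;

        /* Throttling rate is specified in 256 ns increments. */
        u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);

        for (i = 0; i < MSIX_MAX_VECS; i++) {
            u32 reg = REG_INT_THROTTLING_RATE + i * 4;

            iowrite32(throttle, nhi->iobase + reg);
        }
    }
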
937 struct tb_nhi *nhi = tb->nhi;
946 nhi->going_away = true;
948 if (nhi->ops && nhi->ops->resume_noirq) {
949 ret = nhi->ops->resume_noirq(nhi);
953 nhi_enable_int_throttling(tb->nhi);
987 struct tb_nhi *nhi = tb->nhi;
994 if (nhi->ops && nhi->ops->runtime_suspend) {
995 ret = nhi->ops->runtime_suspend(tb->nhi);
1006 struct tb_nhi *nhi = tb->nhi;
1009 if (nhi->ops && nhi->ops->runtime_resume) {
1010 ret = nhi->ops->runtime_resume(nhi);
1015 nhi_enable_int_throttling(nhi);
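
The system and runtime PM paths (857-1015) all share one guard before dispatching into the optional per-controller ops table, so controllers without quirks simply leave the hooks NULL; the resume paths then re-program interrupt throttling, presumably because that register state does not survive suspend. The pattern, extracted:

    /* Optional-hook dispatch shared by the PM callbacks. */
    if (nhi->ops && nhi->ops->runtime_resume) {
        ret = nhi->ops->runtime_resume(nhi);
        if (ret)
            return ret;
    }
    nhi_enable_int_throttling(nhi);   /* registers may have lost state */
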
1019 static void nhi_shutdown(struct tb_nhi *nhi)
1023 dev_dbg(&nhi->pdev->dev, "shutdown\n");
1025 for (i = 0; i < nhi->hop_count; i++) {
1026 if (nhi->tx_rings[i])
1027 dev_WARN(&nhi->pdev->dev,
1029 if (nhi->rx_rings[i])
1030 dev_WARN(&nhi->pdev->dev,
1033 nhi_disable_interrupts(nhi);
1038 if (!nhi->pdev->msix_enabled) {
1039 devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
1040 flush_work(&nhi->interrupt_work);
1042 ida_destroy(&nhi->msix_ida);
1044 if (nhi->ops && nhi->ops->shutdown)
1045 nhi->ops->shutdown(nhi);
1048 static int nhi_init_msi(struct tb_nhi *nhi)
1050 struct pci_dev *pdev = nhi->pdev;
1054 nhi_disable_interrupts(nhi);
1056 nhi_enable_int_throttling(nhi);
1058 ida_init(&nhi->msix_ida);
1073 INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
1075 irq = pci_irq_vector(nhi->pdev, 0);
1080 IRQF_NO_SUSPEND, "thunderbolt", nhi);
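
The nhi_init_msi() fragments (1048-1080) hide the fallback in the gap between ida_init() and INIT_WORK(): the driver first asks for the full MSI-X range, and only on failure falls back to a single MSI demultiplexed by nhi_interrupt_work. A sketch of that middle section (nr_vectors, res and irq are local ints):

    /* Prefer one MSI-X vector per ring; else one shared MSI. */
    nr_vectors = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
                                       PCI_IRQ_MSIX);
    if (nr_vectors < 0) {
        res = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
        if (res < 0)
            return res;

        /* Single MSI: ring events are demultiplexed in a work item. */
        INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

        irq = pci_irq_vector(nhi->pdev, 0);
        if (irq < 0)
            return irq;

        res = devm_request_irq(&pdev->dev, irq, nhi_msi,
                               IRQF_NO_SUSPEND, "thunderbolt", nhi);
        if (res)
            return res;
    }
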
1107 static void tb_apple_add_links(struct tb_nhi *nhi)
1114 switch (nhi->pdev->device) {
1124 upstream = pci_upstream_bridge(nhi->pdev);
1150 link = device_link_add(&pdev->dev, &nhi->pdev->dev,
1154 dev_dbg(&nhi->pdev->dev, "created link from %s\n",
1157 dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
1165 struct tb_nhi *nhi;
1186 nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
1187 if (!nhi)
1190 nhi->pdev = pdev;
1191 nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
1193 nhi->iobase = pcim_iomap_table(pdev)[0];
1194 nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
1195 dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
1197 nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
1198 sizeof(*nhi->tx_rings), GFP_KERNEL);
1199 nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
1200 sizeof(*nhi->rx_rings), GFP_KERNEL);
1201 if (!nhi->tx_rings || !nhi->rx_rings)
1204 res = nhi_init_msi(nhi);
1210 spin_lock_init(&nhi->lock);
1222 if (nhi->ops && nhi->ops->init) {
1223 res = nhi->ops->init(nhi);
1228 tb_apple_add_links(nhi);
1229 tb_acpi_add_links(nhi);
1231 tb = icm_probe(nhi);
1233 tb = tb_probe(nhi);
1235 dev_err(&nhi->pdev->dev,
1240 dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
1249 nhi_shutdown(nhi);
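
Taken in order, the probe fragments (1165-1249) do the following: map BAR 0, read the hop count from the low 10 bits of REG_HOP_COUNT, size the per-hop ring tables, set up interrupts, initialize the lock, run the optional ops->init hook, add power-management device links, and hand the controller to a connection manager, trying the firmware CM (icm_probe) before the software one (tb_probe). A condensed outline:

    /* Condensed probe order; error paths elided. */
    nhi->iobase = pcim_iomap_table(pdev)[0];
    nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;

    nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
                                 sizeof(*nhi->tx_rings), GFP_KERNEL);
    nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
                                 sizeof(*nhi->rx_rings), GFP_KERNEL);

    res = nhi_init_msi(nhi);            /* MSI-X per ring, or one MSI */
    spin_lock_init(&nhi->lock);

    if (nhi->ops && nhi->ops->init)
        res = nhi->ops->init(nhi);

    tb_apple_add_links(nhi);            /* power-dependency device links */
    tb_acpi_add_links(nhi);

    tb = icm_probe(nhi);                /* firmware connection manager */
    if (!tb)
        tb = tb_probe(nhi);             /* software connection manager */
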
1267 struct tb_nhi *nhi = tb->nhi;
1274 nhi_shutdown(nhi);