Lines matching refs: dev

166 static inline u64 async_mask(struct mthca_dev *dev)
168 return dev->mthca_flags & MTHCA_FLAG_SRQ ?
173 static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
185 dev->kar + MTHCA_EQ_DOORBELL,
186 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
189 static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
194 dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
199 static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
201 if (mthca_is_memfree(dev))
202 arbel_set_eq_ci(dev, eq, ci);
204 tavor_set_eq_ci(dev, eq, ci);
207 static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
210 dev->kar + MTHCA_EQ_DOORBELL,
211 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
214 static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
216 writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
219 static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
221 if (!mthca_is_memfree(dev)) {
223 dev->kar + MTHCA_EQ_DOORBELL,
224 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
246 static void port_change(struct mthca_dev *dev, int port, int active)
250 mthca_dbg(dev, "Port change to %s for port %d\n",
253 record.device = &dev->ib_dev;
260 static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
277 disarm_cq(dev, eq->eqn, disarm_cqn);
278 mthca_cq_completion(dev, disarm_cqn);
282 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
287 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
292 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
297 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
302 mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
307 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
312 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
317 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
322 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
327 mthca_cmd_event(dev,
334 port_change(dev,
340 mthca_warn(dev, "CQ %s on CQN %06x\n",
344 mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
349 mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
357 mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
379 set_eq_ci(dev, eq, eq->cons_index);
393 struct mthca_dev *dev = dev_ptr;
397 if (dev->eq_table.clr_mask)
398 writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
400 ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
404 writel(ecr, dev->eq_regs.tavor.ecr_base +
408 if (ecr & dev->eq_table.eq[i].eqn_mask) {
409 if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
410 tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
411 dev->eq_table.eq[i].cons_index);
412 tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
421 struct mthca_dev *dev = eq->dev;
423 mthca_eq_int(dev, eq);
424 tavor_set_eq_ci(dev, eq, eq->cons_index);
425 tavor_eq_req_not(dev, eq->eqn);
433 struct mthca_dev *dev = dev_ptr;
437 if (dev->eq_table.clr_mask)
438 writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
441 if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
443 arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
444 dev->eq_table.eq[i].cons_index);
447 arbel_eq_req_not(dev, dev->eq_table.arm_mask);
455 struct mthca_dev *dev = eq->dev;
457 mthca_eq_int(dev, eq);
458 arbel_set_eq_ci(dev, eq, eq->cons_index);
459 arbel_eq_req_not(dev, eq->eqn_mask);
465 static int mthca_create_eq(struct mthca_dev *dev,
478 eq->dev = dev;
494 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
500 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
514 eq->eqn = mthca_alloc(&dev->eq_table.alloc);
518 err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
532 if (mthca_is_memfree(dev))
536 if (mthca_is_memfree(dev)) {
537 eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
539 eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
540 eq_context->tavor_pd = cpu_to_be32(dev->driver_pd.pd_num);
545 err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn);
547 mthca_warn(dev, "SW2HW_EQ returned %d\n", err);
552 mthca_free_mailbox(dev, mailbox);
557 dev->eq_table.arm_mask |= eq->eqn_mask;
559 mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
565 mthca_free_mr(dev, &eq->mr);
568 mthca_free(&dev->eq_table.alloc, eq->eqn);
573 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
578 mthca_free_mailbox(dev, mailbox);
588 static void mthca_free_eq(struct mthca_dev *dev,
597 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
601 err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn);
603 mthca_warn(dev, "HW2SW_EQ returned %d\n", err);
605 dev->eq_table.arm_mask &= ~eq->eqn_mask;
608 mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
618 mthca_free_mr(dev, &eq->mr);
620 pci_free_consistent(dev->pdev, PAGE_SIZE,
625 mthca_free_mailbox(dev, mailbox);
628 static void mthca_free_irqs(struct mthca_dev *dev)
632 if (dev->eq_table.have_irq)
633 free_irq(dev->pdev->irq, dev);
635 if (dev->eq_table.eq[i].have_irq) {
636 free_irq(dev->eq_table.eq[i].msi_x_vector,
637 dev->eq_table.eq + i);
638 dev->eq_table.eq[i].have_irq = 0;
642 static int mthca_map_reg(struct mthca_dev *dev,
646 phys_addr_t base = pci_resource_start(dev->pdev, 0);
655 static int mthca_map_eq_regs(struct mthca_dev *dev)
657 if (mthca_is_memfree(dev)) {
665 if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
666 dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
667 &dev->clr_base)) {
668 mthca_err(dev, "Couldn't map interrupt clear register, "
677 if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
678 dev->fw.arbel.eq_arm_base) + 4, 4,
679 &dev->eq_regs.arbel.eq_arm)) {
680 mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
681 iounmap(dev->clr_base);
685 if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
686 dev->fw.arbel.eq_set_ci_base,
688 &dev->eq_regs.arbel.eq_set_ci_base)) {
689 mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
690 iounmap(dev->eq_regs.arbel.eq_arm);
691 iounmap(dev->clr_base);
695 if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
696 &dev->clr_base)) {
697 mthca_err(dev, "Couldn't map interrupt clear register, "
702 if (mthca_map_reg(dev, MTHCA_ECR_BASE,
704 &dev->eq_regs.tavor.ecr_base)) {
705 mthca_err(dev, "Couldn't map ecr register, "
707 iounmap(dev->clr_base);
716 static void mthca_unmap_eq_regs(struct mthca_dev *dev)
718 if (mthca_is_memfree(dev)) {
719 iounmap(dev->eq_regs.arbel.eq_set_ci_base);
720 iounmap(dev->eq_regs.arbel.eq_arm);
721 iounmap(dev->clr_base);
723 iounmap(dev->eq_regs.tavor.ecr_base);
724 iounmap(dev->clr_base);
728 int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
738 dev->eq_table.icm_virt = icm_virt;
739 dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
740 if (!dev->eq_table.icm_page)
742 dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
744 if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
745 __free_page(dev->eq_table.icm_page);
749 ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt);
751 pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
753 __free_page(dev->eq_table.icm_page);
759 void mthca_unmap_eq_icm(struct mthca_dev *dev)
761 mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1);
762 pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
764 __free_page(dev->eq_table.icm_page);
767 int mthca_init_eq_table(struct mthca_dev *dev)
773 err = mthca_alloc_init(&dev->eq_table.alloc,
774 dev->limits.num_eqs,
775 dev->limits.num_eqs - 1,
776 dev->limits.reserved_eqs);
780 err = mthca_map_eq_regs(dev);
784 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
785 dev->eq_table.clr_mask = 0;
787 dev->eq_table.clr_mask =
788 swab32(1 << (dev->eq_table.inta_pin & 31));
789 dev->eq_table.clr_int = dev->clr_base +
790 (dev->eq_table.inta_pin < 32 ? 4 : 0);
793 dev->eq_table.arm_mask = 0;
795 intr = dev->eq_table.inta_pin;
797 err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
798 (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
799 &dev->eq_table.eq[MTHCA_EQ_COMP]);
803 err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
804 (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
805 &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
809 err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
810 (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
811 &dev->eq_table.eq[MTHCA_EQ_CMD]);
815 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
823 snprintf(dev->eq_table.eq[i].irq_name,
826 pci_name(dev->pdev));
827 err = request_irq(dev->eq_table.eq[i].msi_x_vector,
828 mthca_is_memfree(dev) ?
831 0, dev->eq_table.eq[i].irq_name,
832 dev->eq_table.eq + i);
835 dev->eq_table.eq[i].have_irq = 1;
838 snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
839 DRV_NAME "@pci:%s", pci_name(dev->pdev));
840 err = request_irq(dev->pdev->irq,
841 mthca_is_memfree(dev) ?
844 IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
847 dev->eq_table.have_irq = 1;
850 err = mthca_MAP_EQ(dev, async_mask(dev),
851 0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
853 mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
854 dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
856 err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
857 0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
859 mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
860 dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
863 if (mthca_is_memfree(dev))
864 arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
866 tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
871 mthca_free_irqs(dev);
872 mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);
875 mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
878 mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);
881 mthca_unmap_eq_regs(dev);
884 mthca_alloc_cleanup(&dev->eq_table.alloc);
888 void mthca_cleanup_eq_table(struct mthca_dev *dev)
892 mthca_free_irqs(dev);
894 mthca_MAP_EQ(dev, async_mask(dev),
895 1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
896 mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
897 1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
900 mthca_free_eq(dev, &dev->eq_table.eq[i]);
902 mthca_unmap_eq_regs(dev);
904 mthca_alloc_cleanup(&dev->eq_table.alloc);
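
The lines above all share one dispatch pattern: mthca_is_memfree(dev) selects between the Arbel (MemFree) register path and the Tavor doorbell path when updating an EQ's consumer index or rearming it. Below is a minimal user-space sketch of that control flow only, under stated assumptions: struct fake_dev and struct fake_eq are stand-ins invented for illustration, and printf() calls replace the real writel()/mthca_write64() register writes, so this is not the driver code itself.

#include <stdint.h>
#include <stdio.h>

/* Stand-in types: the real struct mthca_dev and struct mthca_eq carry
 * mapped register addresses, doorbell locks, consumer indices, etc. */
struct fake_eq  { int eqn; uint32_t cons_index; };
struct fake_dev { int is_memfree; };    /* 1 = Arbel/MemFree, 0 = Tavor */

static int mthca_is_memfree(const struct fake_dev *dev)
{
	return dev->is_memfree;
}

/* Tavor path: ring the EQ doorbell (stands in for the mthca_write64()
 * to dev->kar + MTHCA_EQ_DOORBELL shown in the listing). */
static void tavor_set_eq_ci(struct fake_dev *dev, struct fake_eq *eq, uint32_t ci)
{
	(void)dev;
	printf("Tavor: SET_CI doorbell, eqn %d, ci 0x%x\n", eq->eqn, (unsigned)ci);
}

/* Arbel path: write the new CI to the per-EQ set_ci register (stands in
 * for the writel() to eq_set_ci_base + eqn * 8). */
static void arbel_set_eq_ci(struct fake_dev *dev, struct fake_eq *eq, uint32_t ci)
{
	(void)dev;
	printf("Arbel: eq_set_ci_base + %d * 8, ci 0x%x\n", eq->eqn, (unsigned)ci);
}

/* Same dispatch shape as set_eq_ci() at line 199 of the listing. */
static void set_eq_ci(struct fake_dev *dev, struct fake_eq *eq, uint32_t ci)
{
	if (mthca_is_memfree(dev))
		arbel_set_eq_ci(dev, eq, ci);
	else
		tavor_set_eq_ci(dev, eq, ci);
}

int main(void)
{
	struct fake_dev tavor = { .is_memfree = 0 }, arbel = { .is_memfree = 1 };
	struct fake_eq  eq    = { .eqn = 1, .cons_index = 42 };

	set_eq_ci(&tavor, &eq, eq.cons_index);
	set_eq_ci(&arbel, &eq, eq.cons_index);
	return 0;
}

The same is_memfree branch recurs in the interrupt handlers and in mthca_init_eq_table (lines 863-866 of the listing), which is why both the Tavor and Arbel register sets appear throughout the matches.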