Lines matching references to 'hw' (Linux kernel, drivers/scsi/csiostor/csio_isr.c); each line below is prefixed with its line number in that file.
47 struct csio_hw *hw = (struct csio_hw *) dev_id;
51 if (unlikely(!hw))
54 if (unlikely(pci_channel_offline(hw->pdev))) {
55 CSIO_INC_STATS(hw, n_pcich_offline);
59 spin_lock_irqsave(&hw->lock, flags);
60 csio_hw_slow_intr_handler(hw);
61 rv = csio_mb_isr_handler(hw);
63 if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
64 hw->flags |= CSIO_HWF_FWEVT_PENDING;
65 spin_unlock_irqrestore(&hw->lock, flags);
66 schedule_work(&hw->evtq_work);
69 spin_unlock_irqrestore(&hw->lock, flags);
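
File lines 47-69 above are the non-data (mailbox) interrupt handler: validate dev_id, drop the interrupt if the PCI channel is offline, run the slow-path and mailbox handlers under hw->lock, and, when the mailbox handler succeeds (rv == 0) with no firmware-event work already queued, raise CSIO_HWF_FWEVT_PENDING and defer the rest to a workqueue. A minimal sketch of that "flag under lock, then schedule_work()" pattern, with hypothetical my_hw/my_isr names standing in for the csio types:

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_hw {                                  /* stand-in for struct csio_hw */
        spinlock_t lock;
        unsigned long flags;
#define MY_HWF_EVT_PENDING      0x1             /* mirrors CSIO_HWF_FWEVT_PENDING */
        struct work_struct evtq_work;
};

static irqreturn_t my_isr(int irq, void *dev_id)
{
        struct my_hw *hw = dev_id;
        unsigned long flags;

        if (unlikely(!hw))
                return IRQ_NONE;

        spin_lock_irqsave(&hw->lock, flags);
        /* ... service mailbox/slow-path causes under the lock ... */
        if (!(hw->flags & MY_HWF_EVT_PENDING)) {
                hw->flags |= MY_HWF_EVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);  /* defer slow work to process context */
                return IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&hw->lock, flags);
        return IRQ_HANDLED;
}

The pending flag keeps at most one evtq_work in flight; the worker is expected to clear it before draining events.
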
75 * @hw: HW module.
81 csio_fwevt_handler(struct csio_hw *hw)
86 rv = csio_fwevtq_handler(hw);
88 spin_lock_irqsave(&hw->lock, flags);
89 if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
90 hw->flags |= CSIO_HWF_FWEVT_PENDING;
91 spin_unlock_irqrestore(&hw->lock, flags);
92 schedule_work(&hw->evtq_work);
95 spin_unlock_irqrestore(&hw->lock, flags);
110 struct csio_hw *hw = (struct csio_hw *) dev_id;
112 if (unlikely(!hw))
115 if (unlikely(pci_channel_offline(hw->pdev))) {
116 CSIO_INC_STATS(hw, n_pcich_offline);
120 csio_fwevt_handler(hw);
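
File lines 110-120 are the dedicated firmware-event vector's ISR: the same offline guard, then csio_fwevt_handler() (lines 81-95), which drains the firmware event queue and re-arms the pending flag when more events remain. The guard itself is a standard EEH/AER idiom; a sketch, with the hypothetical helper name pci_channel_usable():

#include <linux/pci.h>

/* Drop interrupts that arrive while the PCI channel is offline
 * (EEH/AER recovery in progress); csio also bumps an n_pcich_offline
 * statistic at this point. */
static bool pci_channel_usable(struct pci_dev *pdev)
{
        return !pci_channel_offline(pdev);
}
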
131 csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
134 csio_fwevt_handler(hw);
139 * @hw: HW module.
146 csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
155 ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
162 csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
166 spin_lock_irqsave(&hw->lock, flags);
188 spin_unlock_irqrestore(&hw->lock, flags);
191 csio_put_scsi_ioreq_lock(hw,
192 csio_hw_to_scsim(hw), ioreq);
194 spin_lock_irqsave(&hw->lock, flags);
196 spin_unlock_irqrestore(&hw->lock, flags);
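
In csio_process_scsi_cmpl() (file lines 146-196), the completion path holds hw->lock while inspecting the completed ioreq, but drops it before csio_put_scsi_ioreq_lock(), which takes the SCSI module's own lock, and then retakes it; that ordering avoids ever nesting the two locks. A sketch of the unlock/put/relock dance, with hypothetical my_pool/my_ioreq types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_ioreq {                       /* stand-in for struct csio_ioreq */
        struct list_head sm;
};

struct my_pool {                        /* stand-in for struct csio_scsim */
        spinlock_t lock;
        struct list_head free_q;
};

/* Return a request to a pool that takes its own lock, without ever
 * holding hw->lock and pool->lock at the same time. */
static void put_ioreq_relock(spinlock_t *hwlock, unsigned long *hwflags,
                             struct my_pool *pool, struct my_ioreq *ioreq)
{
        unsigned long pflags;

        spin_unlock_irqrestore(hwlock, *hwflags);       /* drop hw->lock first */
        spin_lock_irqsave(&pool->lock, pflags);
        list_add_tail(&ioreq->sm, &pool->free_q);       /* back on the free list */
        spin_unlock_irqrestore(&pool->lock, pflags);
        spin_lock_irqsave(hwlock, *hwflags);            /* retake hw->lock */
}
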
214 struct csio_hw *hw = (struct csio_hw *)iq->owner;
221 scm = csio_hw_to_scsim(hw);
223 if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
231 ioreq->io_cbfn(hw, ioreq);
234 csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
240 csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
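
csio_scsi_isr_handler() (file lines 214-240) moves completed requests from the ingress queue onto a private cbfn_q, invokes each request's io_cbfn, and finally returns DDP buffers and ioreqs to their pools in one batch. A sketch of walking such a private completion list, with hypothetical my_req types:

#include <linux/list.h>

struct my_req {
        struct list_head sm;
        void (*io_cbfn)(void *hw, struct my_req *req);  /* per-I/O completion */
};

/* Complete every request gathered on the private list; the caller then
 * returns the whole list to the free pool in one locked operation, as
 * csio_put_scsi_ioreq_list_lock() does with cbfn_q above. */
static void drain_cbfn_q(void *hw, struct list_head *cbfn_q)
{
        struct my_req *req;

        list_for_each_entry(req, cbfn_q, sm)
                req->io_cbfn(hw, req);
}
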
259 struct csio_hw *hw;
264 hw = (struct csio_hw *)iq->owner;
266 if (unlikely(pci_channel_offline(hw->pdev))) {
267 CSIO_INC_STATS(hw, n_pcich_offline);
285 csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
304 struct csio_hw *hw = (struct csio_hw *) dev_id;
310 if (unlikely(!hw))
313 if (unlikely(pci_channel_offline(hw->pdev))) {
314 CSIO_INC_STATS(hw, n_pcich_offline);
319 if (hw->intr_mode == CSIO_IM_INTX)
320 csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));
326 if (csio_hw_slow_intr_handler(hw))
330 intx_q = csio_get_q(hw, hw->intr_iq_idx);
335 if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
338 spin_lock_irqsave(&hw->lock, flags);
339 rv = csio_mb_isr_handler(hw);
340 if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
341 hw->flags |= CSIO_HWF_FWEVT_PENDING;
342 spin_unlock_irqrestore(&hw->lock, flags);
343 schedule_work(&hw->evtq_work);
346 spin_unlock_irqrestore(&hw->lock, flags);
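
File lines 304-346 are the legacy/INTx path: in INTx mode the handler first de-asserts the line by writing 0 to MYPF_REG(PCIE_PF_CLI_A), then tries the slow-path handler, processes the single INTx ingress queue, and finally falls back to the mailbox/firmware-event logic. A sketch of the ack-then-service shape, with hypothetical MY_PF_CLI/my_intx_hw names (csio goes through csio_wr_reg32() rather than a raw writel()):

#include <linux/interrupt.h>
#include <linux/io.h>

#define MY_PF_CLI       0x0             /* hypothetical "clear INTx" register offset */

struct my_intx_hw {
        void __iomem *regs;
};

static irqreturn_t my_intx_isr(int irq, void *dev_id)
{
        struct my_intx_hw *hw = dev_id;
        bool handled = false;

        writel(0, hw->regs + MY_PF_CLI);        /* de-assert the INTx line early */

        /* ... check slow-path causes, then the single INTx ingress queue;
         * set 'handled' whenever any work was found ... */

        return handled ? IRQ_HANDLED : IRQ_NONE;        /* line may be shared */
}
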
352 csio_add_msix_desc(struct csio_hw *hw)
355 struct csio_msix_entries *entryp = &hw->msix_entries[0];
358 int cnt = hw->num_sqsets + k;
363 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
368 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
375 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
376 CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
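
csio_add_msix_desc() (file lines 352-376) composes a human-readable name per vector: one non-data entry, one firmware-event entry, then one per SCSI queue set, each tagged with the PCI bus:dev.fn so /proc/interrupts output is identifiable. A sketch using scnprintf(); the "drv-" prefix, DESC_LEN, and the bus/dev/fn parameters are hypothetical stand-ins for csio's entry descriptors and CSIO_PCI_BUS/DEV/FUNC accessors:

#include <linux/kernel.h>

#define DESC_LEN 24

static void add_msix_desc(char (*desc)[DESC_LEN], int nvecs,
                          u8 bus, u8 dev, u8 fn)
{
        int i;

        scnprintf(desc[0], DESC_LEN, "drv-%02x:%02x:%x-nondata", bus, dev, fn);
        scnprintf(desc[1], DESC_LEN, "drv-%02x:%02x:%x-fwevt", bus, dev, fn);
        for (i = 2; i < nvecs; i++)             /* one per SCSI queue set */
                scnprintf(desc[i], DESC_LEN, "drv-%02x:%02x:%x-scsi%d",
                          bus, dev, fn, i - 2);
}
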
381 csio_request_irqs(struct csio_hw *hw)
384 struct csio_msix_entries *entryp = &hw->msix_entries[0];
386 struct pci_dev *pdev = hw->pdev;
388 if (hw->intr_mode != CSIO_IM_MSIX) {
390 hw->intr_mode == CSIO_IM_MSI ? 0 : IRQF_SHARED,
391 KBUILD_MODNAME, hw);
393 csio_err(hw, "Failed to allocate interrupt line.\n");
401 csio_add_msix_desc(hw);
404 entryp[k].desc, hw);
406 csio_err(hw, "IRQ request failed for vec %d err:%d\n",
411 entryp[k++].dev_id = hw;
414 entryp[k].desc, hw);
416 csio_err(hw, "IRQ request failed for vec %d err:%d\n",
421 entryp[k++].dev_id = (void *)hw;
424 for (i = 0; i < hw->num_pports; i++) {
425 info = &hw->scsi_cpu_info[i];
427 struct csio_scsi_qset *sqset = &hw->sqset[i][j];
428 struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
433 csio_err(hw,
445 hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
450 free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id);
451 pci_free_irq_vectors(hw->pdev);
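
csio_request_irqs() (file lines 381-451) takes two paths: outside MSI-X it requests a single handler on vector 0 (IRQF_SHARED for INTx, exclusive for MSI); under MSI-X it requests one handler per vector using the descriptions built above, and on any failure unwinds every IRQ requested so far before releasing the vectors. A sketch of the request/unwind loop, with hypothetical names:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Request one handler per allocated vector; on failure, free every IRQ
 * requested so far, then release the vectors themselves. */
static int request_vec_irqs(struct pci_dev *pdev, int nvecs,
                            irq_handler_t handler, const char * const *desc,
                            void *dev_id)
{
        int i, rv;

        for (i = 0; i < nvecs; i++) {
                rv = request_irq(pci_irq_vector(pdev, i), handler, 0,
                                 desc[i], dev_id);
                if (rv)
                        goto err;
        }
        return 0;
err:
        while (--i >= 0)
                free_irq(pci_irq_vector(pdev, i), dev_id);
        pci_free_irq_vectors(pdev);
        return rv;
}
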
457 csio_reduce_sqsets(struct csio_hw *hw, int cnt)
462 while (cnt < hw->num_sqsets) {
463 for (i = 0; i < hw->num_pports; i++) {
464 info = &hw->scsi_cpu_info[i];
467 hw->num_sqsets--;
468 if (hw->num_sqsets <= cnt)
474 csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
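
csio_reduce_sqsets() (file lines 457-474) sheds SCSI queue sets one per port, round-robin, until the total fits the granted vector count, so no single port absorbs the whole reduction. A self-contained sketch of that policy; per_port[] stands in for the per-port scsi_cpu_info max_cpus counts:

/* Trim queue sets evenly across ports until *total <= target
 * (assumes target is reachable, i.e. some port still has sets). */
static void reduce_sets(unsigned int *per_port, int nports,
                        unsigned int *total, unsigned int target)
{
        int i;

        while (*total > target)
                for (i = 0; i < nports && *total > target; i++)
                        if (per_port[i] > 0) {
                                per_port[i]--;
                                (*total)--;
                        }
}
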
479 struct csio_hw *hw = affd->priv;
485 if (nvecs < hw->num_pports) {
491 affd->nr_sets = hw->num_pports;
492 for (i = 0; i < hw->num_pports; i++)
493 affd->set_size[i] = nvecs / hw->num_pports;
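
File lines 479-493 are the irq_affinity ->calc_sets() callback: it splits the data vectors into one affinity set per port, evenly sized, falling back to a single set when fewer vectors than ports were granted. A sketch, with the port count passed through affd->priv (csio passes the whole hw instead):

#include <linux/interrupt.h>

static void my_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
        unsigned int nports = *(unsigned int *)affd->priv;
        unsigned int i;

        if (nvecs < nports) {           /* not even one vector per port */
                affd->nr_sets = 1;
                affd->set_size[0] = nvecs;
                return;
        }

        affd->nr_sets = nports;         /* one set per port, evenly sized */
        for (i = 0; i < nports; i++)
                affd->set_size[i] = nvecs / nports;
}
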
497 csio_enable_msix(struct csio_hw *hw)
505 .priv = hw,
508 if (hw->num_pports > IRQ_AFFINITY_MAX_SETS)
511 min = hw->num_pports + extra;
512 cnt = hw->num_sqsets + extra;
515 if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
516 cnt = min_t(uint8_t, hw->cfg_niq, cnt);
518 csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);
520 cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt,
525 if (cnt < (hw->num_sqsets + extra)) {
526 csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
527 csio_reduce_sqsets(hw, cnt - extra);
532 csio_set_nondata_intr_idx(hw, k);
533 csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++);
534 csio_set_fwevt_intr_idx(hw, k++);
536 for (i = 0; i < hw->num_pports; i++) {
537 info = &hw->scsi_cpu_info[i];
539 for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
541 hw->sqset[i][j].intr_idx = n;
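
csio_enable_msix() (file lines 497-541) computes the minimum vector count (one per port plus the extra non-queue vectors) and the desired count (one per SCSI queue set plus extras), clamps to the firmware's ingress-queue limit when not master, and allocates with pci_alloc_irq_vectors_affinity(); if fewer vectors come back, it trims the queue sets to match, then hands out vector indices (non-data, fwevt, then per-port SCSI). A sketch of the allocation call; the pre_vectors = 2 assumption mirrors CSIO_EXTRA_VECS, and my_calc_sets is the sketch above:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int alloc_data_vectors(struct pci_dev *pdev, unsigned int min,
                              unsigned int want, unsigned int *nports)
{
        struct irq_affinity affd = {
                .pre_vectors = 2,               /* keep nondata+fwevt out of the spread */
                .calc_sets = my_calc_sets,
                .priv = nports,
        };

        /* Returns the granted count (>= min) or a negative errno. */
        return pci_alloc_irq_vectors_affinity(pdev, min, want,
                                              PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                              &affd);
}
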
551 csio_intr_enable(struct csio_hw *hw)
553 hw->intr_mode = CSIO_IM_NONE;
554 hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
557 if ((csio_msi == 2) && !csio_enable_msix(hw))
558 hw->intr_mode = CSIO_IM_MSIX;
561 if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
562 !csio_is_hw_master(hw)) {
565 if (hw->cfg_niq < (hw->num_sqsets + extra)) {
566 csio_dbg(hw, "Reducing sqsets to %d\n",
567 hw->cfg_niq - extra);
568 csio_reduce_sqsets(hw, hw->cfg_niq - extra);
572 if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
573 hw->intr_mode = CSIO_IM_MSI;
575 hw->intr_mode = CSIO_IM_INTX;
578 csio_dbg(hw, "Using %s interrupt mode.\n",
579 (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
580 ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
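
csio_intr_enable() (file lines 551-580) walks the fallback chain MSI-X -> MSI -> INTx, driven by the csio_msi parameter (2 tries MSI-X first, 1 starts at MSI), and records the winner in hw->intr_mode for later teardown. A sketch of the cascade, with hypothetical my_* names:

#include <linux/pci.h>

enum my_intr_mode { IM_NONE, IM_INTX, IM_MSI, IM_MSIX };

struct my_ihw {
        struct pci_dev *pdev;
        enum my_intr_mode intr_mode;
};

static int my_enable_msix(struct my_ihw *hw);   /* e.g. via the sketch above */

static int msi_mode = 2;        /* stands in for the csio_msi parameter */

static void my_intr_enable(struct my_ihw *hw)
{
        hw->intr_mode = IM_NONE;

        if (msi_mode == 2 && my_enable_msix(hw) == 0)
                hw->intr_mode = IM_MSIX;
        else if (msi_mode == 1 && pci_enable_msi(hw->pdev) == 0)
                hw->intr_mode = IM_MSI;
        else
                hw->intr_mode = IM_INTX;        /* legacy line interrupt */
}

As in the original, a failed MSI-X attempt with msi_mode == 2 drops straight to INTx; MSI is only tried when explicitly requested.
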
584 csio_intr_disable(struct csio_hw *hw, bool free)
586 csio_hw_intr_disable(hw);
591 switch (hw->intr_mode) {
593 for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS; i++) {
594 free_irq(pci_irq_vector(hw->pdev, i),
595 hw->msix_entries[i].dev_id);
600 free_irq(pci_irq_vector(hw->pdev, 0), hw);
607 pci_free_irq_vectors(hw->pdev);
608 hw->intr_mode = CSIO_IM_NONE;
609 hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
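
csio_intr_disable() (file lines 584-609) tears down in reverse: free_irq() on every requested vector (each MSI-X vector with its own dev_id; MSI/INTx just vector 0), then pci_free_irq_vectors(), then clearing intr_mode and the host-interrupt-enabled flag. A sketch of the teardown order; nvecs and dev_ids are hypothetical stand-ins for hw->num_sqsets + CSIO_EXTRA_VECS and the msix_entries[] dev_id fields:

#include <linux/interrupt.h>
#include <linux/pci.h>

static void my_intr_disable(struct pci_dev *pdev, int nvecs, void **dev_ids)
{
        int i;

        for (i = 0; i < nvecs; i++)
                free_irq(pci_irq_vector(pdev, i), dev_ids[i]);  /* handlers first */
        pci_free_irq_vectors(pdev);                             /* then the vectors */
}
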