Lines matching refs: mvi

79 static void mvs_phy_init(struct mvs_info *mvi, int phy_id)
81 struct mvs_phy *phy = &mvi->phy[phy_id];
84 phy->mvi = mvi;
87 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
97 sas_phy->sas_addr = &mvi->sas_addr[0];
99 sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
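The matches at source lines 79-99 are mvs_phy_init(), which wires each phy back to its owning mvs_info and into libsas. A minimal sketch of how the fragments fit together; lines not matched above are assumptions about the elided source:

static void mvs_phy_init(struct mvs_info *mvi, int phy_id)
{
        struct mvs_phy *phy = &mvi->phy[phy_id];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;    /* assumption */

        phy->mvi = mvi;         /* back-pointer to the owning HBA core */
        /* only phys the chip actually has come up enabled */
        sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
        sas_phy->id = phy_id;                           /* assumption */
        sas_phy->sas_addr = &mvi->sas_addr[0];  /* all phys share the HBA WWN */
        sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
        sas_phy->lldd_phy = phy;                        /* assumption */
}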
103 static void mvs_free(struct mvs_info *mvi)
108 if (!mvi)
111 if (mvi->flags & MVF_FLAG_SOC)
116 dma_pool_destroy(mvi->dma_pool);
118 if (mvi->tx)
119 dma_free_coherent(mvi->dev,
120 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
121 mvi->tx, mvi->tx_dma);
122 if (mvi->rx_fis)
123 dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
124 mvi->rx_fis, mvi->rx_fis_dma);
125 if (mvi->rx)
126 dma_free_coherent(mvi->dev,
127 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
128 mvi->rx, mvi->rx_dma);
129 if (mvi->slot)
130 dma_free_coherent(mvi->dev,
131 sizeof(*mvi->slot) * slot_nr,
132 mvi->slot, mvi->slot_dma);
134 if (mvi->bulk_buffer)
135 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
136 mvi->bulk_buffer, mvi->bulk_buffer_dma);
137 if (mvi->bulk_buffer1)
138 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
139 mvi->bulk_buffer1, mvi->bulk_buffer_dma1);
141 MVS_CHIP_DISP->chip_iounmap(mvi);
142 if (mvi->shost)
143 scsi_host_put(mvi->shost);
144 list_for_each_entry(mwq, &mvi->wq_list, entry)
146 kfree(mvi->tags);
147 kfree(mvi);
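Taken together, source lines 103-147 show mvs_free() releasing everything mvs_alloc() set up; every free is guarded, so it is safe on a partially constructed mvi. A compact sketch of the shape these matches imply; the slot_nr selection and the work-queue walk body are assumptions:

static void mvs_free(struct mvs_info *mvi)
{
        struct mvs_wq *mwq;
        int slot_nr;

        if (!mvi)
                return;

        /* assumption: SoC variants size the slot array differently */
        if (mvi->flags & MVF_FLAG_SOC)
                slot_nr = MVS_SOC_SLOTS;
        else
                slot_nr = MVS_CHIP_SLOT_SZ;

        dma_pool_destroy(mvi->dma_pool);        /* NULL-safe */
        if (mvi->tx)
                dma_free_coherent(mvi->dev,
                                  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
                                  mvi->tx, mvi->tx_dma);
        /* rx_fis, rx, slot and the two bulk buffers are freed the same
         * way, each guarded by its own pointer check (lines 122-139) */

        MVS_CHIP_DISP->chip_iounmap(mvi);
        if (mvi->shost)
                scsi_host_put(mvi->shost);
        list_for_each_entry(mwq, &mvi->wq_list, entry)
                cancel_delayed_work(&mwq->work_q);      /* assumption */
        kfree(mvi->tags);
        kfree(mvi);
}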
156 struct mvs_info *mvi;
160 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
162 if (unlikely(!mvi))
165 stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq);
170 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
171 MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat);
174 MVS_CHIP_DISP->interrupt_enable(mvi);
182 struct mvs_info *mvi;
191 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
193 if (unlikely(!mvi))
196 MVS_CHIP_DISP->interrupt_disable(mvi);
199 stat = MVS_CHIP_DISP->isr_status(mvi, irq);
202 MVS_CHIP_DISP->interrupt_enable(mvi);
211 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
212 MVS_CHIP_DISP->isr(mvi, irq, stat);
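Source lines 156-212 are the interrupt path: the chip's status register is read once through mvi[0], and the same status word is then fanned out to every core's per-chip isr via the MVS_CHIP_DISP ops table. A hedged sketch of the hard-IRQ half (the deferred variant at lines 156-174 follows the same read-once, dispatch-to-all pattern and re-enables interrupts when done):

/* Sketch: the opaque/n_host bookkeeping and the disable/enable placement
 * are assumptions; the point is one status read dispatched to every core. */
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
        struct sas_ha_struct *sha = opaque;
        struct mvs_prv_info *prv = sha->lldd_ha;
        struct mvs_info *mvi = prv->mvi[0];
        u32 stat;
        u32 i;

        if (unlikely(!mvi))
                return IRQ_NONE;

        MVS_CHIP_DISP->interrupt_disable(mvi);
        stat = MVS_CHIP_DISP->isr_status(mvi, irq);
        if (!stat) {
                MVS_CHIP_DISP->interrupt_enable(mvi);
                return IRQ_NONE;
        }
        for (i = 0; i < prv->n_host; i++) {
                mvi = prv->mvi[i];
                MVS_CHIP_DISP->isr(mvi, irq, stat);
        }
        MVS_CHIP_DISP->interrupt_enable(mvi);
        return IRQ_HANDLED;
}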
218 static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
223 if (mvi->flags & MVF_FLAG_SOC)
228 spin_lock_init(&mvi->lock);
229 for (i = 0; i < mvi->chip->n_phy; i++) {
230 mvs_phy_init(mvi, i);
231 mvi->port[i].wide_port_phymap = 0;
232 mvi->port[i].port_attached = 0;
233 INIT_LIST_HEAD(&mvi->port[i].list);
236 mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
237 mvi->devices[i].dev_type = SAS_PHY_UNUSED;
238 mvi->devices[i].device_id = i;
239 mvi->devices[i].dev_status = MVS_DEV_NORMAL;
245 mvi->tx = dma_alloc_coherent(mvi->dev,
246 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
247 &mvi->tx_dma, GFP_KERNEL);
248 if (!mvi->tx)
250 mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
251 &mvi->rx_fis_dma, GFP_KERNEL);
252 if (!mvi->rx_fis)
255 mvi->rx = dma_alloc_coherent(mvi->dev,
256 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
257 &mvi->rx_dma, GFP_KERNEL);
258 if (!mvi->rx)
260 mvi->rx[0] = cpu_to_le32(0xfff);
261 mvi->rx_cons = 0xfff;
263 mvi->slot = dma_alloc_coherent(mvi->dev,
264 sizeof(*mvi->slot) * slot_nr,
265 &mvi->slot_dma, GFP_KERNEL);
266 if (!mvi->slot)
269 mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
271 &mvi->bulk_buffer_dma, GFP_KERNEL);
272 if (!mvi->bulk_buffer)
275 mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev,
277 &mvi->bulk_buffer_dma1, GFP_KERNEL);
278 if (!mvi->bulk_buffer1)
281 sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
282 mvi->dma_pool = dma_pool_create(pool_name, &mvi->pdev->dev,
284 if (!mvi->dma_pool) {
288 mvi->tags_num = slot_nr;
291 mvs_tag_init(mvi);
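mvs_alloc() (source lines 218-291) builds the per-core state: phy, port and device tables first, then the coherent DMA rings (tx, rx_fis, rx, slot), two trash-bucket buffers and a named dma_pool, and finally the tag allocator. It returns 0 on success, which is why mvs_pci_alloc() tests !mvs_alloc(). A sketch of the allocate-or-bail pattern for the rings; the error label and the rx-index convention are assumptions:

        mvi->tx = dma_alloc_coherent(mvi->dev,
                                     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
                                     &mvi->tx_dma, GFP_KERNEL);
        if (!mvi->tx)
                goto err_out;
        mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
                                         &mvi->rx_fis_dma, GFP_KERNEL);
        if (!mvi->rx_fis)
                goto err_out;
        /* rx ring carries one extra entry: element 0 is the index the
         * hardware writes; 0xfff in both it and rx_cons reads as an
         * empty ring (assumption) */
        mvi->rx = dma_alloc_coherent(mvi->dev,
                                     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
                                     &mvi->rx_dma, GFP_KERNEL);
        if (!mvi->rx)
                goto err_out;
        mvi->rx[0] = cpu_to_le32(0xfff);
        mvi->rx_cons = 0xfff;
        /* slot array, bulk buffers and the dma_pool follow the same pattern */
        return 0;
err_out:
        return 1;       /* caller unwinds via mvs_free() */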
298 int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
301 struct pci_dev *pdev = mvi->pdev;
313 mvi->regs_ex = ioremap(res_start, res_len);
315 mvi->regs_ex = (void *)res_start;
316 if (!mvi->regs_ex)
323 iounmap(mvi->regs_ex);
324 mvi->regs_ex = NULL;
328 mvi->regs = ioremap(res_start, res_len);
330 if (!mvi->regs) {
331 if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
332 iounmap(mvi->regs_ex);
333 mvi->regs_ex = NULL;
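mvs_ioremap() (source lines 298-333) maps up to two PCI BARs: the optional expansion BAR first, then the main register BAR, unwinding the first mapping if the second fails. Only memory-space BARs go through ioremap(); an I/O-port BAR is kept as a raw cookie, which is why the unwind checks res_flag_ex & IORESOURCE_MEM. A sketch under those assumptions:

int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
{
        struct pci_dev *pdev = mvi->pdev;
        unsigned long res_start, res_len, res_flag_ex = 0;

        if (bar_ex != -1) {
                res_start = pci_resource_start(pdev, bar_ex);
                res_len = pci_resource_len(pdev, bar_ex);
                res_flag_ex = pci_resource_flags(pdev, bar_ex);
                if (res_flag_ex & IORESOURCE_MEM)
                        mvi->regs_ex = ioremap(res_start, res_len);
                else
                        mvi->regs_ex = (void *)res_start; /* I/O-port cookie */
                if (!mvi->regs_ex)
                        goto err_out;
        }

        res_start = pci_resource_start(pdev, bar);
        res_len = pci_resource_len(pdev, bar);
        mvi->regs = ioremap(res_start, res_len);
        if (!mvi->regs) {
                /* unwind: only a real MMIO mapping needs iounmap() */
                if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
                        iounmap(mvi->regs_ex);
                mvi->regs_ex = NULL;
                goto err_out;
        }
        return 0;
err_out:
        return -1;
}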
351 struct mvs_info *mvi = NULL;
354 mvi = kzalloc(sizeof(*mvi) +
357 if (!mvi)
360 mvi->pdev = pdev;
361 mvi->dev = &pdev->dev;
362 mvi->chip_id = ent->driver_data;
363 mvi->chip = &mvs_chips[mvi->chip_id];
364 INIT_LIST_HEAD(&mvi->wq_list);
366 ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
367 ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
369 mvi->id = id;
370 mvi->sas = sha;
371 mvi->shost = shost;
373 mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ>>3, GFP_KERNEL);
374 if (!mvi->tags)
377 if (MVS_CHIP_DISP->chip_ioremap(mvi))
379 if (!mvs_alloc(mvi, shost))
380 return mvi;
382 mvs_free(mvi);
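mvs_pci_alloc() (source lines 351-382) allocates one mvs_info per core, registers it in the shared mvs_prv_info behind the SAS HA, allocates the tag bitmap (one bit per hardware slot, hence MVS_CHIP_SLOT_SZ>>3 bytes), maps the registers through the per-chip dispatch, and only then runs mvs_alloc(); any failure funnels into mvs_free(). A sketch of that flow; the kzalloc tail sizing and SHOST_TO_SAS_HA are assumptions:

static struct mvs_info *mvs_pci_alloc(struct pci_dev *pdev,
                const struct pci_device_id *ent,
                struct Scsi_Host *shost, unsigned int id)
{
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); /* assumption */
        struct mvs_info *mvi;

        /* one allocation covers mvs_info plus a per-slot tail (assumption) */
        mvi = kzalloc(sizeof(*mvi) +
                      (1L << mvs_chips[ent->driver_data].slot_width) *
                      sizeof(struct mvs_slot_info), GFP_KERNEL);
        if (!mvi)
                return NULL;

        mvi->pdev = pdev;
        mvi->dev = &pdev->dev;
        mvi->chip_id = ent->driver_data;
        mvi->chip = &mvs_chips[mvi->chip_id];
        INIT_LIST_HEAD(&mvi->wq_list);

        ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
        ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;

        mvi->id = id;
        mvi->sas = sha;
        mvi->shost = shost;

        /* tag bitmap: one bit per hardware slot */
        mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ >> 3, GFP_KERNEL);
        if (!mvi->tags)
                goto err_out;

        if (MVS_CHIP_DISP->chip_ioremap(mvi))
                goto err_out;
        if (!mvs_alloc(mvi, shost))
                return mvi;     /* mvs_alloc() returns 0 on success */
err_out:
        mvs_free(mvi);
        return NULL;
}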
449 struct mvs_info *mvi = NULL;
454 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
457 &mvi->phy[i].sas_phy;
459 &mvi->port[i].sas_port;
464 sha->dev = mvi->dev;
466 sha->sas_addr = &mvi->sas_addr[0];
470 if (mvi->flags & MVF_FLAG_SOC)
477 mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
478 sha->core.shost = mvi->shost;
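Source lines 449-478 are from mvs_post_sas_ha_init(), which fills the sas_ha_struct with pointers into every core's phy and port arrays so libsas sees one flat list across cores. A minimal sketch of the wiring loop; the flat-index arithmetic and num_phys line are assumptions:

        for (j = 0; j < nhost; j++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
                for (i = 0; i < mvi->chip->n_phy; i++) {
                        sha->sas_phy[j * mvi->chip->n_phy + i] =
                                &mvi->phy[i].sas_phy;
                        sha->sas_port[j * mvi->chip->n_phy + i] =
                                &mvi->port[i].sas_port;
                }
        }
        sha->dev = mvi->dev;
        sha->sas_addr = &mvi->sas_addr[0];
        sha->num_phys = nhost * mvi->chip->n_phy;       /* assumption */
        mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
        sha->core.shost = mvi->shost;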
481 static void mvs_init_sas_add(struct mvs_info *mvi)
484 for (i = 0; i < mvi->chip->n_phy; i++) {
485 mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
486 mvi->phy[i].dev_sas_addr =
487 cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
490 memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
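mvs_init_sas_add() (source lines 481-490) gives every phy the same default WWN and immediately rewrites it in big-endian (SAS wire) byte order; phy 0's copy is then published as the HBA-wide address that mvs_phy_init() and mvs_post_sas_ha_init() point at. The matched lines reassembled, with comments:

        for (i = 0; i < mvi->chip->n_phy; i++) {
                /* default WWN, stored native-endian first... */
                mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
                /* ...then swapped in place to big-endian wire order */
                mvi->phy[i].dev_sas_addr =
                        cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
        }
        /* all phys share one address; publish phy 0's copy HBA-wide */
        memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);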
496 struct mvs_info *mvi;
543 mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
544 if (!mvi) {
549 memset(&mvi->hba_info_param, 0xFF,
552 mvs_init_sas_add(mvi);
554 mvi->instance = nhost;
555 rc = MVS_CHIP_DISP->chip_init(mvi);
557 mvs_free(mvi);
582 MVS_CHIP_DISP->interrupt_enable(mvi);
584 scsi_scan_host(mvi->shost);
591 scsi_remove_host(mvi->shost);
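The matches at source lines 496-591 trace the probe path in mvs_pci_init(): allocate each core with mvs_pci_alloc(), prime hba_info_param with 0xFF bytes (read as "not yet filled in"), assign the SAS addresses, run the per-chip init hook, and only after registration unmask interrupts and kick off the SCSI scan; the error path at line 591 unregisters the host again. A condensed sketch of that ordering; the goto label, the memset size and the elided registration calls are assumptions:

        mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
        if (!mvi) {
                rc = -ENOMEM;
                goto err_out;                   /* label is an assumption */
        }
        /* 0xFF in every byte marks hba_info_param as unread (size assumed) */
        memset(&mvi->hba_info_param, 0xFF, sizeof(struct hba_info_page));
        mvs_init_sas_add(mvi);

        mvi->instance = nhost;
        rc = MVS_CHIP_DISP->chip_init(mvi);
        if (rc) {
                mvs_free(mvi);
                goto err_out;
        }
        /* scsi_add_host(), sas_register_ha() and request_irq() happen
         * here (assumption) before interrupts are unmasked */
        MVS_CHIP_DISP->interrupt_enable(mvi);
        scsi_scan_host(mvi->shost);
        return 0;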
604 struct mvs_info *mvi = NULL;
607 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
614 sas_remove_host(mvi->shost);
616 MVS_CHIP_DISP->interrupt_disable(mvi);
617 free_irq(mvi->pdev->irq, sha);
619 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
620 mvs_free(mvi);
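mvs_pci_remove() (source lines 604-620) tears down in the reverse order: detach from libsas first, then silence and release the interrupt, then free every core. A sketch; the drvdata lookup and sas_unregister_ha() call are assumptions:

static void mvs_pci_remove(struct pci_dev *pdev)
{
        struct sas_ha_struct *sha = pci_get_drvdata(pdev); /* assumption */
        struct mvs_info *mvi = NULL;
        unsigned short core_nr, i;

        core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; /* assumption */
        mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

        sas_unregister_ha(sha);                 /* assumption */
        sas_remove_host(mvi->shost);

        MVS_CHIP_DISP->interrupt_disable(mvi);
        free_irq(mvi->pdev->irq, sha);
        for (i = 0; i < core_nr; i++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
                mvs_free(mvi);
        }
}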
715 struct mvs_info *mvi = NULL;
734 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
736 if (unlikely(!mvi))
740 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
742 MVS_CHIP_DISP->tune_interrupt(mvi,
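The last matches (source lines 715-742) are from the sysfs interrupt-coalescing store handler: once the written value is validated, the same tuning request is broadcast to every core through MVS_CHIP_DISP->tune_interrupt(). A sketch of the fan-out; the parsing of val and the surrounding sysfs plumbing are assumed:

        struct mvs_info *mvi = NULL;
        u32 core_nr, i;

        core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; /* assumption */
        mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
        if (unlikely(!mvi))
                return -EINVAL;

        for (i = 0; i < core_nr; i++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
                MVS_CHIP_DISP->tune_interrupt(mvi, val);
        }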