Lines matching refs: mvi
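The matches below come from the Marvell mvsas SAS/SATA driver's PCI initialization code; only lines containing "mvi" (the driver's per-controller struct mvs_info pointer) are listed, so everything between matches is elided.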
78 static void mvs_phy_init(struct mvs_info *mvi, int phy_id)
80 struct mvs_phy *phy = &mvi->phy[phy_id];
83 phy->mvi = mvi;
86 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
94 sas_phy->sas_addr = &mvi->sas_addr[0];
96 sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
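mvs_phy_init() wires each phy back to its owning controller and enables only the phys the chip actually implements. A minimal sketch of that pattern; struct hba and struct hba_phy are simplified stand-ins, not the driver's real structures:

struct hba;                             /* simplified stand-in types */

struct hba_phy {
    struct hba *owner;                  /* back-pointer, as phy->mvi = mvi */
    int enabled;
};

struct hba {
    int n_phy;                          /* phys this chip implements */
    struct hba_phy phy[8];              /* array may be larger than n_phy */
};

static void hba_phy_init(struct hba *h, int phy_id)
{
    struct hba_phy *p = &h->phy[phy_id];

    p->owner = h;
    /* Enable only real phys, as in sas_phy->enabled = (phy_id < n_phy). */
    p->enabled = (phy_id < h->n_phy) ? 1 : 0;
}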
100 static void mvs_free(struct mvs_info *mvi)
105 if (!mvi)
108 if (mvi->flags & MVF_FLAG_SOC)
113 dma_pool_destroy(mvi->dma_pool);
115 if (mvi->tx)
116 dma_free_coherent(mvi->dev,
117 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
118 mvi->tx, mvi->tx_dma);
119 if (mvi->rx_fis)
120 dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
121 mvi->rx_fis, mvi->rx_fis_dma);
122 if (mvi->rx)
123 dma_free_coherent(mvi->dev,
124 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
125 mvi->rx, mvi->rx_dma);
126 if (mvi->slot)
127 dma_free_coherent(mvi->dev,
128 sizeof(*mvi->slot) * slot_nr,
129 mvi->slot, mvi->slot_dma);
131 if (mvi->bulk_buffer)
132 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
133 mvi->bulk_buffer, mvi->bulk_buffer_dma);
134 if (mvi->bulk_buffer1)
135 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
136 mvi->bulk_buffer1, mvi->bulk_buffer_dma1);
138 MVS_CHIP_DISP->chip_iounmap(mvi);
139 if (mvi->shost)
140 scsi_host_put(mvi->shost);
141 list_for_each_entry(mwq, &mvi->wq_list, entry)
143 kfree(mvi->rsvd_tags);
144 kfree(mvi);
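mvs_free() is the single teardown path: every DMA region is freed only when its pointer is non-NULL, so the same function can unwind a partially completed mvs_alloc(). A hedged sketch of that alloc/free pairing using the real dma_alloc_coherent()/dma_free_coherent() API; struct ring_bufs and its fields are illustrative:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct ring_bufs {                      /* illustrative, not the driver's layout */
    struct device *dev;
    __le32 *tx;                         /* command ring, like mvi->tx */
    dma_addr_t tx_dma;
};

static int rings_alloc(struct ring_bufs *r, size_t tx_sz)
{
    r->tx = dma_alloc_coherent(r->dev, tx_sz, &r->tx_dma, GFP_KERNEL);
    if (!r->tx)
        return -ENOMEM;                 /* caller may still call rings_free() */
    return 0;
}

static void rings_free(struct ring_bufs *r, size_t tx_sz)
{
    if (r->tx)                          /* safe on a half-initialized struct */
        dma_free_coherent(r->dev, tx_sz, r->tx, r->tx_dma);
    r->tx = NULL;
}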
153 struct mvs_info *mvi;
157 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
159 if (unlikely(!mvi))
162 stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq);
167 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
168 MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat);
171 MVS_CHIP_DISP->interrupt_enable(mvi);
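This fragment (apparently the driver's tasklet) samples the interrupt status once through instance 0 and then dispatches it to every controller behind the SAS host adapter. A sketch of that fan-out, with stand-in types and hypothetical helpers:

#define N_CTRL 2                        /* stand-in instance count */

struct ctrl { int id; };

extern unsigned int read_status(struct ctrl *c);        /* hypothetical */
extern void handle(struct ctrl *c, unsigned int stat);  /* hypothetical */

static void tasklet_body(struct ctrl *ctrls[N_CTRL])
{
    /* One status read through instance 0, like ->mvi[0] above... */
    unsigned int stat = read_status(ctrls[0]);
    int i;

    /* ...dispatched to every instance, like the ->mvi[i] loop. */
    for (i = 0; i < N_CTRL; i++)
        handle(ctrls[i], stat);
}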
179 struct mvs_info *mvi;
188 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
190 if (unlikely(!mvi))
193 MVS_CHIP_DISP->interrupt_disable(mvi);
196 stat = MVS_CHIP_DISP->isr_status(mvi, irq);
199 MVS_CHIP_DISP->interrupt_enable(mvi);
208 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
209 MVS_CHIP_DISP->isr(mvi, irq, stat);
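The hard-IRQ path above follows the usual shared-interrupt discipline: disable the controller's interrupt, sample the status register, and bail out (re-enabling) when no bit is set. A sketch assuming hypothetical hw_irq_*() helpers; only irqreturn_t, IRQ_NONE, and IRQ_HANDLED are real kernel names here:

#include <linux/interrupt.h>
#include <linux/types.h>

struct ha_priv;                         /* stand-in private data */

extern void hw_irq_disable(struct ha_priv *ha);             /* hypothetical */
extern void hw_irq_enable(struct ha_priv *ha);              /* hypothetical */
extern u32 hw_irq_status(struct ha_priv *ha, int irq);      /* hypothetical */
extern void dispatch_all(struct ha_priv *ha, int irq, u32 stat);

static irqreturn_t my_isr(int irq, void *dev_id)
{
    struct ha_priv *ha = dev_id;
    u32 stat;

    hw_irq_disable(ha);
    stat = hw_irq_status(ha, irq);
    if (!stat) {
        hw_irq_enable(ha);              /* not ours: let other sharers run */
        return IRQ_NONE;
    }
    dispatch_all(ha, irq, stat);
    return IRQ_HANDLED;
}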
215 static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
220 if (mvi->flags & MVF_FLAG_SOC)
225 spin_lock_init(&mvi->lock);
226 for (i = 0; i < mvi->chip->n_phy; i++) {
227 mvs_phy_init(mvi, i);
228 mvi->port[i].wide_port_phymap = 0;
229 mvi->port[i].port_attached = 0;
230 INIT_LIST_HEAD(&mvi->port[i].list);
233 mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
234 mvi->devices[i].dev_type = SAS_PHY_UNUSED;
235 mvi->devices[i].device_id = i;
236 mvi->devices[i].dev_status = MVS_DEV_NORMAL;
242 mvi->tx = dma_alloc_coherent(mvi->dev,
243 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
244 &mvi->tx_dma, GFP_KERNEL);
245 if (!mvi->tx)
247 mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
248 &mvi->rx_fis_dma, GFP_KERNEL);
249 if (!mvi->rx_fis)
252 mvi->rx = dma_alloc_coherent(mvi->dev,
253 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
254 &mvi->rx_dma, GFP_KERNEL);
255 if (!mvi->rx)
257 mvi->rx[0] = cpu_to_le32(0xfff);
258 mvi->rx_cons = 0xfff;
260 mvi->slot = dma_alloc_coherent(mvi->dev,
261 sizeof(*mvi->slot) * slot_nr,
262 &mvi->slot_dma, GFP_KERNEL);
263 if (!mvi->slot)
266 mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
268 &mvi->bulk_buffer_dma, GFP_KERNEL);
269 if (!mvi->bulk_buffer)
272 mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev,
274 &mvi->bulk_buffer_dma1, GFP_KERNEL);
275 if (!mvi->bulk_buffer1)
278 sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
279 mvi->dma_pool = dma_pool_create(pool_name, &mvi->pdev->dev,
281 if (!mvi->dma_pool) {
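Two details in mvs_alloc() are easy to miss: the RX ring is allocated with one extra slot because rx[0] holds the hardware-written producer index, and both that index and rx_cons start at the sentinel 0xfff so the ring initially reads as empty. A small userspace demonstration of that convention; the masking here is illustrative, not the driver's exact arithmetic:

#include <stdint.h>
#include <stdio.h>

#define RING_SZ 0x100                   /* stand-in for MVS_RX_RING_SZ */

int main(void)
{
    uint32_t rx[RING_SZ + 1];           /* +1: rx[0] is the producer index */
    uint32_t cons = 0xfff;              /* consumer starts at the sentinel */

    rx[0] = 0xfff;                      /* producer sentinel: ring is empty */
    printf("empty: %s\n", cons == rx[0] ? "yes" : "no");

    rx[0] = 0;                          /* "hardware" produced one entry, wrapped */
    while (cons != rx[0]) {
        cons = (cons + 1) & 0xfff;      /* advance mod 4096 */
        printf("descriptor slot %u\n",
               (cons & (RING_SZ - 1)) + 1);     /* skip rx[0] */
    }
    return 0;
}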
292 int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
295 struct pci_dev *pdev = mvi->pdev;
307 mvi->regs_ex = ioremap(res_start, res_len);
309 mvi->regs_ex = (void *)res_start;
310 if (!mvi->regs_ex)
317 iounmap(mvi->regs_ex);
318 mvi->regs_ex = NULL;
322 mvi->regs = ioremap(res_start, res_len);
324 if (!mvi->regs) {
325 if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
326 iounmap(mvi->regs_ex);
327 mvi->regs_ex = NULL;
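mvs_ioremap() maps the optional extended BAR first and the main register BAR second, unmapping the first again if the second fails (the fragment also shows an I/O-port fallback, where res_start is stored directly when the resource is not IORESOURCE_MEM). A simplified sketch of the memory-mapped case, using the real pci_resource_*() and ioremap()/iounmap() APIs:

#include <linux/pci.h>
#include <linux/io.h>
#include <linux/errno.h>

static int map_bars(struct pci_dev *pdev, void __iomem **regs,
                    void __iomem **regs_ex, int bar, int bar_ex)
{
    *regs_ex = ioremap(pci_resource_start(pdev, bar_ex),
                       pci_resource_len(pdev, bar_ex));
    if (!*regs_ex)
        return -ENOMEM;

    *regs = ioremap(pci_resource_start(pdev, bar),
                    pci_resource_len(pdev, bar));
    if (!*regs) {
        iounmap(*regs_ex);              /* unwind the first mapping */
        *regs_ex = NULL;
        return -ENOMEM;
    }
    return 0;
}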
345 struct mvs_info *mvi = NULL;
348 mvi = kzalloc(sizeof(*mvi) +
351 if (!mvi)
354 mvi->pdev = pdev;
355 mvi->dev = &pdev->dev;
356 mvi->chip_id = ent->driver_data;
357 mvi->chip = &mvs_chips[mvi->chip_id];
358 INIT_LIST_HEAD(&mvi->wq_list);
360 ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
361 ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
363 mvi->id = id;
364 mvi->sas = sha;
365 mvi->shost = shost;
367 mvi->rsvd_tags = bitmap_zalloc(MVS_RSVD_SLOTS, GFP_KERNEL);
368 if (!mvi->rsvd_tags)
371 if (MVS_CHIP_DISP->chip_ioremap(mvi))
373 if (!mvs_alloc(mvi, shost))
374 return mvi;
376 mvs_free(mvi);
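mvs_pci_alloc() shows the allocate, publish, initialize-or-unwind shape: the zeroed mvs_info is linked into the private array before setup, and every failure funnels into the one mvs_free() teardown. A sketch with stand-in names; only kzalloc() is the real API here:

#include <linux/slab.h>

struct hba { int id; };                 /* stand-in types */
struct host_adapter { struct hba *instance[4]; };

extern int hba_init_resources(struct hba *h);   /* hypothetical */
extern void hba_free(struct hba *h);            /* hypothetical */

static struct hba *hba_alloc(struct host_adapter *sha, int id)
{
    struct hba *h = kzalloc(sizeof(*h), GFP_KERNEL);

    if (!h)
        return NULL;

    sha->instance[id] = h;              /* publish, as ->mvi[id] = mvi above */
    h->id = id;

    if (hba_init_resources(h))
        goto err_out;
    return h;

err_out:
    hba_free(h);                        /* one teardown path for every error */
    return NULL;
}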
443 struct mvs_info *mvi = NULL;
448 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
451 &mvi->phy[i].sas_phy;
453 &mvi->port[i].sas_port;
458 sha->dev = mvi->dev;
459 sha->sas_addr = &mvi->sas_addr[0];
463 if (mvi->flags & MVF_FLAG_SOC)
472 mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
473 sha->shost = mvi->shost;
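Here each controller's phys and ports are flattened into the libsas host adapter's arrays, indexed j * n_phy + i across controllers. A sketch of that wiring; in the driver the types are sas_ha_struct, mvs_phy, and mvs_port, simplified here to stand-ins:

struct sphy { int dummy; };             /* stand-in types */
struct sport { int dummy; };
struct hba {
    struct sphy phy[8];
    struct sport port[8];
};
struct ha {
    struct sphy **sas_phy;
    struct sport **sas_port;
};

static void wire_phys(struct ha *sha, struct hba **inst, int n_host, int n_phy)
{
    for (int j = 0; j < n_host; j++)
        for (int i = 0; i < n_phy; i++) {
            /* flat index across controllers, as in the fragment */
            sha->sas_phy[j * n_phy + i]  = &inst[j]->phy[i];
            sha->sas_port[j * n_phy + i] = &inst[j]->port[i];
        }
}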
476 static void mvs_init_sas_add(struct mvs_info *mvi)
479 for (i = 0; i < mvi->chip->n_phy; i++) {
480 mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
481 mvi->phy[i].dev_sas_addr =
482 cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
485 memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
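mvs_init_sas_add() seeds every phy with the same default WWN and byte-swaps it with cpu_to_be64(), since SAS addresses are big-endian on the wire; the host adapter's address is then copied from phy 0. A runnable userspace illustration using htobe64() as the cpu_to_be64() equivalent:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint64_t wwn = 0x5005043011ab0000ULL;   /* default WWN from the fragment */
    uint64_t be = htobe64(wwn);             /* cpu_to_be64() equivalent */
    unsigned char addr[8];

    memcpy(addr, &be, sizeof(addr));
    for (int i = 0; i < 8; i++)
        printf("%02x", addr[i]);            /* prints 5005043011ab0000 */
    printf("\n");
    return 0;
}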
491 struct mvs_info *mvi;
537 mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
538 if (!mvi) {
543 memset(&mvi->hba_info_param, 0xFF,
546 mvs_init_sas_add(mvi);
548 mvi->instance = nhost;
549 rc = MVS_CHIP_DISP->chip_init(mvi);
551 mvs_free(mvi);
579 MVS_CHIP_DISP->interrupt_enable(mvi);
581 scsi_scan_host(mvi->shost);
588 scsi_remove_host(mvi->shost);
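The probe fragment fixes the ordering: allocate and map, run chip_init() (freeing everything on failure), enable interrupts only after the chip is up, and scan the SCSI host last. An order-of-operations sketch; scsi_scan_host() is the real API, the other helpers are stand-ins:

#include <linux/pci.h>
#include <linux/errno.h>
#include <scsi/scsi_host.h>

struct hba { struct Scsi_Host *shost; };        /* stand-in */

extern struct hba *hba_alloc_and_map(struct pci_dev *pdev); /* hypothetical */
extern int hw_chip_init(struct hba *h);                     /* hypothetical */
extern void hw_irq_enable(struct hba *h);                   /* hypothetical */
extern void hba_free(struct hba *h);                        /* hypothetical */

static int my_probe(struct pci_dev *pdev)
{
    struct hba *h = hba_alloc_and_map(pdev);

    if (!h)
        return -ENOMEM;
    if (hw_chip_init(h))
        goto err_free;                  /* fragment calls mvs_free() here */

    hw_irq_enable(h);                   /* only after the chip is up */
    scsi_scan_host(h->shost);           /* last: scanning may start I/O */
    return 0;

err_free:
    hba_free(h);
    return -ENODEV;
}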
601 struct mvs_info *mvi = NULL;
604 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
611 sas_remove_host(mvi->shost);
613 MVS_CHIP_DISP->interrupt_disable(mvi);
614 free_irq(mvi->pdev->irq, sha);
616 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
617 mvs_free(mvi);
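Removal mirrors probe in reverse: detach from libsas first, then silence and release the interrupt, then free each controller instance. A sketch along those lines; sas_remove_host(), free_irq(), and pci_get_drvdata() are real kernel calls, the rest are stand-ins:

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <scsi/libsas.h>

struct hba { struct Scsi_Host *shost; };        /* stand-in types */
struct adapter {
    struct hba *instance[4];
    int n_host;
};

extern void hw_irq_disable(struct hba *h);      /* hypothetical */
extern void hba_free(struct hba *h);            /* hypothetical */

static void my_remove(struct pci_dev *pdev)
{
    struct adapter *a = pci_get_drvdata(pdev);
    struct hba *h = a->instance[0];

    sas_remove_host(h->shost);          /* detach from libsas first */
    hw_irq_disable(h);
    free_irq(pdev->irq, a);             /* dev_id must match request_irq() */

    for (int i = 0; i < a->n_host; i++)
        hba_free(a->instance[i]);
}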
707 struct mvs_info *mvi = NULL;
726 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
728 if (unlikely(!mvi))
732 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
734 MVS_CHIP_DISP->tune_interrupt(mvi,
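The final fragment is the interrupt-coalescing tuning path: a single sysfs-style write fans out to tune_interrupt() on every controller instance, matching the per-instance dispatch used by the ISR. A minimal sketch with hypothetical names:

#include <linux/types.h>

struct hba;                             /* stand-in types */
struct adapter {
    struct hba *instance[4];
    int n_host;
};

extern void hw_tune_interrupt(struct hba *h, u32 val);  /* hypothetical */

static void tune_all(struct adapter *a, u32 val)
{
    /* One write applied to every instance, like the ->mvi[i] loop above. */
    for (int i = 0; i < a->n_host; i++)
        hw_tune_interrupt(a->instance[i], val);
}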