Lines matching refs: fc_pci
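Every match below dereferences a member of struct flexcop_pci, the PCI bus-specific state hung off the common struct flexcop_device. As an orientation aid, here is a rough reconstruction of that struct inferred purely from the member accesses in this listing; field order, exact types, and comments are assumptions, not copied from the driver's header.

/* Sketch of struct flexcop_pci as implied by the usage below (assumption). */
struct flexcop_pci {
	struct pci_dev *pdev;
	int init_state;			/* FC_PCI_INIT | FC_PCI_DMA_INIT */
	void __iomem *io_mem;		/* BAR0 mapping used by readl()/writel() */

	struct flexcop_dma dma[2];	/* DMA1 (TS data) and DMA2 buffers */
	int active_dma1_addr;		/* which DMA1 sub-buffer is being filled */
	u32 last_dma1_cur_pos;		/* DMA1 position at the last timer IRQ */
	int count, count_prev;		/* IRQ counters for the watchdog */
	int stream_problem;		/* consecutive checks without progress */

	spinlock_t irq_lock;
	unsigned long last_irq;		/* jiffies of the last timer IRQ */
	struct delayed_work irq_check_work;

	struct flexcop_device *fc_dev;	/* back-pointer to the common device */
};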
79 struct flexcop_pci *fc_pci = fc->bus_specific;
81 v.raw = readl(fc_pci->io_mem + r);
94 struct flexcop_pci *fc_pci = fc->bus_specific;
101 writel(v.raw, fc_pci->io_mem + r);
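The first two groups of matches (lines 79-101) sit in the memory-mapped register accessors that the common flexcop code calls through function pointers: a register offset is simply added to the io_mem base mapped from BAR0. A plausible reading of the two functions around those lines, following the driver's flexcop_pci_*_ibi_reg naming; treat the exact signatures as assumptions.

/* Register access sketch inferred from the matched lines. */
static flexcop_ibi_value flexcop_pci_read_ibi_reg(struct flexcop_device *fc,
		flexcop_ibi_register r)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;
	flexcop_ibi_value v;
	v.raw = readl(fc_pci->io_mem + r);
	return v;
}

static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc,
		flexcop_ibi_register r, flexcop_ibi_value v)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;
	writel(v.raw, fc_pci->io_mem + r);
	return 0;
}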
107 struct flexcop_pci *fc_pci =
109 struct flexcop_device *fc = fc_pci->fc_dev;
113 if (fc_pci->count == fc_pci->count_prev) {
115 if (fc_pci->stream_problem++ == 3) {
131 fc_pci->stream_problem = 0;
134 fc_pci->stream_problem = 0;
135 fc_pci->count_prev = fc_pci->count;
139 schedule_delayed_work(&fc_pci->irq_check_work,
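Lines 107-139 come from the delayed-work watchdog: it compares the ISR's counter with the value remembered from the previous run, and if nothing has advanced for several consecutive checks it assumes streaming is stuck and restarts the feeds. A hedged sketch of that control flow; the feed-restart body is elided, and the feedcount guard and irq_chk_intv interval parameter are assumptions.

static void flexcop_pci_irq_check_work(struct work_struct *work)
{
	struct flexcop_pci *fc_pci =
		container_of(work, struct flexcop_pci, irq_check_work.work);
	struct flexcop_device *fc = fc_pci->fc_dev;

	if (fc->feedcount) {	/* only meaningful while feeds are active (assumption) */
		if (fc_pci->count == fc_pci->count_prev) {
			/* ISR count has not advanced since the last check */
			if (fc_pci->stream_problem++ == 3) {
				/* ... toggle the active demux feeds off and back
				 * on to restart streaming (elided) ... */
				fc_pci->stream_problem = 0;
			}
		} else {
			fc_pci->stream_problem = 0;
			fc_pci->count_prev = fc_pci->count;
		}
	}

	/* re-arm the watchdog */
	schedule_delayed_work(&fc_pci->irq_check_work,
			msecs_to_jiffies(irq_chk_intv));
}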
148 struct flexcop_pci *fc_pci = dev_id;
149 struct flexcop_device *fc = fc_pci->fc_dev;
154 spin_lock_irqsave(&fc_pci->irq_lock, flags);
167 if ((fc_pci->count % 1000) == 0)
168 deb_chk("%d valid irq took place so far\n", fc_pci->count);
171 if (fc_pci->active_dma1_addr == 0)
172 flexcop_pass_dmx_packets(fc_pci->fc_dev,
173 fc_pci->dma[0].cpu_addr0,
174 fc_pci->dma[0].size / 188);
176 flexcop_pass_dmx_packets(fc_pci->fc_dev,
177 fc_pci->dma[0].cpu_addr1,
178 fc_pci->dma[0].size / 188);
180 deb_irq("page change to page: %d\n",!fc_pci->active_dma1_addr);
181 fc_pci->active_dma1_addr = !fc_pci->active_dma1_addr;
187 u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0;
188 if (cur_pos > fc_pci->dma[0].size * 2)
192 jiffies_to_usecs(jiffies - fc_pci->last_irq),
194 fc_pci->last_dma1_cur_pos);
195 fc_pci->last_irq = jiffies;
200 if (cur_pos < fc_pci->last_dma1_cur_pos) {
202 (fc_pci->dma[0].size*2 - 1) -
203 fc_pci->last_dma1_cur_pos);
204 flexcop_pass_dmx_data(fc_pci->fc_dev,
205 fc_pci->dma[0].cpu_addr0 +
206 fc_pci->last_dma1_cur_pos,
207 (fc_pci->dma[0].size*2) -
208 fc_pci->last_dma1_cur_pos);
209 fc_pci->last_dma1_cur_pos = 0;
212 if (cur_pos > fc_pci->last_dma1_cur_pos) {
214 cur_pos - fc_pci->last_dma1_cur_pos);
215 flexcop_pass_dmx_data(fc_pci->fc_dev,
216 fc_pci->dma[0].cpu_addr0 +
217 fc_pci->last_dma1_cur_pos,
218 cur_pos - fc_pci->last_dma1_cur_pos);
222 fc_pci->last_dma1_cur_pos = cur_pos;
223 fc_pci->count++;
231 spin_unlock_irqrestore(&fc_pci->irq_lock, flags);
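The bulk of the matches (lines 148-231) are in the shared interrupt handler. Two data paths can be read off the matched lines: on a DMA1 page-change IRQ one complete sub-buffer is handed to the demux as whole 188-byte TS packets and the active page flips, while on a DMA1 timer IRQ only the bytes written since the last recorded position are passed, with a wrap at the end of the double buffer. A condensed, hedged sketch of that flow; the status-register and bit-field names (irq_20c, dma1_008, DMA1_IRQ_Status, DMA1_Timer_Status, dma_cur_addr) are assumptions based on the rest of the driver, and the debug output is dropped.

static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
{
	struct flexcop_pci *fc_pci = dev_id;
	struct flexcop_device *fc = fc_pci->fc_dev;
	unsigned long flags;
	flexcop_ibi_value v;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock_irqsave(&fc_pci->irq_lock, flags);
	v = fc->read_ibi_reg(fc, irq_20c);	/* assumed status register */

	if (v.irq_20c.DMA1_IRQ_Status == 1) {
		/* page change: a full DMA1 sub-buffer, pass it as TS packets */
		if (fc_pci->active_dma1_addr == 0)
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0,
					fc_pci->dma[0].size / 188);
		else
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr1,
					fc_pci->dma[0].size / 188);
		fc_pci->active_dma1_addr = !fc_pci->active_dma1_addr;
	} else if (v.irq_20c.DMA1_Timer_Status == 1) {
		/* timer IRQ: pass only the bytes written since last time */
		dma_addr_t cur_addr =
			fc->read_ibi_reg(fc, dma1_008).dma_0x8.dma_cur_addr << 2;
		u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0;

		if (cur_pos > fc_pci->dma[0].size * 2)
			goto error;	/* position out of range, bail out */
		fc_pci->last_irq = jiffies;

		if (cur_pos < fc_pci->last_dma1_cur_pos) {
			/* wrapped: flush from the last position to buffer end */
			flexcop_pass_dmx_data(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0 +
						fc_pci->last_dma1_cur_pos,
					(fc_pci->dma[0].size * 2) -
						fc_pci->last_dma1_cur_pos);
			fc_pci->last_dma1_cur_pos = 0;
		}
		if (cur_pos > fc_pci->last_dma1_cur_pos)
			flexcop_pass_dmx_data(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0 +
						fc_pci->last_dma1_cur_pos,
					cur_pos - fc_pci->last_dma1_cur_pos);

		fc_pci->last_dma1_cur_pos = cur_pos;
		fc_pci->count++;
	} else {
		ret = IRQ_NONE;	/* shared IRQ line, not ours this time */
	}

error:
	spin_unlock_irqrestore(&fc_pci->irq_lock, flags);
	return ret;
}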
237 struct flexcop_pci *fc_pci = fc->bus_specific;
239 flexcop_dma_config(fc, &fc_pci->dma[0], FC_DMA_1);
240 flexcop_dma_config(fc, &fc_pci->dma[1], FC_DMA_2);
246 fc_pci->last_dma1_cur_pos = 0;
249 fc_pci->count_prev = fc_pci->count;
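Lines 237-249 belong to the stream-control callback: when streaming is switched on, both DMA channels are configured, the DMA1 position bookkeeping is reset, and the watchdog's reference counter is synchronized with the current IRQ count. A small hedged sketch of the "on" path; the helpers that actually enable the transfers and the timer IRQ are elided.

static int flexcop_pci_stream_control(struct flexcop_device *fc, int onoff)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;

	if (onoff) {
		flexcop_dma_config(fc, &fc_pci->dma[0], FC_DMA_1);
		flexcop_dma_config(fc, &fc_pci->dma[1], FC_DMA_2);
		/* ... enable DMA transfers and the DMA1 timer IRQ (elided) ... */
		fc_pci->last_dma1_cur_pos = 0;
		fc_pci->count_prev = fc_pci->count;
	} else {
		/* ... disable the timer IRQ and DMA transfers (elided) ... */
	}
	return 0;
}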
261 static int flexcop_pci_dma_init(struct flexcop_pci *fc_pci)
264 ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[0],
269 ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[1],
272 flexcop_dma_free(&fc_pci->dma[0]);
276 flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_MEDIA |
278 flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_CAO |
280 fc_pci->init_state |= FC_PCI_DMA_INIT;
284 static void flexcop_pci_dma_exit(struct flexcop_pci *fc_pci)
286 if (fc_pci->init_state & FC_PCI_DMA_INIT) {
287 flexcop_dma_free(&fc_pci->dma[0]);
288 flexcop_dma_free(&fc_pci->dma[1]);
290 fc_pci->init_state &= ~FC_PCI_DMA_INIT;
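flexcop_pci_dma_init() (lines 261-280) allocates the two DMA buffers, points the FlexCop SRAM destinations at them, and records FC_PCI_DMA_INIT in init_state so that flexcop_pci_dma_exit() (lines 284-290) only frees what was actually set up. A hedged sketch of the init error handling implied by the matches; the buffer-size constants and the second half of each sram_set_dest argument list are assumptions.

static int flexcop_pci_dma_init(struct flexcop_pci *fc_pci)
{
	int ret;

	ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[0],
			FC_DEFAULT_DMA1_BUFSIZE);	/* assumed constant */
	if (ret != 0)
		return ret;

	ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[1],
			FC_DEFAULT_DMA2_BUFSIZE);	/* assumed constant */
	if (ret != 0) {
		flexcop_dma_free(&fc_pci->dma[0]);	/* undo the first allocation */
		return ret;
	}

	/* route media/net traffic to DMA1 and CA traffic to DMA2 (assumption) */
	flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_MEDIA |
			FC_SRAM_DEST_NET, FC_SRAM_DEST_TARGET_DMA1);
	flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_CAO |
			FC_SRAM_DEST_CAI, FC_SRAM_DEST_TARGET_DMA2);

	fc_pci->init_state |= FC_PCI_DMA_INIT;
	return 0;
}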
293 static int flexcop_pci_init(struct flexcop_pci *fc_pci)
297 info("card revision %x", fc_pci->pdev->revision);
299 if ((ret = pci_enable_device(fc_pci->pdev)) != 0)
301 pci_set_master(fc_pci->pdev);
303 if ((ret = pci_request_regions(fc_pci->pdev, DRIVER_NAME)) != 0)
306 fc_pci->io_mem = pci_iomap(fc_pci->pdev, 0, 0x800);
308 if (!fc_pci->io_mem) {
314 pci_set_drvdata(fc_pci->pdev, fc_pci);
315 spin_lock_init(&fc_pci->irq_lock);
316 if ((ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr,
317 IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0)
320 fc_pci->init_state |= FC_PCI_INIT;
324 pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
326 pci_release_regions(fc_pci->pdev);
328 pci_disable_device(fc_pci->pdev);
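flexcop_pci_init() (lines 293-328) is a straight-line bring-up with goto-based unwinding: enable the PCI device, become bus master, claim the regions, map 0x800 bytes of BAR0 into io_mem, then install the shared interrupt handler; the matches at lines 324-328 are the error labels that undo those steps in reverse order. A hedged sketch of that structure, with label names chosen here for illustration.

static int flexcop_pci_init(struct flexcop_pci *fc_pci)
{
	int ret;

	info("card revision %x", fc_pci->pdev->revision);

	ret = pci_enable_device(fc_pci->pdev);
	if (ret != 0)
		return ret;
	pci_set_master(fc_pci->pdev);

	ret = pci_request_regions(fc_pci->pdev, DRIVER_NAME);
	if (ret != 0)
		goto err_disable;

	fc_pci->io_mem = pci_iomap(fc_pci->pdev, 0, 0x800);
	if (!fc_pci->io_mem) {
		ret = -EIO;
		goto err_release;
	}

	pci_set_drvdata(fc_pci->pdev, fc_pci);
	spin_lock_init(&fc_pci->irq_lock);

	ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr,
			IRQF_SHARED, DRIVER_NAME, fc_pci);
	if (ret != 0)
		goto err_iounmap;

	fc_pci->init_state |= FC_PCI_INIT;
	return 0;

err_iounmap:
	pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
err_release:
	pci_release_regions(fc_pci->pdev);
err_disable:
	pci_disable_device(fc_pci->pdev);
	return ret;
}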
332 static void flexcop_pci_exit(struct flexcop_pci *fc_pci)
334 if (fc_pci->init_state & FC_PCI_INIT) {
335 free_irq(fc_pci->pdev->irq, fc_pci);
336 pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
337 pci_release_regions(fc_pci->pdev);
338 pci_disable_device(fc_pci->pdev);
340 fc_pci->init_state &= ~FC_PCI_INIT;
347 struct flexcop_pci *fc_pci;
356 fc_pci = fc->bus_specific;
357 fc_pci->fc_dev = fc;
376 fc_pci->pdev = pdev;
377 if ((ret = flexcop_pci_init(fc_pci)) != 0)
385 if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
388 INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);
391 schedule_delayed_work(&fc_pci->irq_check_work,
400 flexcop_pci_exit(fc_pci);
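The probe-side matches (lines 347-400) show the ordering: allocate the common flexcop device with PCI-sized bus-specific storage, cross-link fc and fc_pci, run the PCI bring-up, then the DMA setup, and finally arm the IRQ watchdog; the remove path at lines 411-419 undoes this in reverse. A hedged sketch of the probe sequence, with the common-flexcop callback wiring and the flexcop_device_initialize() step compressed to comments and the error labels chosen here for illustration.

static int flexcop_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	struct flexcop_device *fc;
	struct flexcop_pci *fc_pci;
	int ret;

	fc = flexcop_device_kmalloc(sizeof(struct flexcop_pci));
	if (fc == NULL)
		return -ENOMEM;

	/* cross-link the common device and the PCI-specific part */
	fc_pci = fc->bus_specific;
	fc_pci->fc_dev = fc;

	/* ... set fc->read_ibi_reg / write_ibi_reg / stream_control and the
	 * other common-flexcop fields here (elided) ... */

	fc_pci->pdev = pdev;
	ret = flexcop_pci_init(fc_pci);
	if (ret != 0)
		goto err_kfree;

	/* ... flexcop_device_initialize(fc) would run here (assumption) ... */

	ret = flexcop_pci_dma_init(fc_pci);
	if (ret != 0)
		goto err_pci_exit;

	/* arm the IRQ watchdog */
	INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);
	schedule_delayed_work(&fc_pci->irq_check_work,
			msecs_to_jiffies(irq_chk_intv));
	return 0;

err_pci_exit:
	flexcop_pci_exit(fc_pci);
err_kfree:
	flexcop_device_kfree(fc);
	return ret;
}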
411 struct flexcop_pci *fc_pci = pci_get_drvdata(pdev);
414 cancel_delayed_work(&fc_pci->irq_check_work);
416 flexcop_pci_dma_exit(fc_pci);
417 flexcop_device_exit(fc_pci->fc_dev);
418 flexcop_pci_exit(fc_pci);
419 flexcop_device_kfree(fc_pci->fc_dev);