Lines matching refs: cio2

34 #include "ipu3-cio2.h"
111 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
113 struct device *dev = &cio2->pci_dev->dev;
115 if (cio2->dummy_lop) {
116 dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
117 cio2->dummy_lop_bus_addr);
118 cio2->dummy_lop = NULL;
120 if (cio2->dummy_page) {
121 dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
122 cio2->dummy_page_bus_addr);
123 cio2->dummy_page = NULL;
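
Lines 111-123 outline the dummy free-buffer-pointer-table (FBPT) teardown: both DMA-coherent pages are freed and the pointers cleared, so a second call is harmless. A minimal sketch of what these fragments suggest, with the PAGE_SIZE-sized coherent allocations taken from lines 116 and 121:

static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{
	struct device *dev = &cio2->pci_dev->dev;

	/* Free the dummy LOP (list-of-pointers) page, if allocated */
	if (cio2->dummy_lop) {
		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
				  cio2->dummy_lop_bus_addr);
		cio2->dummy_lop = NULL;	/* make repeated calls safe */
	}
	/* Free the dummy data page the LOP entries point at */
	if (cio2->dummy_page) {
		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
				  cio2->dummy_page_bus_addr);
		cio2->dummy_page = NULL;
	}
}
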
127 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
129 struct device *dev = &cio2->pci_dev->dev;
132 cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
133 &cio2->dummy_page_bus_addr,
135 cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
136 &cio2->dummy_lop_bus_addr,
138 if (!cio2->dummy_page || !cio2->dummy_lop) {
139 cio2_fbpt_exit_dummy(cio2);
147 cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
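
The matching init path (lines 127-147) allocates both pages and points every LOP entry at the dummy page's PFN, so any stray DMA lands somewhere harmless. A sketch, assuming one u32 PFN per LOP entry (the entry width is an assumption; the listing only shows the PFN_DOWN() store at line 147):

static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{
	struct device *dev = &cio2->pci_dev->dev;
	unsigned int i;

	cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
					      &cio2->dummy_page_bus_addr,
					      GFP_KERNEL);
	cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
					     &cio2->dummy_lop_bus_addr,
					     GFP_KERNEL);
	if (!cio2->dummy_page || !cio2->dummy_lop) {
		cio2_fbpt_exit_dummy(cio2);	/* frees whichever one succeeded */
		return -ENOMEM;
	}

	/* Every entry of the dummy LOP refers back to the dummy page */
	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
		cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);

	return 0;
}
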
152 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
172 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
183 entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
185 cio2_fbpt_entry_enable(cio2, entry);
189 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
224 entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
226 cio2_fbpt_entry_enable(cio2, entry);
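
Lines 152-226 show the two FBPT entry initializers: the dummy variant points every LOP slot of an entry at the dummy LOP page before enabling it, while the buffer variant (line 189) programs the buffer's own LOPs and uses the dummy LOP only for its trailing slot (line 224). A sketch of the dummy variant, assuming CIO2_MAX_LOPS slots per entry and a lop_page_addr field (both taken from the listing; other entry fields are initialized first and omitted here):

static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
				       struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
{
	unsigned int i;

	/* Point every LOP slot of this entry at the dummy list of pages */
	for (i = 0; i < CIO2_MAX_LOPS; i++)
		entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);

	cio2_fbpt_entry_enable(cio2, entry);	/* hand the entry to the DMA engine */
}
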
229 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
231 struct device *dev = &cio2->pci_dev->dev;
306 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
310 struct device *dev = &cio2->pci_dev->dev;
347 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
356 void __iomem *const base = cio2->base;
368 r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
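
cio2_hw_init() (line 347) programs the CSI-2 receiver and DMA engine from the queue's format; line 368 shows it delegating bus timing to cio2_csi2_calc_timing() with the per-format bits-per-pixel and lane count. A call-site fragment; fmt (a format descriptor with a bpp field) and lanes are cio2_hw_init() locals whose derivation is omitted, and the timing struct name is inferred from the callee:

	struct cio2_csi2_timing timing;
	int r;

	/* Derive CSI-2 termination/settle timings from the link rate */
	r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
	if (r)
		return r;
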
509 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
511 struct device *dev = &cio2->pci_dev->dev;
512 void __iomem *const base = cio2->base;
539 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
541 struct device *dev = &cio2->pci_dev->dev;
542 struct cio2_queue *q = cio2->cur_queue;
582 cio2_fbpt_entry_init_dummy(cio2, entry);
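
The completion path (lines 539-582) resolves the current queue for a DMA channel and, after handing each finished vb2 buffer back, re-initializes its FBPT entry as a dummy so the hardware can never write into a returned buffer. A sketch with the slot lookup omitted (the index computation is not visible in the listing):

static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
{
	struct cio2_queue *q = cio2->cur_queue;
	unsigned int i = 0;	/* completed slot index; real lookup omitted */
	struct cio2_fbpt_entry *entry = &q->fbpt[i * CIO2_MAX_LOPS];

	/* ... return the finished vb2 buffer to userspace (omitted) ... */

	/* Recycle the slot: park it on the dummy LOP again */
	cio2_fbpt_entry_init_dummy(cio2, entry);
}
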
588 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
657 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
659 struct device *dev = &cio2->pci_dev->dev;
660 void __iomem *const base = cio2->base;
700 cio2_buffer_done(cio2, d);
714 cio2_queue_event_sof(cio2,
715 cio2->cur_queue);
758 struct cio2_device *cio2 = cio2_ptr;
759 void __iomem *const base = cio2->base;
760 struct device *dev = &cio2->pci_dev->dev;
770 cio2_irq_handle_once(cio2, int_status);
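
Lines 657-770 split interrupt handling in two: the handler at line 758 reads the accumulated status and loops over cio2_irq_handle_once() (line 657), which dispatches DMA-completion bits to cio2_buffer_done() (line 700) and start-of-frame events to cio2_queue_event_sof() (line 714). A sketch of the outer loop, assuming a CIO2_REG_INT_STS status register that is acked by writing the seen bits back (the register name and ack semantics are assumptions; the listing shows only the calls):

static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
{
	struct cio2_device *cio2 = cio2_ptr;
	void __iomem *const base = cio2->base;
	struct device *dev = &cio2->pci_dev->dev;
	u32 int_status;

	int_status = readl(base + CIO2_REG_INT_STS);
	if (!int_status)
		return IRQ_NONE;	/* not our interrupt */

	do {
		writel(int_status, base + CIO2_REG_INT_STS);	/* ack what we saw */
		cio2_irq_handle_once(cio2, int_status);
		int_status = readl(base + CIO2_REG_INT_STS);
		if (int_status)
			dev_dbg(dev, "pending status 0x%x\n", int_status);
	} while (int_status);

	return IRQ_HANDLED;
}
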
802 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
803 struct device *dev = &cio2->pci_dev->dev;
823 cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
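
In the vb2 queue_setup hook (lines 802-823), every FBPT slot is parked on the dummy entry before streaming starts, using the CIO2_MAX_LOPS stride visible at line 823. A sketch, with CIO2_MAX_BUFFERS as the assumed ring size and the vbq member name assumed for container_of():

	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	struct cio2_queue *q = container_of(vq, struct cio2_queue, vbq);
	unsigned int i;

	/* Park every FBPT slot on the dummy LOP until a real buffer arrives */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++)
		cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
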
835 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
836 struct device *dev = &cio2->pci_dev->dev;
879 b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
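
Buffer init (lines 835-879) builds each buffer's private LOP tables; line 879 shows LOP slots past the buffer's last page being pointed at the dummy page so the tables are always fully populated. A sketch of that tail-fill, assuming one u32 PFN per LOP entry and a to_cio2_buffer() cast helper (both assumptions):

	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct cio2_buffer *b = to_cio2_buffer(vb);	/* cast helper assumed */
	unsigned int pages = DIV_ROUND_UP(vb2_plane_size(vb, 0), PAGE_SIZE);
	unsigned int entries = PAGE_SIZE / sizeof(u32);
	unsigned int i, j, n;

	for (i = 0, n = 0; i < CIO2_MAX_LOPS; i++)
		for (j = 0; j < entries; j++, n++)
			if (n >= pages)	/* past the buffer: use the dummy page */
				b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
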
887 /* Transfer buffer ownership to cio2 */
890 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
891 struct device *dev = &cio2->pci_dev->dev;
918 fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
943 cio2_fbpt_entry_init_buf(cio2, b, entry);
957 dev_err(dev, "error: all cio2 entries were full!\n");
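
buf_queue (lines 887-957) transfers buffer ownership to the hardware: it samples the DMA engine's current FBPT read pointer from CIO2_REG_CDMARI (line 918), scans forward from there for a free slot to program with cio2_fbpt_entry_init_buf() (line 943), and logs the error at line 957 if every slot is busy. A fragment of the read-pointer sampling, with the shift/mask macro names assumed:

	unsigned int fbpt_rp;

	/* Where the DMA engine currently is in the FBPT ring */
	fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
		   >> CIO2_CDMARI_FBPT_RP_SHIFT)
		  & CIO2_CDMARI_FBPT_RP_MASK;

	/* ... scan forward from fbpt_rp; if no entry is free, log and fail ... */
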
965 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
966 struct device *dev = &cio2->pci_dev->dev;
981 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
982 struct device *dev = &cio2->pci_dev->dev;
985 cio2->cur_queue = q;
998 r = cio2_hw_init(cio2, q);
1007 cio2->streaming = true;
1012 cio2_hw_exit(cio2, q);
1026 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1027 struct device *dev = &cio2->pci_dev->dev;
1032 cio2_hw_exit(cio2, q);
1033 synchronize_irq(cio2->pci_dev->irq);
1037 cio2->streaming = false;
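
Start/stop streaming (lines 981-1037) bracket the hardware: start records the active queue in cio2->cur_queue, runs cio2_hw_init() and sets cio2->streaming, unwinding through cio2_hw_exit() on failure; stop reverses that, and synchronize_irq() (line 1033) guarantees no handler still references the queue when the flag is cleared. A sketch of the stop side (the vbq member name is assumed):

static void cio2_stop_streaming(struct vb2_queue *vq)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	struct cio2_queue *q = container_of(vq, struct cio2_queue, vbq);

	cio2_hw_exit(cio2, q);
	synchronize_irq(cio2->pci_dev->irq);	/* drain in-flight handlers */
	/* ... return queued buffers with VB2_BUF_STATE_ERROR (omitted) ... */
	cio2->streaming = false;
}
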
1324 struct cio2_device *cio2 = video_get_drvdata(vd);
1325 struct device *dev = &cio2->pci_dev->dev;
1388 struct cio2_device *cio2 = to_cio2_device(notifier);
1393 if (cio2->queue[s_asd->csi2.port].sensor)
1400 q = &cio2->queue[s_asd->csi2.port];
1404 q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1414 struct cio2_device *cio2 = to_cio2_device(notifier);
1417 cio2->queue[s_asd->csi2.port].sensor = NULL;
1423 struct cio2_device *cio2 = to_cio2_device(notifier);
1424 struct device *dev = &cio2->pci_dev->dev;
1430 list_for_each_entry(asd, &cio2->notifier.done_list, asc_entry) {
1432 q = &cio2->queue[s_asd->csi2.port];
1453 return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
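
The async notifier callbacks (lines 1388-1453) bind each sensor to its CSI-2 port's queue: bound rejects a second sensor on an already-claimed port (line 1393) and records the per-port register base (line 1404); complete walks the notifier's done_list, creates media links per queue, and finally registers the subdev device nodes (line 1453). A fragment of the bound-side port check, with the s_asd cast helper and the sd assignment assumed:

	struct cio2_device *cio2 = to_cio2_device(notifier);
	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);	/* cast helper assumed */
	struct cio2_queue *q;

	if (cio2->queue[s_asd->csi2.port].sensor)
		return -EBUSY;		/* only one sensor per CSI-2 port */

	q = &cio2->queue[s_asd->csi2.port];
	q->sensor = sd;			/* sd: the newly bound subdev */
	q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
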
1462 static int cio2_parse_firmware(struct cio2_device *cio2)
1464 struct device *dev = &cio2->pci_dev->dev;
1484 s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
1508 cio2->notifier.ops = &cio2_async_ops;
1509 ret = v4l2_async_nf_register(&cio2->notifier);
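
cio2_parse_firmware() (lines 1462-1509) walks the device's fwnode graph, adds each remote sensor endpoint to the notifier via v4l2_async_nf_add_fwnode_remote() (line 1484), then installs cio2_async_ops and registers the notifier. A sketch of the endpoint loop, assuming one endpoint per CSI-2 port and the CIO2_NUM_PORTS constant:

static int cio2_parse_firmware(struct cio2_device *cio2)
{
	struct device *dev = &cio2->pci_dev->dev;
	unsigned int i;

	for (i = 0; i < CIO2_NUM_PORTS; i++) {
		struct sensor_async_subdev *s_asd;
		struct fwnode_handle *ep;

		ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
						     FWNODE_GRAPH_ENDPOINT_NEXT);
		if (!ep)
			continue;

		s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
							struct sensor_async_subdev);
		fwnode_handle_put(ep);
		if (IS_ERR(s_asd))
			return PTR_ERR(s_asd);
		/* ... CSI-2 endpoint bus-config parsing omitted ... */
	}

	cio2->notifier.ops = &cio2_async_ops;
	return v4l2_async_nf_register(&cio2->notifier);
}
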
1525 static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1530 struct device *dev = &cio2->pci_dev->dev;
1560 r = cio2_fbpt_init(cio2, q);
1590 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1592 v4l2_set_subdevdata(subdev, cio2);
1593 r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1607 vbq->drv_priv = cio2;
1617 "%s %td", CIO2_NAME, q - cio2->queue);
1621 vdev->lock = &cio2->lock;
1622 vdev->v4l2_dev = &cio2->v4l2_dev;
1625 video_set_drvdata(vdev, cio2);
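
cio2_queue_init() (lines 1525-1625) wires one queue end to end: FBPT allocation, a subdev and a video device both named with the queue's index, the vb2 queue with cio2 as drv_priv, and a video device sharing cio2->lock and cio2->v4l2_dev. The index is recovered by pointer arithmetic on the cio2->queue[] array and printed with %td (ptrdiff_t), as lines 1590 and 1617 show:

	/* Queue index = offset of q within cio2->queue[] */
	snprintf(subdev->name, sizeof(subdev->name),
		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
	/* ... later, the same idiom names the video device ... */
	snprintf(vdev->name, sizeof(vdev->name),
		 "%s %td", CIO2_NAME, q - cio2->queue);
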
1658 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1664 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1669 static int cio2_queues_init(struct cio2_device *cio2)
1674 r = cio2_queue_init(cio2, &cio2->queue[i]);
1683 cio2_queue_exit(cio2, &cio2->queue[i]);
1688 static void cio2_queues_exit(struct cio2_device *cio2)
1693 cio2_queue_exit(cio2, &cio2->queue[i]);
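
Lines 1669-1693 show the usual init/rollback pair: cio2_queues_init() initializes each queue in turn and, on failure, tears down only the ones that already succeeded; cio2_queues_exit() frees them all. A sketch of the rollback loop, with CIO2_QUEUES as the assumed queue count:

static int cio2_queues_init(struct cio2_device *cio2)
{
	int i, r;

	for (i = 0; i < CIO2_QUEUES; i++) {
		r = cio2_queue_init(cio2, &cio2->queue[i]);
		if (r)
			goto unwind;
	}
	return 0;

unwind:
	for (i--; i >= 0; i--)	/* undo only what succeeded */
		cio2_queue_exit(cio2, &cio2->queue[i]);
	return r;
}
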
1719 struct cio2_device *cio2;
1739 cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
1740 if (!cio2)
1742 cio2->pci_dev = pci_dev;
1759 cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
1761 pci_set_drvdata(pci_dev, cio2);
1777 r = cio2_fbpt_init_dummy(cio2);
1781 mutex_init(&cio2->lock);
1783 cio2->media_dev.dev = dev;
1784 strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1785 sizeof(cio2->media_dev.model));
1786 cio2->media_dev.hw_revision = 0;
1788 media_device_init(&cio2->media_dev);
1789 r = media_device_register(&cio2->media_dev);
1793 cio2->v4l2_dev.mdev = &cio2->media_dev;
1794 r = v4l2_device_register(dev, &cio2->v4l2_dev);
1800 r = cio2_queues_init(cio2);
1804 v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);
1807 r = cio2_parse_firmware(cio2);
1812 CIO2_NAME, cio2);
1824 v4l2_async_nf_unregister(&cio2->notifier);
1825 v4l2_async_nf_cleanup(&cio2->notifier);
1826 cio2_queues_exit(cio2);
1828 v4l2_device_unregister(&cio2->v4l2_dev);
1830 media_device_unregister(&cio2->media_dev);
1831 media_device_cleanup(&cio2->media_dev);
1833 mutex_destroy(&cio2->lock);
1834 cio2_fbpt_exit_dummy(cio2);
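
Probe (lines 1719-1812) builds the device in layers: devm allocation, BAR mapping, dummy FBPT, lock, media device, v4l2 device, queues, async notifier, firmware parsing, and finally the IRQ; the error labels (lines 1824-1834) then undo those layers in reverse order. A sketch of the opening steps, with the intermediate pcim_enable_device()/BAR-request calls omitted:

	struct device *dev = &pci_dev->dev;
	struct cio2_device *cio2;
	int r;

	cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
	if (!cio2)
		return -ENOMEM;
	cio2->pci_dev = pci_dev;

	/* ... pcim_enable_device() and BAR request/mapping omitted ... */
	cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
	pci_set_drvdata(pci_dev, cio2);

	r = cio2_fbpt_init_dummy(cio2);
	if (r)
		return r;
	mutex_init(&cio2->lock);
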
1841 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1843 media_device_unregister(&cio2->media_dev);
1844 v4l2_async_nf_unregister(&cio2->notifier);
1845 v4l2_async_nf_cleanup(&cio2->notifier);
1846 cio2_queues_exit(cio2);
1847 cio2_fbpt_exit_dummy(cio2);
1848 v4l2_device_unregister(&cio2->v4l2_dev);
1849 media_device_cleanup(&cio2->media_dev);
1850 mutex_destroy(&cio2->lock);
1859 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1860 void __iomem *const base = cio2->base;
1864 dev_dbg(dev, "cio2 runtime suspend.\n");
1877 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1878 void __iomem *const base = cio2->base;
1882 dev_dbg(dev, "cio2 runtime resume.\n");
1941 static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1968 cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
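
For suspend, cio2_fbpt_rearrange() (lines 1941-1968) reorders the FBPT so that on resume the hardware's read pointer coincides with entry 0, then re-enables each entry (line 1968). A sketch of the re-enable pass; the rotation semantics are an assumption from the function name, since the listing shows only the enable call:

	unsigned int i;

	/* ... rotate q->fbpt so the HW's next slot becomes entry 0 ... */

	for (i = 0; i < CIO2_MAX_BUFFERS; i++)	/* slot count assumed */
		cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
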
1974 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1975 struct cio2_queue *q = cio2->cur_queue;
1978 dev_dbg(dev, "cio2 suspend\n");
1979 if (!cio2->streaming)
1989 cio2_hw_exit(cio2, q);
1998 cio2_fbpt_rearrange(cio2, q);
2007 struct cio2_device *cio2 = dev_get_drvdata(dev);
2008 struct cio2_queue *q = cio2->cur_queue;
2011 dev_dbg(dev, "cio2 resume\n");
2012 if (!cio2->streaming)
2021 r = cio2_hw_init(cio2, q);
2023 dev_err(dev, "fail to init cio2 hw\n");
2030 cio2_hw_exit(cio2, q);
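
System suspend/resume (lines 1974-2030) only touch the hardware when a capture is active: suspend stops the HW on the saved cur_queue and rearranges the FBPT (line 1998); resume re-runs cio2_hw_init() and unwinds if that fails. A sketch of the resume side; the placement of the cio2_hw_exit() unwind relative to line 2030 is an assumption:

static int __maybe_unused cio2_resume(struct device *dev)
{
	struct cio2_device *cio2 = dev_get_drvdata(dev);
	struct cio2_queue *q = cio2->cur_queue;
	int r;

	dev_dbg(dev, "cio2 resume\n");
	if (!cio2->streaming)
		return 0;	/* no capture was active across suspend */

	/* ... runtime-PM force-resume omitted ... */

	r = cio2_hw_init(cio2, q);
	if (r) {
		dev_err(dev, "fail to init cio2 hw\n");	/* message from line 2023 */
		cio2_hw_exit(cio2, q);	/* unwind per line 2030 */
	}
	return r;
}
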