Lines matching refs:mdev_state in samples/vfio-mdev/mbochs.c

165 	struct mdev_state *mdev_state;
171 struct mdev_state {
213 static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
215 static struct page *mbochs_get_page(struct mdev_state *mdev_state,
218 static void mbochs_create_config_space(struct mdev_state *mdev_state)
220 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
222 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
224 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
226 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
229 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
231 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
233 mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;
235 STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
239 mdev_state->bar_mask[0] = ~(mdev_state->memsize) + 1;
241 STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_2],
244 mdev_state->bar_mask[2] = ~(MBOCHS_MMIO_BAR_SIZE) + 1;
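
The two bar_mask assignments rely on two's-complement arithmetic: for a power-of-two size, ~size + 1 equals -size, a value whose low log2(size) bits are clear. That is exactly the mask a PCI BAR must report when the guest sizes it by writing all-ones. A minimal userspace sketch of the arithmetic, with an illustrative 16 MB size rather than the driver's values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t memsize  = 16u * 1024 * 1024;  /* a 16 MB BAR */
		uint32_t bar_mask = ~memsize + 1;       /* 0xff000000 */

		printf("bar_mask     = 0x%08x\n", bar_mask);
		printf("decoded size = %u\n", ~bar_mask + 1);  /* 16777216 again */
		return 0;
	}
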
247 static int mbochs_check_framebuffer(struct mdev_state *mdev_state,
250 struct device *dev = mdev_dev(mdev_state->mdev);
251 u16 *vbe = mdev_state->vbe;
254 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
286 if (mode->offset + mode->size > mdev_state->memsize) {
305 static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
308 struct device *dev = mdev_dev(mdev_state->mdev);
318 cfg_addr = (cfg_addr & mdev_state->bar_mask[index]);
326 cfg_addr |= (mdev_state->vconfig[offset] &
328 STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
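
handle_pci_cfg_write() emulates memory-BAR programming: the guest-written address is clipped to the bits the BAR implements (via bar_mask), the read-only low flag bits already in vconfig are folded back in, and the result is stored little-endian. A condensed sketch of that pattern; the helper name is illustrative, not the driver's code:

	/* Sketch only: clip the written address, preserve the R/O flag bits. */
	static void emulate_bar_write(u32 *bar, u32 bar_mask, u32 val)
	{
		u32 flags = *bar & ~PCI_BASE_ADDRESS_MEM_MASK; /* low 4 bits */

		*bar = (val & bar_mask) | flags;
	}
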
333 static void handle_mmio_write(struct mdev_state *mdev_state, u16 offset,
336 struct device *dev = mdev_dev(mdev_state->mdev);
348 if (index < ARRAY_SIZE(mdev_state->vbe))
349 mdev_state->vbe[index] = reg16;
363 static void handle_mmio_read(struct mdev_state *mdev_state, u16 offset,
366 struct device *dev = mdev_dev(mdev_state->mdev);
373 edid = &mdev_state->edid_regs;
379 memcpy(buf, mdev_state->edid_blob + offset, count);
385 if (index < ARRAY_SIZE(mdev_state->vbe))
386 reg16 = mdev_state->vbe[index];
400 static void handle_edid_regs(struct mdev_state *mdev_state, u16 offset,
403 char *regs = (void *)&mdev_state->edid_regs;
405 if (offset + count > sizeof(mdev_state->edid_regs))
427 static void handle_edid_blob(struct mdev_state *mdev_state, u16 offset,
430 if (offset + count > mdev_state->edid_regs.edid_max_size)
433 memcpy(mdev_state->edid_blob + offset, buf, count);
435 memcpy(buf, mdev_state->edid_blob + offset, count);
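
handle_edid_regs() and handle_edid_blob() follow the same defensive pattern: reject any access that would run past the backing buffer, then memcpy in the requested direction. A condensed sketch with illustrative names:

	/* Sketch of the bounds-checked access pattern used by both handlers. */
	static void blob_access(u8 *blob, size_t blob_size, u16 offset,
				char *buf, u32 count, bool is_write)
	{
		if (offset + count > blob_size)
			return;		/* out-of-range access is dropped */
		if (is_write)
			memcpy(blob + offset, buf, count);
		else
			memcpy(buf, blob + offset, count);
	}
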
438 static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
446 mutex_lock(&mdev_state->ops_lock);
450 handle_pci_cfg_write(mdev_state, pos, buf, count);
452 memcpy(buf, (mdev_state->vconfig + pos), count);
459 handle_mmio_write(mdev_state, pos, buf, count);
461 handle_mmio_read(mdev_state, pos, buf, count);
468 handle_edid_regs(mdev_state, pos, buf, count, is_write);
471 handle_edid_blob(mdev_state, pos, buf, count, is_write);
476 MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) {
479 pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
489 dev_dbg(mdev_state->vdev.dev, "%s: %s @0x%llx (unhandled)\n",
499 mutex_unlock(&mdev_state->ops_lock);
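
mdev_access() is the single funnel for guest accesses: it takes ops_lock, classifies the flat offset into one of the emulated regions (PCI config space, the BAR2 MMIO window holding the VBE and EDID registers, or the BAR0 framebuffer backed by __mbochs_get_page()), and dev_dbg()-logs anything unhandled. A sketch of the classification step; the macro names are the driver's, the condensation into one helper is illustrative:

	enum mbochs_region { R_CFG, R_MMIO, R_FB, R_NONE };

	static enum mbochs_region classify(loff_t pos, u64 memsize)
	{
		if (pos < MBOCHS_CONFIG_SPACE_SIZE)
			return R_CFG;	/* emulated PCI config space */
		if (pos >= MBOCHS_MMIO_BAR_OFFSET &&
		    pos <  MBOCHS_MMIO_BAR_OFFSET + MBOCHS_MMIO_BAR_SIZE)
			return R_MMIO;	/* VBE + EDID registers */
		if (pos >= MBOCHS_MEMORY_BAR_OFFSET &&
		    pos <  MBOCHS_MEMORY_BAR_OFFSET + memsize)
			return R_FB;	/* framebuffer pages */
		return R_NONE;		/* unhandled */
	}
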
504 static int mbochs_reset(struct mdev_state *mdev_state)
506 u32 size64k = mdev_state->memsize / (64 * 1024);
509 for (i = 0; i < ARRAY_SIZE(mdev_state->vbe); i++)
510 mdev_state->vbe[i] = 0;
511 mdev_state->vbe[VBE_DISPI_INDEX_ID] = VBE_DISPI_ID5;
512 mdev_state->vbe[VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = size64k;
518 struct mdev_state *mdev_state =
519 container_of(vdev, struct mdev_state, vdev);
532 mdev_state->vconfig = kzalloc(MBOCHS_CONFIG_SPACE_SIZE, GFP_KERNEL);
533 if (!mdev_state->vconfig)
536 mdev_state->memsize = type->mbytes * 1024 * 1024;
537 mdev_state->pagecount = mdev_state->memsize >> PAGE_SHIFT;
538 mdev_state->pages = kcalloc(mdev_state->pagecount,
541 if (!mdev_state->pages)
544 mutex_init(&mdev_state->ops_lock);
545 mdev_state->mdev = mdev;
546 INIT_LIST_HEAD(&mdev_state->dmabufs);
547 mdev_state->next_id = 1;
549 mdev_state->type = type;
550 mdev_state->edid_regs.max_xres = type->max_x;
551 mdev_state->edid_regs.max_yres = type->max_y;
552 mdev_state->edid_regs.edid_offset = MBOCHS_EDID_BLOB_OFFSET;
553 mdev_state->edid_regs.edid_max_size = sizeof(mdev_state->edid_blob);
554 mbochs_create_config_space(mdev_state);
555 mbochs_reset(mdev_state);
558 type->type.pretty_name, type->mbytes, mdev_state->pagecount);
562 kfree(mdev_state->vconfig);
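
mbochs_init_dev() sizes its bookkeeping from the type's megabyte count: memsize = mbytes * 1024 * 1024, and pagecount = memsize >> PAGE_SHIFT entries in the kcalloc'd pages array. A quick arithmetic check, assuming 4 KiB pages:

	/* Sizing sketch: illustrative 16 MB instance, PAGE_SHIFT == 12. */
	u64 memsize   = 16ull * 1024 * 1024;	/* type->mbytes = 16 */
	u64 pagecount = memsize >> 12;		/* 4096 page pointers */
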
570 struct mdev_state *mdev_state;
573 mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
575 if (IS_ERR(mdev_state))
576 return PTR_ERR(mdev_state);
578 ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
581 dev_set_drvdata(&mdev->dev, mdev_state);
585 vfio_put_device(&mdev_state->vdev);
591 struct mdev_state *mdev_state =
592 container_of(vdev, struct mdev_state, vdev);
594 atomic_add(mdev_state->type->mbytes, &mbochs_avail_mbytes);
595 kfree(mdev_state->pages);
596 kfree(mdev_state->vconfig);
601 struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
603 vfio_unregister_group_dev(&mdev_state->vdev);
604 vfio_put_device(&mdev_state->vdev);
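
probe and remove pair up with the vfio core's reference counting: vfio_alloc_device() allocates the container struct and runs the init callback, vfio_register_emulated_iommu_dev() exposes the device, and vfio_put_device() drops the reference so the release callback (which frees pages and vconfig and returns the megabytes to mbochs_avail_mbytes) can run. A hedged reconstruction of the probe flow from the lines above; the label name is illustrative:

	static int my_probe(struct mdev_device *mdev)
	{
		struct mdev_state *mdev_state;
		int ret;

		/* Allocates the container and invokes the ops' init callback. */
		mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
					       &mbochs_dev_ops);
		if (IS_ERR(mdev_state))
			return PTR_ERR(mdev_state);

		ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
		if (ret)
			goto err_put;
		dev_set_drvdata(&mdev->dev, mdev_state);
		return 0;

	err_put:
		/* Drops the ref; the release callback does the freeing. */
		vfio_put_device(&mdev_state->vdev);
		return ret;
	}
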
610 struct mdev_state *mdev_state =
611 container_of(vdev, struct mdev_state, vdev);
621 ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
633 ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
645 ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
671 struct mdev_state *mdev_state =
672 container_of(vdev, struct mdev_state, vdev);
685 ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
697 ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
709 ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
727 static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
730 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
732 if (!mdev_state->pages[pgoff]) {
733 mdev_state->pages[pgoff] =
735 if (!mdev_state->pages[pgoff])
739 get_page(mdev_state->pages[pgoff]);
740 return mdev_state->pages[pgoff];
743 static struct page *mbochs_get_page(struct mdev_state *mdev_state,
748 if (WARN_ON(pgoff >= mdev_state->pagecount))
751 mutex_lock(&mdev_state->ops_lock);
752 page = __mbochs_get_page(mdev_state, pgoff);
753 mutex_unlock(&mdev_state->ops_lock);
758 static void mbochs_put_pages(struct mdev_state *mdev_state)
760 struct device *dev = mdev_dev(mdev_state->mdev);
763 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
765 for (i = 0; i < mdev_state->pagecount; i++) {
766 if (!mdev_state->pages[i])
768 put_page(mdev_state->pages[i]);
769 mdev_state->pages[i] = NULL;
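
__mbochs_get_page() and mbochs_put_pages() implement a lazy page cache: framebuffer pages are allocated on first access, every caller takes its own reference via get_page(), and teardown drops the cache's reference with put_page(). A condensed sketch of the allocation path; treat the alloc flags as illustrative, not the driver's exact choice:

	/* Under ops_lock: allocate on first touch, then hand out a ref. */
	if (!mdev_state->pages[pgoff]) {
		mdev_state->pages[pgoff] =
			alloc_page(GFP_HIGHUSER | __GFP_ZERO);
		if (!mdev_state->pages[pgoff])
			return NULL;
	}
	get_page(mdev_state->pages[pgoff]);
	return mdev_state->pages[pgoff];
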
778 struct mdev_state *mdev_state = vma->vm_private_data;
781 if (page_offset >= mdev_state->pagecount)
784 vmf->page = mbochs_get_page(mdev_state, page_offset);
797 struct mdev_state *mdev_state =
798 container_of(vdev, struct mdev_state, vdev);
804 if (vma->vm_end - vma->vm_start > mdev_state->memsize)
810 vma->vm_private_data = mdev_state;
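
The mmap path validates the VMA size against memsize and stashes mdev_state in vm_private_data; the fault handler then resolves each faulting pgoff to a refcounted page. A sketch of the fault side, consistent with the lines above (function name illustrative):

	static vm_fault_t my_fault(struct vm_fault *vmf)
	{
		struct mdev_state *mdev_state = vmf->vma->vm_private_data;
		pgoff_t pgoff = vmf->pgoff;

		if (pgoff >= mdev_state->pagecount)
			return VM_FAULT_SIGBUS;

		vmf->page = mbochs_get_page(mdev_state, pgoff); /* takes a ref */
		if (!vmf->page)
			return VM_FAULT_SIGBUS;
		return 0;
	}
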
834 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
849 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
866 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
895 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
907 struct mdev_state *mdev_state = dmabuf->mdev_state;
908 struct device *dev = mdev_dev(mdev_state->mdev);
916 mutex_lock(&mdev_state->ops_lock);
920 mutex_unlock(&mdev_state->ops_lock);
930 static struct mbochs_dmabuf *mbochs_dmabuf_alloc(struct mdev_state *mdev_state,
936 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
943 dmabuf->id = mdev_state->next_id++;
952 dmabuf->pages[pg] = __mbochs_get_page(mdev_state,
958 dmabuf->mdev_state = mdev_state;
959 list_add(&dmabuf->next, &mdev_state->dmabufs);
974 mbochs_dmabuf_find_by_mode(struct mdev_state *mdev_state,
979 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
981 list_for_each_entry(dmabuf, &mdev_state->dmabufs, next)
989 mbochs_dmabuf_find_by_id(struct mdev_state *mdev_state, u32 id)
993 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
995 list_for_each_entry(dmabuf, &mdev_state->dmabufs, next)
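
Both dmabuf lookups are linear scans of the per-device list, legal only while ops_lock is held (hence the WARN_ON). The by-id variant, condensed:

	static struct mbochs_dmabuf *
	find_by_id(struct mdev_state *mdev_state, u32 id)
	{
		struct mbochs_dmabuf *dmabuf;

		WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
		list_for_each_entry(dmabuf, &mdev_state->dmabufs, next)
			if (dmabuf->id == id)
				return dmabuf;
		return NULL;
	}
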
1004 struct mdev_state *mdev_state = dmabuf->mdev_state;
1005 struct device *dev = mdev_state->vdev.dev;
1009 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
1033 static int mbochs_get_region_info(struct mdev_state *mdev_state,
1050 region_info->size = mdev_state->memsize;
1098 static int mbochs_query_gfx_plane(struct mdev_state *mdev_state,
1121 mutex_lock(&mdev_state->ops_lock);
1125 ret = mbochs_check_framebuffer(mdev_state, &mode);
1136 dmabuf = mbochs_dmabuf_find_by_mode(mdev_state, &mode);
1138 mbochs_dmabuf_alloc(mdev_state, &mode);
1140 mutex_unlock(&mdev_state->ops_lock);
1153 mdev_state->active_id != plane->dmabuf_id) {
1154 dev_dbg(mdev_state->vdev.dev, "%s: primary: %d => %d\n",
1155 __func__, mdev_state->active_id, plane->dmabuf_id);
1156 mdev_state->active_id = plane->dmabuf_id;
1158 mutex_unlock(&mdev_state->ops_lock);
1162 static int mbochs_get_gfx_dmabuf(struct mdev_state *mdev_state, u32 id)
1166 mutex_lock(&mdev_state->ops_lock);
1168 dmabuf = mbochs_dmabuf_find_by_id(mdev_state, id);
1170 mutex_unlock(&mdev_state->ops_lock);
1177 mutex_unlock(&mdev_state->ops_lock);
1188 struct mdev_state *mdev_state =
1189 container_of(vdev, struct mdev_state, vdev);
1230 ret = mbochs_get_region_info(mdev_state, &info);
1276 ret = mbochs_query_gfx_plane(mdev_state, &plane);
1293 return mbochs_get_gfx_dmabuf(mdev_state, dmabuf_id);
1300 return mbochs_reset(mdev_state);
1307 struct mdev_state *mdev_state =
1308 container_of(vdev, struct mdev_state, vdev);
1311 mutex_lock(&mdev_state->ops_lock);
1313 list_for_each_entry_safe(dmabuf, tmp, &mdev_state->dmabufs, next) {
1322 mbochs_put_pages(mdev_state);
1324 mutex_unlock(&mdev_state->ops_lock);
1331 struct mdev_state *mdev_state = dev_get_drvdata(dev);
1333 return sprintf(buf, "%d MB\n", mdev_state->type->mbytes);
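
The sysfs show handler closes the loop with probe: dev_get_drvdata() recovers the mdev_state stored by dev_set_drvdata() at line 581 and reports the instance's memory size. A sketch of the complete attribute; the attribute name is assumed, and newer kernels would prefer sysfs_emit() over sprintf():

	static ssize_t memory_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
	{
		struct mdev_state *mdev_state = dev_get_drvdata(dev);

		return sprintf(buf, "%d MB\n", mdev_state->type->mbytes);
	}
	static DEVICE_ATTR_RO(memory);
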