Lines Matching defs:dlfb

79 static int dlfb_realloc_framebuffer(struct dlfb_data *dlfb, struct fb_info *info, u32 new_len);
81 /* dlfb keeps a list of urbs for efficient bulk transfers */
83 static struct urb *dlfb_get_urb(struct dlfb_data *dlfb);
84 static int dlfb_submit_urb(struct dlfb_data *dlfb, struct urb *urb, size_t len);
85 static int dlfb_alloc_urb_list(struct dlfb_data *dlfb, int count, size_t size);
86 static void dlfb_free_urb_list(struct dlfb_data *dlfb);
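A minimal sketch of how these URB-pool helpers are chained by the rendering paths in this listing (compare lines 290-317): acquire a pooled URB, fill its transfer buffer, then hand ownership to dlfb_submit_urb(). The function name and the empty command buffer below are illustrative only, not part of the driver.

static int example_send_commands(struct dlfb_data *dlfb)
{
        struct urb *urb;
        char *buf, *wrptr;

        /* Nothing is sent once the device has gone away. */
        if (!atomic_read(&dlfb->usb_active))
                return -EPERM;

        urb = dlfb_get_urb(dlfb);       /* may sleep, bounded by a timeout */
        if (!urb)
                return -ENOMEM;

        buf = (char *)urb->transfer_buffer;
        wrptr = buf;                    /* device command bytes would be appended here */

        /*
         * dlfb_submit_urb() takes ownership of the URB; the completion
         * handler returns it to the free list (see lines 1846-1853).
         */
        return dlfb_submit_urb(dlfb, urb, wrptr - buf);
}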
281 static int dlfb_set_video_mode(struct dlfb_data *dlfb,
290 if (!atomic_read(&dlfb->usb_active))
293 urb = dlfb_get_urb(dlfb);
309 wrptr = dlfb_set_base8bpp(wrptr, dlfb->info->fix.smem_len);
317 retval = dlfb_submit_urb(dlfb, urb, writesize);
319 dlfb->blank_mode = FB_BLANK_UNBLANK;
537 static int dlfb_render_hline(struct dlfb_data *dlfb, struct urb **urb_ptr,
543 u32 dev_addr = dlfb->base16 + byte_offset;
553 if (dlfb->backing_buffer) {
555 const u8 *back_start = (u8 *) (dlfb->backing_buffer
579 if (dlfb_submit_urb(dlfb, urb, len))
582 urb = dlfb_get_urb(dlfb);
596 static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
608 mutex_lock(&dlfb->render_mutex);
615 (x + width > dlfb->info->var.xres) ||
616 (y + height > dlfb->info->var.yres)) {
621 if (!atomic_read(&dlfb->usb_active)) {
626 urb = dlfb_get_urb(dlfb);
634 const int line_offset = dlfb->info->fix.line_length * i;
637 if (dlfb_render_hline(dlfb, &urb,
638 (char *) dlfb->info->fix.smem_start,
650 dlfb_submit_urb(dlfb, urb, len);
656 atomic_add(bytes_sent, &dlfb->bytes_sent);
657 atomic_add(bytes_identical, &dlfb->bytes_identical);
658 atomic_add(width*height*2, &dlfb->bytes_rendered);
662 &dlfb->cpu_kcycles_used);
667 mutex_unlock(&dlfb->render_mutex);
671 static void dlfb_init_damage(struct dlfb_data *dlfb)
673 dlfb->damage_x = INT_MAX;
674 dlfb->damage_x2 = 0;
675 dlfb->damage_y = INT_MAX;
676 dlfb->damage_y2 = 0;
681 struct dlfb_data *dlfb = container_of(w, struct dlfb_data, damage_work);
684 spin_lock_irq(&dlfb->damage_lock);
685 x = dlfb->damage_x;
686 x2 = dlfb->damage_x2;
687 y = dlfb->damage_y;
688 y2 = dlfb->damage_y2;
689 dlfb_init_damage(dlfb);
690 spin_unlock_irq(&dlfb->damage_lock);
693 dlfb_handle_damage(dlfb, x, y, x2 - x, y2 - y);
696 static void dlfb_offload_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
705 spin_lock_irqsave(&dlfb->damage_lock, flags);
706 dlfb->damage_x = min(x, dlfb->damage_x);
707 dlfb->damage_x2 = max(x2, dlfb->damage_x2);
708 dlfb->damage_y = min(y, dlfb->damage_y);
709 dlfb->damage_y2 = max(y2, dlfb->damage_y2);
710 spin_unlock_irqrestore(&dlfb->damage_lock, flags);
712 schedule_work(&dlfb->damage_work);
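The damage lines above accumulate a pending rectangle under damage_lock (the INT_MAX/0 values set by dlfb_init_damage() mean "empty") and flush it from a workqueue. A hedged sketch of the worker side, reconstructed from lines 681-693; the x < x2 / y < y2 filter for the empty sentinels is an assumption.

static void example_damage_work(struct work_struct *w)
{
        struct dlfb_data *dlfb = container_of(w, struct dlfb_data, damage_work);
        int x, x2, y, y2;

        /* Snapshot and reset the pending rectangle atomically. */
        spin_lock_irq(&dlfb->damage_lock);
        x  = dlfb->damage_x;
        x2 = dlfb->damage_x2;
        y  = dlfb->damage_y;
        y2 = dlfb->damage_y2;
        dlfb_init_damage(dlfb);         /* back to the INT_MAX/0 "empty" sentinels */
        spin_unlock_irq(&dlfb->damage_lock);

        /* Assumption: the empty sentinels are filtered out before rendering. */
        if (x < x2 && y < y2)
                dlfb_handle_damage(dlfb, x, y, x2 - x, y2 - y);
}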
725 struct dlfb_data *dlfb = info->par;
735 dlfb_handle_damage(dlfb, 0, start, info->var.xres,
747 struct dlfb_data *dlfb = info->par;
751 dlfb_offload_damage(dlfb, area->dx, area->dy,
758 struct dlfb_data *dlfb = info->par;
762 dlfb_offload_damage(dlfb, image->dx, image->dy,
769 struct dlfb_data *dlfb = info->par;
773 dlfb_offload_damage(dlfb, rect->dx, rect->dy, rect->width,
788 struct dlfb_data *dlfb = info->par;
796 mutex_lock(&dlfb->render_mutex);
801 if (!atomic_read(&dlfb->usb_active))
806 urb = dlfb_get_urb(dlfb);
815 if (dlfb_render_hline(dlfb, &urb, (char *) info->fix.smem_start,
828 dlfb_submit_urb(dlfb, urb, len);
834 atomic_add(bytes_sent, &dlfb->bytes_sent);
835 atomic_add(bytes_identical, &dlfb->bytes_identical);
836 atomic_add(bytes_rendered, &dlfb->bytes_rendered);
840 &dlfb->cpu_kcycles_used);
842 mutex_unlock(&dlfb->render_mutex);
845 static int dlfb_get_edid(struct dlfb_data *dlfb, char *edid, int len)
855 ret = usb_control_msg(dlfb->udev,
856 usb_rcvctrlpipe(dlfb->udev, 0), 0x02,
860 dev_err(&dlfb->udev->dev,
877 struct dlfb_data *dlfb = info->par;
879 if (!atomic_read(&dlfb->usb_active))
885 if (copy_to_user(edid, dlfb->edid, dlfb->edid_size))
920 dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h);
960 struct dlfb_data *dlfb = info->par;
971 if (dlfb->virtualized)
974 dlfb->fb_count++;
993 user, info, dlfb->fb_count);
1000 struct dlfb_data *dlfb = info->par;
1002 cancel_work_sync(&dlfb->damage_work);
1004 mutex_destroy(&dlfb->render_mutex);
1014 while (!list_empty(&dlfb->deferred_free)) {
1015 struct dlfb_deferred_free *d = list_entry(dlfb->deferred_free.next, struct dlfb_deferred_free, list);
1020 vfree(dlfb->backing_buffer);
1021 kfree(dlfb->edid);
1022 dlfb_free_urb_list(dlfb);
1023 usb_put_dev(dlfb->udev);
1024 kfree(dlfb);
1035 struct dlfb_data *dlfb = info->par;
1037 dlfb->fb_count--;
1039 if ((dlfb->fb_count == 0) && (info->fbdefio)) {
1045 dev_dbg(info->dev, "release, user=%d count=%d\n", user, dlfb->fb_count);
1054 static int dlfb_is_valid_mode(struct fb_videomode *mode, struct dlfb_data *dlfb)
1056 if (mode->xres * mode->yres > dlfb->sku_pixel_limit)
1078 struct dlfb_data *dlfb = info->par;
1085 if (!dlfb_is_valid_mode(&mode, dlfb))
1093 struct dlfb_data *dlfb = info->par;
1105 if (!memcmp(&dlfb->current_mode, &fvs, sizeof(struct fb_var_screeninfo)))
1108 result = dlfb_realloc_framebuffer(dlfb, info, info->var.yres * line_length);
1112 result = dlfb_set_video_mode(dlfb, &info->var);
1117 dlfb->current_mode = fvs;
1120 if (dlfb->fb_count == 0) {
1129 dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres);
1154 struct dlfb_data *dlfb = info->par;
1159 dlfb->blank_mode, blank_mode);
1161 if ((dlfb->blank_mode == FB_BLANK_POWERDOWN) &&
1165 dlfb_set_video_mode(dlfb, &info->var);
1168 urb = dlfb_get_urb(dlfb);
1180 dlfb_submit_urb(dlfb, urb, bufptr -
1183 dlfb->blank_mode = blank_mode;
1207 static void dlfb_deferred_vfree(struct dlfb_data *dlfb, void *mem)
1213 list_add(&d->list, &dlfb->deferred_free);
1220 static int dlfb_realloc_framebuffer(struct dlfb_data *dlfb, struct fb_info *info, u32 new_len)
1242 dlfb_deferred_vfree(dlfb, (void __force *)info->screen_base);
1262 dlfb_deferred_vfree(dlfb, dlfb->backing_buffer);
1263 dlfb->backing_buffer = new_back;
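dlfb_realloc_framebuffer() retires the old screen and backing allocations through dlfb_deferred_vfree() instead of freeing them immediately, since clients may still have them mapped; dlfb_ops_destroy() drains the list later (lines 1014-1020). A minimal sketch of the helper, assuming struct dlfb_deferred_free pairs the list member visible in the excerpts with a mem pointer for the memory to release.

/* Assumed layout: only the list member is visible in the excerpts above. */
struct dlfb_deferred_free {
        struct list_head list;
        void *mem;
};

static void example_deferred_vfree(struct dlfb_data *dlfb, void *mem)
{
        struct dlfb_deferred_free *d = kmalloc(sizeof(*d), GFP_KERNEL);

        if (!d)
                return;         /* nothing recorded; the memory leaks rather than being freed early */
        d->mem = mem;
        list_add(&d->list, &dlfb->deferred_free);
}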
1283 static int dlfb_setup_modes(struct dlfb_data *dlfb,
1316 i = dlfb_get_edid(dlfb, edid, EDID_LENGTH);
1322 dlfb->edid = edid;
1323 dlfb->edid_size = i;
1332 if (dlfb->edid) {
1333 fb_edid_to_monspecs(dlfb->edid, &info->monspecs);
1345 dlfb->edid = edid;
1346 dlfb->edid_size = default_edid_size;
1357 if (dlfb_is_valid_mode(mode, dlfb)) {
1386 if (dlfb_is_valid_mode(mode, dlfb))
1405 if ((default_vmode != NULL) && (dlfb->fb_count == 0)) {
1418 if (edid && (dlfb->edid != edid))
1430 struct dlfb_data *dlfb = fb_info->par;
1432 atomic_read(&dlfb->bytes_rendered));
1438 struct dlfb_data *dlfb = fb_info->par;
1440 atomic_read(&dlfb->bytes_identical));
1446 struct dlfb_data *dlfb = fb_info->par;
1448 atomic_read(&dlfb->bytes_sent));
1454 struct dlfb_data *dlfb = fb_info->par;
1456 atomic_read(&dlfb->cpu_kcycles_used));
1465 struct dlfb_data *dlfb = fb_info->par;
1467 if (dlfb->edid == NULL)
1470 if ((off >= dlfb->edid_size) || (count > dlfb->edid_size))
1473 if (off + count > dlfb->edid_size)
1474 count = dlfb->edid_size - off;
1476 memcpy(buf, dlfb->edid, count);
1487 struct dlfb_data *dlfb = fb_info->par;
1494 ret = dlfb_setup_modes(dlfb, fb_info, src, src_size);
1498 if (!dlfb->edid || memcmp(src, dlfb->edid, src_size))
1513 struct dlfb_data *dlfb = fb_info->par;
1515 atomic_set(&dlfb->bytes_rendered, 0);
1516 atomic_set(&dlfb->bytes_identical, 0);
1517 atomic_set(&dlfb->bytes_sent, 0);
1518 atomic_set(&dlfb->cpu_kcycles_used, 0);
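The counters above (bytes_rendered, bytes_identical, bytes_sent, cpu_kcycles_used) are updated from the render paths and read back through sysfs, with the reset handler at lines 1513-1518 clearing them. A sketch of one reader, assuming the standard device_attribute show() signature; the function name and output format are illustrative.

static ssize_t example_metrics_bytes_rendered_show(struct device *fbdev,
                                                   struct device_attribute *attr,
                                                   char *buf)
{
        struct fb_info *fb_info = dev_get_drvdata(fbdev);
        struct dlfb_data *dlfb = fb_info->par;

        return sysfs_emit(buf, "%u\n", atomic_read(&dlfb->bytes_rendered));
}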
1542 static int dlfb_select_std_channel(struct dlfb_data *dlfb)
1557 ret = usb_control_msg(dlfb->udev, usb_sndctrlpipe(dlfb->udev, 0),
1567 static int dlfb_parse_vendor_descriptor(struct dlfb_data *dlfb,
1623 dlfb->sku_pixel_limit = max_area;
1652 struct dlfb_data *dlfb;
1659 dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
1660 if (!dlfb) {
1661 dev_err(&intf->dev, "%s: failed to allocate dlfb\n", __func__);
1665 INIT_LIST_HEAD(&dlfb->deferred_free);
1667 dlfb->udev = usb_get_dev(usbdev);
1668 usb_set_intfdata(intf, dlfb);
1680 dlfb->sku_pixel_limit = 2048 * 1152; /* default to maximum */
1682 if (!dlfb_parse_vendor_descriptor(dlfb, intf)) {
1692 dlfb->sku_pixel_limit, pixel_limit);
1693 dlfb->sku_pixel_limit = pixel_limit;
1698 info = framebuffer_alloc(0, &dlfb->udev->dev);
1704 dlfb->info = info;
1705 info->par = dlfb;
1706 info->pseudo_palette = dlfb->pseudo_palette;
1707 dlfb->ops = dlfb_ops;
1708 info->fbops = &dlfb->ops;
1710 mutex_init(&dlfb->render_mutex);
1711 dlfb_init_damage(dlfb);
1712 spin_lock_init(&dlfb->damage_lock);
1713 INIT_WORK(&dlfb->damage_work, dlfb_damage_work);
1717 if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
1731 retval = dlfb_setup_modes(dlfb, info, NULL, 0);
1740 atomic_set(&dlfb->usb_active, 1);
1741 dlfb_select_std_channel(dlfb);
1772 ((dlfb->backing_buffer) ?
1777 if (dlfb->info) {
1778 dlfb_ops_destroy(dlfb->info);
1780 usb_put_dev(dlfb->udev);
1781 kfree(dlfb);
1788 struct dlfb_data *dlfb;
1792 dlfb = usb_get_intfdata(intf);
1793 info = dlfb->info;
1798 dlfb->virtualized = true;
1801 atomic_set(&dlfb->usb_active, 0);
1804 dlfb_free_urb_list(dlfb);
1826 struct dlfb_data *dlfb = unode->dlfb;
1839 dev_err(&dlfb->udev->dev,
1842 atomic_set(&dlfb->lost_pixels, 1);
1846 urb->transfer_buffer_length = dlfb->urbs.size; /* reset to actual */
1848 spin_lock_irqsave(&dlfb->urbs.lock, flags);
1849 list_add_tail(&unode->entry, &dlfb->urbs.list);
1850 dlfb->urbs.available++;
1851 spin_unlock_irqrestore(&dlfb->urbs.lock, flags);
1853 up(&dlfb->urbs.limit_sem);
1856 static void dlfb_free_urb_list(struct dlfb_data *dlfb)
1858 int count = dlfb->urbs.count;
1865 down(&dlfb->urbs.limit_sem);
1867 spin_lock_irq(&dlfb->urbs.lock);
1869 node = dlfb->urbs.list.next; /* have reserved one with sem */
1872 spin_unlock_irq(&dlfb->urbs.lock);
1878 usb_free_coherent(urb->dev, dlfb->urbs.size,
1884 dlfb->urbs.count = 0;
1887 static int dlfb_alloc_urb_list(struct dlfb_data *dlfb, int count, size_t size)
1894 spin_lock_init(&dlfb->urbs.lock);
1897 dlfb->urbs.size = size;
1898 INIT_LIST_HEAD(&dlfb->urbs.list);
1900 sema_init(&dlfb->urbs.limit_sem, 0);
1901 dlfb->urbs.count = 0;
1902 dlfb->urbs.available = 0;
1904 while (dlfb->urbs.count * size < wanted_size) {
1908 unode->dlfb = dlfb;
1917 buf = usb_alloc_coherent(dlfb->udev, size, GFP_KERNEL,
1924 dlfb_free_urb_list(dlfb);
1931 usb_fill_bulk_urb(urb, dlfb->udev,
1932 usb_sndbulkpipe(dlfb->udev, OUT_EP_NUM),
1936 list_add_tail(&unode->entry, &dlfb->urbs.list);
1938 up(&dlfb->urbs.limit_sem);
1939 dlfb->urbs.count++;
1940 dlfb->urbs.available++;
1943 return dlfb->urbs.count;
1946 static struct urb *dlfb_get_urb(struct dlfb_data *dlfb)
1953 ret = down_timeout(&dlfb->urbs.limit_sem, GET_URB_TIMEOUT);
1955 atomic_set(&dlfb->lost_pixels, 1);
1956 dev_warn(&dlfb->udev->dev,
1958 ret, dlfb->urbs.available);
1962 spin_lock_irq(&dlfb->urbs.lock);
1964 BUG_ON(list_empty(&dlfb->urbs.list)); /* reserved one with limit_sem */
1965 entry = dlfb->urbs.list.next;
1967 dlfb->urbs.available--;
1969 spin_unlock_irq(&dlfb->urbs.lock);
1975 static int dlfb_submit_urb(struct dlfb_data *dlfb, struct urb *urb, size_t len)
1979 BUG_ON(len > dlfb->urbs.size);
1985 atomic_set(&dlfb->lost_pixels, 1);
1986 dev_err(&dlfb->udev->dev, "submit urb error: %d\n", ret);
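Lines 1946-1969 show the pool discipline used throughout: urbs.limit_sem counts free URBs (so it also bounds writes in flight), urbs.lock protects the free list, and the completion handler (lines 1826-1853) puts the URB back and releases the semaphore. A condensed, hedged sketch of the get path; the urb member of struct urb_node is an assumption, the rest follows the excerpts.

static struct urb *example_get_urb(struct dlfb_data *dlfb)
{
        struct list_head *entry;
        struct urb_node *unode;

        /* Wait (bounded) until a completion handler returns a URB to the pool. */
        if (down_timeout(&dlfb->urbs.limit_sem, GET_URB_TIMEOUT)) {
                atomic_set(&dlfb->lost_pixels, 1);      /* signal that an update was dropped */
                return NULL;
        }

        spin_lock_irq(&dlfb->urbs.lock);
        entry = dlfb->urbs.list.next;   /* the semaphore guarantees a free node */
        list_del_init(entry);
        dlfb->urbs.available--;
        spin_unlock_irq(&dlfb->urbs.lock);

        unode = list_entry(entry, struct urb_node, entry);
        return unode->urb;              /* urb member assumed; see lead-in */
}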