Lines matching refs:udl

26 static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout);
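
All of the matches below touch the driver's pool of preallocated bulk URBs. As a reading aid, here is a sketch of the bookkeeping those accesses imply; the fields are inferred from the matched lines, not copied from the driver's private header (udl_drv.h in-tree, which carries more fields than shown).

    /* Inferred sketch, not the in-tree definitions.
     * Needs <linux/list.h>, <linux/spinlock.h>, <linux/wait.h>,
     * <drm/drm_device.h>. Each preallocated URB is wrapped in a node
     * on a spinlock-protected free list, with a waitqueue for callers
     * that find the list empty. */
    struct urb_node {
        struct list_head entry;       /* link in udl->urbs.list */
        struct urb *urb;              /* the preallocated URB */
        struct udl_device *dev;       /* back-pointer for the completion handler */
    };

    struct udl_device {
        struct drm_device drm;
        struct device *dmadev;        /* DMA device used for buffer sharing */
        struct mutex gem_lock;
        u32 sku_pixel_limit;          /* from the vendor descriptor */
        struct {
            spinlock_t lock;          /* guards list, count, available */
            struct list_head list;    /* free URBs, ready for reuse */
            wait_queue_head_t sleep;  /* woken when a URB is returned */
            int count;                /* total URBs allocated */
            int available;            /* URBs currently on the free list */
            size_t size;              /* per-URB transfer buffer size */
        } urbs;
    };
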
28 static int udl_parse_vendor_descriptor(struct udl_device *udl)
30 struct usb_device *udev = udl_to_usb_device(udl);
73 udl->sku_pixel_limit = max_area;
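
Lines 28-73 fall in udl_parse_vendor_descriptor(), which reads the device's vendor-specific descriptor and records the SKU's maximum pixel area. A sketch of that shape follows; the descriptor type (0x5f), the max-area key (0x0200), and MAX_VENDOR_DESCRIPTOR_SIZE are assumptions about the in-tree constants, and the record walk is compressed.

    /* Sketch, assuming a key/length/value vendor descriptor layout;
     * constants are illustrative, not verified in-tree values. */
    #define MAX_VENDOR_DESCRIPTOR_SIZE 256    /* assumed bound */

    static int udl_parse_vendor_descriptor(struct udl_device *udl)
    {
        struct usb_device *udev = udl_to_usb_device(udl);    /* line 30 */
        u8 *buf, *desc, *end;
        int len;

        buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
        if (!buf)
            return false;    /* bool-as-int, matching the call at line 320 */

        len = usb_get_descriptor(udev, 0x5f /* vendor specific */, 0,
                                 buf, MAX_VENDOR_DESCRIPTOR_SIZE);
        if (len < 0) {
            kfree(buf);
            return false;    /* treated as "firmware not recognized" */
        }

        for (desc = buf, end = buf + len; desc + 3 <= end; ) {
            u16 key = le16_to_cpu(*(__le16 *)desc);
            u8 record_len = desc[2];

            desc += 3;
            if (desc + record_len > end)
                break;
            if (key == 0x0200 && record_len >= sizeof(__le32))
                udl->sku_pixel_limit =
                    le32_to_cpu(*(__le32 *)desc);    /* line 73 */
            desc += record_len;
        }

        kfree(buf);
        return true;
    }
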
97 int udl_select_std_channel(struct udl_device *udl)
106 struct usb_device *udev = udl_to_usb_device(udl);
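
Lines 97-106 come from udl_select_std_channel(), which moves the device onto its standard channel before any rendering URBs are submitted. The sketch below assumes this is a vendor control request carrying a fixed 16-byte key; NR_USB_REQUEST_CHANNEL and the key bytes are device-specific values, shown here as placeholders.

    /* Sketch: channel selection as a vendor control request.
     * The real 16-byte key is device-specific; zeros are placeholders. */
    int udl_select_std_channel(struct udl_device *udl)
    {
        static const u8 set_def_chn[16] = { 0 };    /* placeholder key */
        struct usb_device *udev = udl_to_usb_device(udl);    /* line 106 */
        void *sendbuf;
        int ret;

        /* control transfers must not use stack or rodata buffers,
         * so duplicate the key into kmalloc'd memory first */
        sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
        if (!sendbuf)
            return -ENOMEM;

        ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                              NR_USB_REQUEST_CHANNEL,
                              USB_DIR_OUT | USB_TYPE_VENDOR, 0, 0,
                              sendbuf, sizeof(set_def_chn),
                              USB_CTRL_SET_TIMEOUT);

        kfree(sendbuf);
        return ret < 0 ? ret : 0;    /* line 326 treats nonzero as failure */
    }
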
124 struct udl_device *udl = unode->dev;
138 urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */
140 spin_lock_irqsave(&udl->urbs.lock, flags);
141 list_add_tail(&unode->entry, &udl->urbs.list);
142 udl->urbs.available++;
143 spin_unlock_irqrestore(&udl->urbs.lock, flags);
145 wake_up(&udl->urbs.sleep);
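
Lines 124-145 are the URB completion handler (udl_urb_completion() in-tree), and together they show the whole return-to-pool path. Assembled, with the urb->status error handling elided, it looks like this; note the irqsave lock variants, since completion runs in interrupt context:

    /* Sketch: return a completed URB to the free list and wake one
     * waiter. Runs in interrupt context. */
    static void udl_urb_completion(struct urb *urb)
    {
        struct urb_node *unode = urb->context;
        struct udl_device *udl = unode->dev;    /* line 124 */
        unsigned long flags;

        /* restore the full buffer length; submit shrank it to the
         * actual payload (counterpart of the check at line 278) */
        urb->transfer_buffer_length = udl->urbs.size;    /* line 138 */

        spin_lock_irqsave(&udl->urbs.lock, flags);
        list_add_tail(&unode->entry, &udl->urbs.list);
        udl->urbs.available++;
        spin_unlock_irqrestore(&udl->urbs.lock, flags);

        wake_up(&udl->urbs.sleep);    /* one URB freed, wake one waiter */
    }
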
150 struct udl_device *udl = to_udl(dev);
157 while (udl->urbs.count) {
158 spin_lock_irq(&udl->urbs.lock);
159 urb = udl_get_urb_locked(udl, MAX_SCHEDULE_TIMEOUT);
160 udl->urbs.count--;
161 spin_unlock_irq(&udl->urbs.lock);
166 usb_free_coherent(urb->dev, udl->urbs.size,
172 wake_up_all(&udl->urbs.sleep);
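
Lines 150-172 are the teardown loop (udl_free_urb_list() in-tree): each URB is reclaimed as it completes, with MAX_SCHEDULE_TIMEOUT meaning "wait indefinitely", then freed piece by piece. A sketch filling in the gaps between the matches:

    /* Sketch: drain and free the whole pool. */
    static void udl_free_urb_list(struct drm_device *dev)
    {
        struct udl_device *udl = to_udl(dev);    /* line 150 */
        struct urb_node *unode;
        struct urb *urb;

        while (udl->urbs.count) {
            spin_lock_irq(&udl->urbs.lock);
            urb = udl_get_urb_locked(udl, MAX_SCHEDULE_TIMEOUT);
            udl->urbs.count--;
            spin_unlock_irq(&udl->urbs.lock);
            if (WARN_ON(!urb))
                break;    /* should not happen with an indefinite wait */
            unode = urb->context;
            /* free each separately allocated piece */
            usb_free_coherent(urb->dev, udl->urbs.size,
                              urb->transfer_buffer, urb->transfer_dma);
            usb_free_urb(urb);
            kfree(unode);
        }

        /* unblock remaining sleepers in udl_get_urb_locked(); with
         * count now zero they return NULL instead of a stale URB */
        wake_up_all(&udl->urbs.sleep);
    }
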
177 struct udl_device *udl = to_udl(dev);
182 struct usb_device *udev = udl_to_usb_device(udl);
184 spin_lock_init(&udl->urbs.lock);
185 INIT_LIST_HEAD(&udl->urbs.list);
186 init_waitqueue_head(&udl->urbs.sleep);
187 udl->urbs.count = 0;
188 udl->urbs.available = 0;
191 udl->urbs.size = size;
193 while (udl->urbs.count * size < wanted_size) {
197 unode->dev = udl;
224 list_add_tail(&unode->entry, &udl->urbs.list);
226 udl->urbs.count++;
227 udl->urbs.available++;
230 DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);
232 return udl->urbs.count;
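
Lines 177-232 build the pool (udl_alloc_urb_list() in-tree). The matches show the lock, list, waitqueue, and counter setup; the sketch below fills in the per-URB allocations. The bulk endpoint number is an assumption, and the in-tree shrink-and-retry fallback for a failed coherent allocation is elided.

    /* Sketch: preallocate 'count' URBs of 'size' bytes each. */
    static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
    {
        struct udl_device *udl = to_udl(dev);    /* line 177 */
        struct usb_device *udev = udl_to_usb_device(udl);
        size_t wanted_size = count * size;
        struct urb_node *unode;
        struct urb *urb;
        char *buf;

        spin_lock_init(&udl->urbs.lock);
        INIT_LIST_HEAD(&udl->urbs.list);
        init_waitqueue_head(&udl->urbs.sleep);
        udl->urbs.count = 0;
        udl->urbs.available = 0;
        udl->urbs.size = size;    /* line 191 */

        while (udl->urbs.count * size < wanted_size) {
            unode = kzalloc(sizeof(*unode), GFP_KERNEL);
            if (!unode)
                break;
            unode->dev = udl;    /* line 197 */

            urb = usb_alloc_urb(0, GFP_KERNEL);
            if (!urb) {
                kfree(unode);
                break;
            }
            unode->urb = urb;

            buf = usb_alloc_coherent(udev, size, GFP_KERNEL,
                                     &urb->transfer_dma);
            if (!buf) {    /* in-tree: retry with a smaller size */
                usb_free_urb(urb);
                kfree(unode);
                break;
            }

            /* bulk OUT (endpoint 1 assumed); completion re-queues it */
            usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1),
                              buf, size, udl_urb_completion, unode);
            urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

            list_add_tail(&unode->entry, &udl->urbs.list);    /* line 224 */
            udl->urbs.count++;
            udl->urbs.available++;
        }

        DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int)size);
        return udl->urbs.count;    /* zero means total failure, line 232 */
    }
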
235 static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
239 assert_spin_locked(&udl->urbs.lock);
242 if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
243 !udl->urbs.count ||
244 !list_empty(&udl->urbs.list),
245 udl->urbs.lock, timeout)) {
247 udl->urbs.available);
251 if (!udl->urbs.count)
254 unode = list_first_entry(&udl->urbs.list, struct urb_node, entry);
256 udl->urbs.available--;
264 struct udl_device *udl = to_udl(dev);
267 spin_lock_irq(&udl->urbs.lock);
268 urb = udl_get_urb_locked(udl, GET_URB_TIMEOUT);
269 spin_unlock_irq(&udl->urbs.lock);
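
Lines 235-269 implement checking a URB out of the pool: a locked variant that can sleep (wait_event_lock_irq_timeout() drops udl->urbs.lock while sleeping and retakes it before returning) plus a small public wrapper. Reconstructed around the matches, with the log message paraphrased:

    /* Sketch: hand out the first free URB, waiting up to 'timeout'
     * jiffies for one to come back if the list is empty. */
    static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
    {
        struct urb_node *unode;

        assert_spin_locked(&udl->urbs.lock);    /* line 239 */

        /* wake-up conditions: a URB was returned, or the pool is
         * being torn down (count dropped to zero); a zero return
         * means the timeout expired first */
        if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
                                         !udl->urbs.count ||
                                         !list_empty(&udl->urbs.list),
                                         udl->urbs.lock, timeout)) {
            DRM_INFO("wait for urb timed out, available: %d\n",
                     udl->urbs.available);    /* line 247 */
            return NULL;
        }

        if (!udl->urbs.count)    /* pool torn down while we slept */
            return NULL;

        unode = list_first_entry(&udl->urbs.list, struct urb_node, entry);
        list_del_init(&unode->entry);
        udl->urbs.available--;

        return unode->urb;
    }

    struct urb *udl_get_urb(struct drm_device *dev)
    {
        struct udl_device *udl = to_udl(dev);    /* line 264 */
        struct urb *urb;

        spin_lock_irq(&udl->urbs.lock);
        urb = udl_get_urb_locked(udl, GET_URB_TIMEOUT);    /* HZ in-tree */
        spin_unlock_irq(&udl->urbs.lock);
        return urb;
    }
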
275 struct udl_device *udl = to_udl(dev);
278 if (WARN_ON(len > udl->urbs.size)) {
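
Lines 275-278 are the submit path; the matched WARN_ON guards the preallocated buffer size. A sketch of the surrounding function, whose key subtlety is that a failed submit must re-queue the URB itself, because only the completion handler normally does that:

    /* Sketch: shrink the transfer to the actual payload and submit. */
    int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
    {
        struct udl_device *udl = to_udl(dev);    /* line 275 */
        int ret;

        if (WARN_ON(len > udl->urbs.size)) {    /* line 278 */
            ret = -EINVAL;
            goto error;
        }
        urb->transfer_buffer_length = len;    /* restored on completion */
        ret = usb_submit_urb(urb, GFP_ATOMIC);

    error:
        if (ret) {
            udl_urb_completion(urb);    /* nobody else will re-queue it */
            DRM_ERROR("usb_submit_urb error %x\n", ret);
        }
        return ret;
    }
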
295 struct udl_device *udl = to_udl(dev);
297 spin_lock_irq(&udl->urbs.lock);
299 if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
300 udl->urbs.available == udl->urbs.count,
301 udl->urbs.lock,
304 spin_unlock_irq(&udl->urbs.lock);
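
Lines 295-304 flush the pipeline by waiting until every URB is back on the free list, i.e. available has climbed back up to count. Reconstructed; the two-second bound matches the in-tree choice:

    /* Sketch: block until all in-flight URBs have completed. */
    void udl_sync_pending_urbs(struct drm_device *dev)
    {
        struct udl_device *udl = to_udl(dev);    /* line 295 */

        spin_lock_irq(&udl->urbs.lock);
        /* the completion handler's wake_up() re-evaluates this */
        if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
                                         udl->urbs.available == udl->urbs.count,
                                         udl->urbs.lock,
                                         msecs_to_jiffies(2000)))
            drm_err(dev, "Timeout for syncing pending URBs\n");
        spin_unlock_irq(&udl->urbs.lock);
    }
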
307 int udl_init(struct udl_device *udl)
309 struct drm_device *dev = &udl->drm;
314 udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
315 if (!udl->dmadev)
318 mutex_init(&udl->gem_lock);
320 if (!udl_parse_vendor_descriptor(udl)) {
326 if (udl_select_std_channel(udl))
344 if (udl->urbs.count)
346 put_device(udl->dmadev);
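
Lines 307-346 are udl_init() and its unwind path. A sketch of the ordering the matches imply; WRITES_IN_FLIGHT and MAX_TRANSFER are the driver's own pool-sizing constants, used here by name only, and the log strings are paraphrased:

    /* Sketch: bring-up order and error unwinding. */
    int udl_init(struct udl_device *udl)
    {
        struct drm_device *dev = &udl->drm;    /* line 309 */
        int ret = -ENOMEM;

        udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
        if (!udl->dmadev)    /* line 315: degraded mode, not fatal */
            drm_warn(dev, "buffer sharing not supported\n");

        mutex_init(&udl->gem_lock);

        if (!udl_parse_vendor_descriptor(udl)) {
            ret = -ENODEV;
            DRM_ERROR("firmware not recognized\n");
            goto err;
        }

        if (udl_select_std_channel(udl))    /* line 326: log and continue */
            DRM_ERROR("selecting channel failed\n");

        if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
            DRM_ERROR("udl_alloc_urb_list failed\n");
            goto err;
        }

        return 0;

    err:
        if (udl->urbs.count)    /* line 344: free a partially built pool */
            udl_free_urb_list(dev);
        put_device(udl->dmadev);    /* line 346 */
        return ret;
    }
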
353 struct udl_device *udl = to_udl(dev);
356 put_device(udl->dmadev);
357 udl->dmadev = NULL;
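
Lines 353-357 are the USB-unplug teardown (udl_drop_usb() in-tree). It releases the pool and drops the DMA-device reference taken in udl_init(), clearing the pointer so later teardown cannot reuse it:

    /* Sketch: undo udl_init() when the USB device goes away. */
    int udl_drop_usb(struct drm_device *dev)
    {
        struct udl_device *udl = to_udl(dev);    /* line 353 */

        udl_free_urb_list(dev);
        put_device(udl->dmadev);    /* line 356 */
        udl->dmadev = NULL;         /* line 357 */

        return 0;
    }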