// SPDX-License-Identifier: GPL-2.0
/*
 * USB Raw Gadget driver.
 * See Documentation/usb/raw-gadget.rst for more details.
 *
 * Andrey Konovalov <andreyknvl@gmail.com>
 */
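/*
 * Rough userspace flow (see Documentation/usb/raw-gadget.rst for the
 * authoritative description): open /dev/raw-gadget, call
 * USB_RAW_IOCTL_INIT with the UDC driver and device names, start the
 * gadget with USB_RAW_IOCTL_RUN, then loop on USB_RAW_IOCTL_EVENT_FETCH
 * and serve the host's requests via the EP0_* and EP_* ioctls below.
 */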

#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/ch11.h>
#include <linux/usb/gadget.h>

#include <uapi/linux/usb/raw_gadget.h>

#define DRIVER_DESC "USB Raw Gadget"
#define DRIVER_NAME "raw-gadget"

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Andrey Konovalov");
MODULE_LICENSE("GPL");

/*----------------------------------------------------------------------*/

static DEFINE_IDA(driver_id_numbers);
#define DRIVER_DRIVER_NAME_LENGTH_MAX 32

#define RAW_EVENT_QUEUE_SIZE 16

struct raw_event_queue {
	/* See the comment in raw_event_queue_fetch() for locking details. */
	spinlock_t lock;
	struct semaphore sema;
	struct usb_raw_event *events[RAW_EVENT_QUEUE_SIZE];
	int size;
};

static void raw_event_queue_init(struct raw_event_queue *queue)
{
	spin_lock_init(&queue->lock);
	sema_init(&queue->sema, 0);
	queue->size = 0;
}

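/*
 * Appends an event to the tail of the queue and wakes up one waiter.
 * Called from gadget callbacks, potentially in atomic context, hence
 * the GFP_ATOMIC allocation.
 */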
static int raw_event_queue_add(struct raw_event_queue *queue,
	enum usb_raw_event_type type, size_t length, const void *data)
{
	unsigned long flags;
	struct usb_raw_event *event;

	spin_lock_irqsave(&queue->lock, flags);
	if (WARN_ON(queue->size >= RAW_EVENT_QUEUE_SIZE)) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return -ENOMEM;
	}
	event = kmalloc(sizeof(*event) + length, GFP_ATOMIC);
	if (!event) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return -ENOMEM;
	}
	event->type = type;
	event->length = length;
	if (event->length)
		memcpy(&event->data[0], data, length);
	queue->events[queue->size] = event;
	queue->size++;
	up(&queue->sema);
	spin_unlock_irqrestore(&queue->lock, flags);
	return 0;
}

static struct usb_raw_event *raw_event_queue_fetch(
				struct raw_event_queue *queue)
{
	int ret;
	unsigned long flags;
	struct usb_raw_event *event;

	/*
	 * This function can be called concurrently. We first check that
	 * there's at least one event queued by decrementing the semaphore,
	 * and then take the lock to protect queue struct fields.
	 */
	ret = down_interruptible(&queue->sema);
	if (ret)
		return ERR_PTR(ret);
	spin_lock_irqsave(&queue->lock, flags);
	/*
	 * queue->size must have the same value as queue->sema counter (before
	 * the down_interruptible() call above), so this check is a fail-safe.
	 */
	if (WARN_ON(!queue->size)) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return ERR_PTR(-ENODEV);
	}
	event = queue->events[0];
	queue->size--;
	memmove(&queue->events[0], &queue->events[1],
			queue->size * sizeof(queue->events[0]));
	spin_unlock_irqrestore(&queue->lock, flags);
	return event;
}

static void raw_event_queue_destroy(struct raw_event_queue *queue)
{
	int i;

	for (i = 0; i < queue->size; i++)
		kfree(queue->events[i]);
	queue->size = 0;
}

/*----------------------------------------------------------------------*/

struct raw_dev;

enum ep_state {
	STATE_EP_DISABLED,
	STATE_EP_ENABLED,
};

struct raw_ep {
	struct raw_dev *dev;
	enum ep_state state;
	struct usb_ep *ep;
	u8 addr;
	struct usb_request *req;
	bool urb_queued;
	bool disabling;
	ssize_t status;
};

enum dev_state {
	STATE_DEV_INVALID = 0,
	STATE_DEV_OPENED,
	STATE_DEV_INITIALIZED,
	STATE_DEV_REGISTERING,
	STATE_DEV_RUNNING,
	STATE_DEV_CLOSED,
	STATE_DEV_FAILED
};

struct raw_dev {
	struct kref count;
	spinlock_t lock;

	const char *udc_name;
	struct usb_gadget_driver driver;

	/* Reference to misc device: */
	struct device *dev;

	/* Make driver names unique */
	int driver_id_number;

	/* Protected by lock: */
	enum dev_state state;
	bool gadget_registered;
	struct usb_gadget *gadget;
	struct usb_request *req;
	bool ep0_in_pending;
	bool ep0_out_pending;
	bool ep0_urb_queued;
	ssize_t ep0_status;
	struct raw_ep eps[USB_RAW_EPS_NUM_MAX];
	int eps_num;

	struct completion ep0_done;
	struct raw_event_queue queue;
};

static struct raw_dev *dev_new(void)
{
	struct raw_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	/* Matches kref_put() in raw_release(). */
	kref_init(&dev->count);
	spin_lock_init(&dev->lock);
	init_completion(&dev->ep0_done);
	raw_event_queue_init(&dev->queue);
	dev->driver_id_number = -1;
	return dev;
}

static void dev_free(struct kref *kref)
{
	struct raw_dev *dev = container_of(kref, struct raw_dev, count);
	int i;

	kfree(dev->udc_name);
	kfree(dev->driver.udc_name);
	kfree(dev->driver.driver.name);
	if (dev->driver_id_number >= 0)
		ida_free(&driver_id_numbers, dev->driver_id_number);
	if (dev->req) {
		if (dev->ep0_urb_queued)
			usb_ep_dequeue(dev->gadget->ep0, dev->req);
		usb_ep_free_request(dev->gadget->ep0, dev->req);
	}
	raw_event_queue_destroy(&dev->queue);
	for (i = 0; i < dev->eps_num; i++) {
		if (dev->eps[i].state == STATE_EP_DISABLED)
			continue;
		usb_ep_disable(dev->eps[i].ep);
		usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
		kfree(dev->eps[i].ep->desc);
		dev->eps[i].state = STATE_EP_DISABLED;
	}
	kfree(dev);
}

/*----------------------------------------------------------------------*/

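/*
 * Queues an event for delivery to userspace; on failure the device is
 * moved to STATE_DEV_FAILED.
 */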
static int raw_queue_event(struct raw_dev *dev,
	enum usb_raw_event_type type, size_t length, const void *data)
{
	int ret = 0;
	unsigned long flags;

	ret = raw_event_queue_add(&dev->queue, type, length, data);
	if (ret < 0) {
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		spin_unlock_irqrestore(&dev->lock, flags);
	}
	return ret;
}

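/*
 * Completion callback for endpoint 0 requests: records the transfer
 * status (or the number of bytes actually transferred) and wakes up the
 * ioctl handler waiting in raw_process_ep0_io().
 */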
static void gadget_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct raw_dev *dev = req->context;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (req->status)
		dev->ep0_status = req->status;
	else
		dev->ep0_status = req->actual;
	if (dev->ep0_in_pending)
		dev->ep0_in_pending = false;
	else
		dev->ep0_out_pending = false;
	spin_unlock_irqrestore(&dev->lock, flags);

	complete(&dev->ep0_done);
}

static u8 get_ep_addr(const char *name)
{
	/* If the endpoint has fixed function (named as e.g. "ep12out-bulk"),
	 * parse the endpoint address from its name. We deliberately use
	 * deprecated simple_strtoul() function here, as the number isn't
	 * followed by '\0' nor '\n'.
	 */
	if (isdigit(name[2]))
		return simple_strtoul(&name[2], NULL, 10);
	/* Otherwise the endpoint is configurable (named as e.g. "ep-a"). */
	return USB_RAW_EP_ADDR_ANY;
}

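/*
 * Called by the gadget core once this driver is bound to the requested
 * UDC: allocates the ep0 request, records the list of hardware
 * endpoints, and queues a USB_RAW_EVENT_CONNECT event for userspace.
 */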
static int gadget_bind(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	int ret = 0, i = 0;
	struct raw_dev *dev = container_of(driver, struct raw_dev, driver);
	struct usb_request *req;
	struct usb_ep *ep;
	unsigned long flags;

	if (strcmp(gadget->name, dev->udc_name) != 0)
		return -ENODEV;

	set_gadget_data(gadget, dev);
	req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
	if (!req) {
		dev_err(&gadget->dev, "usb_ep_alloc_request failed\n");
		set_gadget_data(gadget, NULL);
		return -ENOMEM;
	}

	spin_lock_irqsave(&dev->lock, flags);
	dev->req = req;
	dev->req->context = dev;
	dev->req->complete = gadget_ep0_complete;
	dev->gadget = gadget;
	gadget_for_each_ep(ep, dev->gadget) {
		dev->eps[i].ep = ep;
		dev->eps[i].addr = get_ep_addr(ep->name);
		dev->eps[i].state = STATE_EP_DISABLED;
		i++;
	}
	dev->eps_num = i;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL);
	if (ret < 0) {
		dev_err(&gadget->dev, "failed to queue event\n");
		set_gadget_data(gadget, NULL);
		return ret;
	}

	/* Matches kref_put() in gadget_unbind(). */
	kref_get(&dev->count);
	return ret;
}

static void gadget_unbind(struct usb_gadget *gadget)
{
	struct raw_dev *dev = get_gadget_data(gadget);

	set_gadget_data(gadget, NULL);
	/* Matches kref_get() in gadget_bind(). */
	kref_put(&dev->count, dev_free);
}

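/*
 * Called by the UDC for control requests it does not handle itself:
 * marks an ep0 transfer as pending and queues a USB_RAW_EVENT_CONTROL
 * event so that userspace can serve the data/status stage.
 */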
static int gadget_setup(struct usb_gadget *gadget,
			const struct usb_ctrlrequest *ctrl)
{
	int ret = 0;
	struct raw_dev *dev = get_gadget_data(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_err(&gadget->dev, "ignoring, device is not running\n");
		ret = -ENODEV;
		goto out_unlock;
	}
	if (dev->ep0_in_pending || dev->ep0_out_pending) {
		dev_dbg(&gadget->dev, "stalling, request already pending\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength)
		dev->ep0_in_pending = true;
	else
		dev->ep0_out_pending = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = raw_queue_event(dev, USB_RAW_EVENT_CONTROL, sizeof(*ctrl), ctrl);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue event\n");
	goto out;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
out:
	return ret;
}

/* These are currently unused but present in case UDC driver requires them. */
static void gadget_disconnect(struct usb_gadget *gadget) { }
static void gadget_suspend(struct usb_gadget *gadget) { }
static void gadget_resume(struct usb_gadget *gadget) { }
static void gadget_reset(struct usb_gadget *gadget) { }

/*----------------------------------------------------------------------*/

static struct miscdevice raw_misc_device;

static int raw_open(struct inode *inode, struct file *fd)
{
	struct raw_dev *dev;

	/* Nonblocking I/O is not supported yet. */
	if (fd->f_flags & O_NONBLOCK)
		return -EINVAL;

	dev = dev_new();
	if (!dev)
		return -ENOMEM;
	fd->private_data = dev;
	dev->state = STATE_DEV_OPENED;
	dev->dev = raw_misc_device.this_device;
	return 0;
}

static int raw_release(struct inode *inode, struct file *fd)
{
	int ret = 0;
	struct raw_dev *dev = fd->private_data;
	unsigned long flags;
	bool unregister = false;

	spin_lock_irqsave(&dev->lock, flags);
	dev->state = STATE_DEV_CLOSED;
	if (!dev->gadget) {
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_put;
	}
	if (dev->gadget_registered)
		unregister = true;
	dev->gadget_registered = false;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (unregister) {
		ret = usb_gadget_unregister_driver(&dev->driver);
		if (ret != 0)
			dev_err(dev->dev,
				"usb_gadget_unregister_driver() failed with %d\n",
				ret);
		/* Matches kref_get() in raw_ioctl_run(). */
		kref_put(&dev->count, dev_free);
	}

out_put:
	/* Matches dev_new() in raw_open(). */
	kref_put(&dev->count, dev_free);
	return ret;
}

/*----------------------------------------------------------------------*/

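/*
 * USB_RAW_IOCTL_INIT: copies struct usb_raw_init from userspace, picks a
 * unique driver name, and fills in the usb_gadget_driver that will later
 * be registered by USB_RAW_IOCTL_RUN.
 */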
static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	int driver_id_number;
	struct usb_raw_init arg;
	char *udc_driver_name;
	char *udc_device_name;
	char *driver_driver_name;
	unsigned long flags;

	if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
		return -EFAULT;

	switch (arg.speed) {
	case USB_SPEED_UNKNOWN:
		arg.speed = USB_SPEED_HIGH;
		break;
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
		break;
	default:
		return -EINVAL;
	}

	driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL);
	if (driver_id_number < 0)
		return driver_id_number;

	driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!driver_driver_name) {
		ret = -ENOMEM;
		goto out_free_driver_id_number;
	}
	snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX,
			DRIVER_NAME ".%d", driver_id_number);

	udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!udc_driver_name) {
		ret = -ENOMEM;
		goto out_free_driver_driver_name;
	}
	ret = strscpy(udc_driver_name, &arg.driver_name[0],
			UDC_NAME_LENGTH_MAX);
	if (ret < 0)
		goto out_free_udc_driver_name;
	ret = 0;

	udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!udc_device_name) {
		ret = -ENOMEM;
		goto out_free_udc_driver_name;
	}
	ret = strscpy(udc_device_name, &arg.device_name[0],
			UDC_NAME_LENGTH_MAX);
	if (ret < 0)
		goto out_free_udc_device_name;
	ret = 0;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_OPENED) {
		dev_dbg(dev->dev, "fail, device is not opened\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->udc_name = udc_driver_name;

	dev->driver.function = DRIVER_DESC;
	dev->driver.max_speed = arg.speed;
	dev->driver.setup = gadget_setup;
	dev->driver.disconnect = gadget_disconnect;
	dev->driver.bind = gadget_bind;
	dev->driver.unbind = gadget_unbind;
	dev->driver.suspend = gadget_suspend;
	dev->driver.resume = gadget_resume;
	dev->driver.reset = gadget_reset;
	dev->driver.driver.name = driver_driver_name;
	dev->driver.udc_name = udc_device_name;
	dev->driver.match_existing_only = 1;
	dev->driver_id_number = driver_id_number;

	dev->state = STATE_DEV_INITIALIZED;
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
out_free_udc_device_name:
	kfree(udc_device_name);
out_free_udc_driver_name:
	kfree(udc_driver_name);
out_free_driver_driver_name:
	kfree(driver_driver_name);
out_free_driver_id_number:
	ida_free(&driver_id_numbers, driver_id_number);
	return ret;
}

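/*
 * USB_RAW_IOCTL_RUN: registers the gadget driver prepared by
 * USB_RAW_IOCTL_INIT with the UDC core and marks the device as running.
 */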
static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	if (value)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_INITIALIZED) {
		dev_dbg(dev->dev, "fail, device is not initialized\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->state = STATE_DEV_REGISTERING;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_gadget_probe_driver(&dev->driver);

	spin_lock_irqsave(&dev->lock, flags);
	if (ret) {
		dev_err(dev->dev,
			"fail, usb_gadget_probe_driver returned %d\n", ret);
		dev->state = STATE_DEV_FAILED;
		goto out_unlock;
	}
	dev->gadget_registered = true;
	dev->state = STATE_DEV_RUNNING;
	/* Matches kref_put() in raw_release(). */
	kref_get(&dev->count);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

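/*
 * USB_RAW_IOCTL_EVENT_FETCH: blocks until an event is available and
 * copies it to userspace, truncating the payload to the user-supplied
 * buffer length.
 */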
static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value)
{
	struct usb_raw_event arg;
	unsigned long flags;
	struct usb_raw_event *event;
	uint32_t length;

	if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
		return -EFAULT;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EINVAL;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	event = raw_event_queue_fetch(&dev->queue);
	if (PTR_ERR(event) == -EINTR) {
		dev_dbg(&dev->gadget->dev, "event fetching interrupted\n");
		return -EINTR;
	}
	if (IS_ERR(event)) {
		dev_err(&dev->gadget->dev, "failed to fetch event\n");
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENODEV;
	}
	length = min(arg.length, event->length);
	if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) {
		kfree(event);
		return -EFAULT;
	}

	kfree(event);
	return 0;
}

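/*
 * Copies struct usb_raw_ep_io from userspace, validates it, and returns
 * a kernel buffer for the transfer data, optionally prefilled with the
 * userspace payload (for the write ioctls).
 */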
static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr,
				bool get_from_user)
{
	void *data;

	if (copy_from_user(io, ptr, sizeof(*io)))
		return ERR_PTR(-EFAULT);
	if (io->ep >= USB_RAW_EPS_NUM_MAX)
		return ERR_PTR(-EINVAL);
	if (!usb_raw_io_flags_valid(io->flags))
		return ERR_PTR(-EINVAL);
	if (io->length > PAGE_SIZE)
		return ERR_PTR(-EINVAL);
	if (get_from_user)
		data = memdup_user(ptr + sizeof(*io), io->length);
	else {
		data = kmalloc(io->length, GFP_KERNEL);
		if (!data)
			data = ERR_PTR(-ENOMEM);
	}
	return data;
}

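/*
 * Queues a request on endpoint 0 for the pending control transfer and
 * sleeps until the completion callback (or an interrupting signal)
 * reports the result.
 */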
static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
				void *data, bool in)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->ep0_urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if ((in && !dev->ep0_in_pending) ||
			(!in && !dev->ep0_out_pending)) {
		dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (WARN_ON(in && dev->ep0_out_pending)) {
		ret = -ENODEV;
		dev->state = STATE_DEV_FAILED;
		goto out_unlock;
	}
	if (WARN_ON(!in && dev->ep0_in_pending)) {
		ret = -ENODEV;
		dev->state = STATE_DEV_FAILED;
		goto out_unlock;
	}

	dev->req->buf = data;
	dev->req->length = io->length;
	dev->req->zero = usb_raw_io_flags_zero(io->flags);
	dev->ep0_urb_queued = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->gadget->ep0, dev->req, GFP_KERNEL);
	if (ret) {
		dev_err(&dev->gadget->dev,
				"fail, usb_ep_queue returned %d\n", ret);
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		goto out_queue_failed;
	}

	ret = wait_for_completion_interruptible(&dev->ep0_done);
	if (ret) {
		dev_dbg(&dev->gadget->dev, "wait interrupted\n");
		usb_ep_dequeue(dev->gadget->ep0, dev->req);
		wait_for_completion(&dev->ep0_done);
		spin_lock_irqsave(&dev->lock, flags);
		if (dev->ep0_status == -ECONNRESET)
			dev->ep0_status = -EINTR;
		goto out_interrupted;
	}

	spin_lock_irqsave(&dev->lock, flags);

out_interrupted:
	ret = dev->ep0_status;
out_queue_failed:
	dev->ep0_urb_queued = false;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep0_write(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	void *data;
	struct usb_raw_ep_io io;

	data = raw_alloc_io_data(&io, (void __user *)value, true);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep0_io(dev, &io, data, true);
	kfree(data);
	return ret;
}

static int raw_ioctl_ep0_read(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	void *data;
	struct usb_raw_ep_io io;
	unsigned int length;

	data = raw_alloc_io_data(&io, (void __user *)value, false);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep0_io(dev, &io, data, false);
	if (ret < 0)
		goto free;

	length = min(io.length, (unsigned int)ret);
	if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
		ret = -EFAULT;
	else
		ret = length;
free:
	kfree(data);
	return ret;
}

static int raw_ioctl_ep0_stall(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	if (value)
		return -EINVAL;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->ep0_urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (!dev->ep0_in_pending && !dev->ep0_out_pending) {
		dev_dbg(&dev->gadget->dev, "fail, no request pending\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = usb_ep_set_halt(dev->gadget->ep0);
	if (ret < 0)
		dev_err(&dev->gadget->dev,
			"fail, usb_ep_set_halt returned %d\n", ret);

	if (dev->ep0_in_pending)
		dev->ep0_in_pending = false;
	else
		dev->ep0_out_pending = false;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

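/*
 * USB_RAW_IOCTL_EP_ENABLE: copies an endpoint descriptor from userspace
 * and enables the first matching, currently disabled hardware endpoint;
 * returns its index for use with the EP_READ/EP_WRITE/EP_DISABLE ioctls.
 */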
static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i;
	unsigned long flags;
	struct usb_endpoint_descriptor *desc;
	struct raw_ep *ep;

	desc = memdup_user((void __user *)value, sizeof(*desc));
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/*
	 * Endpoints with a maxpacket length of 0 can cause crashes in UDC
	 * drivers.
	 */
	if (usb_endpoint_maxp(desc) == 0) {
		dev_dbg(dev->dev, "fail, bad endpoint maxpacket\n");
		kfree(desc);
		return -EINVAL;
	}

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_free;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_free;
	}

	for (i = 0; i < dev->eps_num; i++) {
		ep = &dev->eps[i];
		if (ep->state != STATE_EP_DISABLED)
			continue;
		if (ep->addr != usb_endpoint_num(desc) &&
				ep->addr != USB_RAW_EP_ADDR_ANY)
			continue;
		if (!usb_gadget_ep_match_desc(dev->gadget, ep->ep, desc, NULL))
			continue;
		ep->ep->desc = desc;
		ret = usb_ep_enable(ep->ep);
		if (ret < 0) {
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_enable returned %d\n", ret);
			goto out_free;
		}
		ep->req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
		if (!ep->req) {
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_alloc_request failed\n");
			usb_ep_disable(ep->ep);
			ret = -ENOMEM;
			goto out_free;
		}
		ep->state = STATE_EP_ENABLED;
		ep->ep->driver_data = ep;
		ret = i;
		goto out_unlock;
	}

	dev_dbg(&dev->gadget->dev, "fail, no gadget endpoints available\n");
	ret = -EBUSY;

out_free:
	kfree(desc);
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i = value;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (i < 0 || i >= dev->eps_num) {
		dev_dbg(dev->dev, "fail, invalid endpoint\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->eps[i].state == STATE_EP_DISABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].disabling) {
		dev_dbg(&dev->gadget->dev,
			"fail, disable already in progress\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].urb_queued) {
		dev_dbg(&dev->gadget->dev,
			"fail, waiting for urb completion\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->eps[i].disabling = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	usb_ep_disable(dev->eps[i].ep);

	spin_lock_irqsave(&dev->lock, flags);
	usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
	kfree(dev->eps[i].ep->desc);
	dev->eps[i].state = STATE_EP_DISABLED;
	dev->eps[i].disabling = false;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep_set_clear_halt_wedge(struct raw_dev *dev,
		unsigned long value, bool set, bool halt)
{
	int ret = 0, i = value;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (i < 0 || i >= dev->eps_num) {
		dev_dbg(dev->dev, "fail, invalid endpoint\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->eps[i].state == STATE_EP_DISABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].disabling) {
		dev_dbg(&dev->gadget->dev,
			"fail, disable is in progress\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].urb_queued) {
		dev_dbg(&dev->gadget->dev,
			"fail, waiting for urb completion\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (usb_endpoint_xfer_isoc(dev->eps[i].ep->desc)) {
		dev_dbg(&dev->gadget->dev,
			"fail, can't halt/wedge ISO endpoint\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (set && halt) {
		ret = usb_ep_set_halt(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_set_halt returned %d\n", ret);
	} else if (!set && halt) {
		ret = usb_ep_clear_halt(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_clear_halt returned %d\n", ret);
	} else if (set && !halt) {
		ret = usb_ep_set_wedge(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_set_wedge returned %d\n", ret);
	}

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static void gadget_ep_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct raw_ep *r_ep = (struct raw_ep *)ep->driver_data;
	struct raw_dev *dev = r_ep->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (req->status)
		r_ep->status = req->status;
	else
		r_ep->status = req->actual;
	spin_unlock_irqrestore(&dev->lock, flags);

	complete((struct completion *)req->context);
}

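/*
 * Queues a request on a non-control endpoint and sleeps until the
 * completion callback (or an interrupting signal) reports the result.
 */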
static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
				void *data, bool in)
{
	int ret = 0;
	unsigned long flags;
	struct raw_ep *ep;
	DECLARE_COMPLETION_ONSTACK(done);

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (io->ep >= dev->eps_num) {
		dev_dbg(&dev->gadget->dev, "fail, invalid endpoint\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	ep = &dev->eps[io->ep];
	if (ep->state != STATE_EP_ENABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (ep->disabling) {
		dev_dbg(&dev->gadget->dev,
			"fail, endpoint is already being disabled\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (ep->urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (in != usb_endpoint_dir_in(ep->ep->desc)) {
		dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	ep->dev = dev;
	ep->req->context = &done;
	ep->req->complete = gadget_ep_complete;
	ep->req->buf = data;
	ep->req->length = io->length;
	ep->req->zero = usb_raw_io_flags_zero(io->flags);
	ep->urb_queued = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(ep->ep, ep->req, GFP_KERNEL);
	if (ret) {
		dev_err(&dev->gadget->dev,
				"fail, usb_ep_queue returned %d\n", ret);
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		goto out_queue_failed;
	}

	ret = wait_for_completion_interruptible(&done);
	if (ret) {
		dev_dbg(&dev->gadget->dev, "wait interrupted\n");
		usb_ep_dequeue(ep->ep, ep->req);
		wait_for_completion(&done);
		spin_lock_irqsave(&dev->lock, flags);
		if (ep->status == -ECONNRESET)
			ep->status = -EINTR;
		goto out_interrupted;
	}

	spin_lock_irqsave(&dev->lock, flags);

out_interrupted:
	ret = ep->status;
out_queue_failed:
	ep->urb_queued = false;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep_write(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	char *data;
	struct usb_raw_ep_io io;

	data = raw_alloc_io_data(&io, (void __user *)value, true);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep_io(dev, &io, data, true);
	kfree(data);
	return ret;
}

static int raw_ioctl_ep_read(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	char *data;
	struct usb_raw_ep_io io;
	unsigned int length;

	data = raw_alloc_io_data(&io, (void __user *)value, false);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep_io(dev, &io, data, false);
	if (ret < 0)
		goto free;

	length = min(io.length, (unsigned int)ret);
	if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
		ret = -EFAULT;
	else
		ret = length;
free:
	kfree(data);
	return ret;
}

static int raw_ioctl_configure(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	if (value)
		return -EINVAL;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	usb_gadget_set_state(dev->gadget, USB_STATE_CONFIGURED);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_vbus_draw(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	usb_gadget_vbus_draw(dev->gadget, 2 * value);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static void fill_ep_caps(struct usb_ep_caps *caps,
				struct usb_raw_ep_caps *raw_caps)
{
	raw_caps->type_control = caps->type_control;
	raw_caps->type_iso = caps->type_iso;
	raw_caps->type_bulk = caps->type_bulk;
	raw_caps->type_int = caps->type_int;
	raw_caps->dir_in = caps->dir_in;
	raw_caps->dir_out = caps->dir_out;
}

static void fill_ep_limits(struct usb_ep *ep, struct usb_raw_ep_limits *limits)
{
	limits->maxpacket_limit = ep->maxpacket_limit;
	limits->max_streams = ep->max_streams;
}

static int raw_ioctl_eps_info(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i;
	unsigned long flags;
	struct usb_raw_eps_info *info;
	struct raw_ep *ep;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_free;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_free;
	}

	memset(info, 0, sizeof(*info));
	for (i = 0; i < dev->eps_num; i++) {
		ep = &dev->eps[i];
		strscpy(&info->eps[i].name[0], ep->ep->name,
				USB_RAW_EP_NAME_MAX);
		info->eps[i].addr = ep->addr;
		fill_ep_caps(&ep->ep->caps, &info->eps[i].caps);
		fill_ep_limits(ep->ep, &info->eps[i].limits);
	}
	ret = dev->eps_num;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (copy_to_user((void __user *)value, info, sizeof(*info)))
		ret = -EFAULT;

out_free:
	kfree(info);
out:
	return ret;
}

static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value)
{
	struct raw_dev *dev = fd->private_data;
	int ret = 0;

	if (!dev)
		return -EBUSY;

	switch (cmd) {
	case USB_RAW_IOCTL_INIT:
		ret = raw_ioctl_init(dev, value);
		break;
	case USB_RAW_IOCTL_RUN:
		ret = raw_ioctl_run(dev, value);
		break;
	case USB_RAW_IOCTL_EVENT_FETCH:
		ret = raw_ioctl_event_fetch(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_WRITE:
		ret = raw_ioctl_ep0_write(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_READ:
		ret = raw_ioctl_ep0_read(dev, value);
		break;
	case USB_RAW_IOCTL_EP_ENABLE:
		ret = raw_ioctl_ep_enable(dev, value);
		break;
	case USB_RAW_IOCTL_EP_DISABLE:
		ret = raw_ioctl_ep_disable(dev, value);
		break;
	case USB_RAW_IOCTL_EP_WRITE:
		ret = raw_ioctl_ep_write(dev, value);
		break;
	case USB_RAW_IOCTL_EP_READ:
		ret = raw_ioctl_ep_read(dev, value);
		break;
	case USB_RAW_IOCTL_CONFIGURE:
		ret = raw_ioctl_configure(dev, value);
		break;
	case USB_RAW_IOCTL_VBUS_DRAW:
		ret = raw_ioctl_vbus_draw(dev, value);
		break;
	case USB_RAW_IOCTL_EPS_INFO:
		ret = raw_ioctl_eps_info(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_STALL:
		ret = raw_ioctl_ep0_stall(dev, value);
		break;
	case USB_RAW_IOCTL_EP_SET_HALT:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, true, true);
		break;
	case USB_RAW_IOCTL_EP_CLEAR_HALT:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, false, true);
		break;
	case USB_RAW_IOCTL_EP_SET_WEDGE:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, true, false);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*----------------------------------------------------------------------*/

static const struct file_operations raw_fops = {
	.open = raw_open,
	.unlocked_ioctl = raw_ioctl,
	.compat_ioctl = raw_ioctl,
	.release = raw_release,
	.llseek = no_llseek,
};

static struct miscdevice raw_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DRIVER_NAME,
	.fops = &raw_fops,
};

module_misc_device(raw_misc_device);