1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * f_fs.c -- user mode file system API for USB composite function controllers
4 *
5 * Copyright (C) 2010 Samsung Electronics
6 * Author: Michal Nazarewicz <mina86@mina86.com>
7 *
8 * Based on inode.c (GadgetFS) which was:
9 * Copyright (C) 2003-2004 David Brownell
10 * Copyright (C) 2003 Agilent Technologies
11 */
12
13 /* #define DEBUG */
14 /* #define VERBOSE_DEBUG */
15
16 #include <linux/export.h>
17 #include <linux/hid.h>
18 #include <linux/miscdevice.h>
19 #include <linux/usb/functionfs.h>
20 #include <linux/kfifo.h>
21 #include <linux/module.h>
22 #include <linux/poll.h>
23 #include <linux/eventfd.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/usb/cdc.h>
26 #include <linux/interrupt.h>
27 #include "u_generic.h"
28 #include "u_f.h"
29 #include "u_os_desc.h"
30 #include "configfs.h"
31
#define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by a honest dice roll ;) */

/* Reference counter handling */
static void ffs_data_get(struct ffs_data *ffs);
static void ffs_data_put(struct ffs_data *ffs);
/* Creates new ffs_data object. */
static struct ffs_data *__must_check ffs_data_new(const char *dev_name)
	__attribute__((malloc));

/* Called with ffs->mutex held; take over ownership of data. */
static int __must_check
__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
static int __must_check
__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
46
/* The function structure ***************************************************/

struct ffs_ep;

/* One bound instance of the FunctionFS USB function. */
struct ffs_function {
	struct usb_configuration *conf;		/* configuration we are bound to */
	struct usb_gadget *gadget;		/* UDC this function runs on */
	struct ffs_data *ffs;			/* backing user-space state */

	struct ffs_ep *eps;			/* array of endpoint states */
	u8 eps_revmap[16];			/* endpoint address -> eps[] index */
	short *interfaces_nums;			/* local intf number -> composite intf id */

	struct usb_function function;		/* embedded composite-framework handle */
};
/* Map a composite-framework usb_function back to its enclosing ffs_function. */
static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
{
	return container_of(f, struct ffs_function, function);
}
/*
 * Atomically replace FFS_SETUP_CANCELLED with FFS_NO_SETUP.  Returns the
 * setup_state observed *before* the exchange, so a caller can detect that
 * a pending setup was cancelled (return value == FFS_SETUP_CANCELLED)
 * while simultaneously acknowledging the cancellation.
 */
static inline enum ffs_setup_state ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
{
	return (enum ffs_setup_state)
		cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
}
static void ffs_func_eps_disable(struct ffs_function *func);
static int __must_check ffs_func_eps_enable(struct ffs_function *func);

/* usb_function operations implemented below. */
static int ffs_func_bind(struct usb_configuration *,
			 struct usb_function *);
static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
static void ffs_func_disable(struct usb_function *);
static int ffs_func_setup(struct usb_function *,
			  const struct usb_ctrlrequest *);
static bool ffs_func_req_match(struct usb_function *,
			       const struct usb_ctrlrequest *,
			       bool config0);
static void ffs_func_suspend(struct usb_function *);
static void ffs_func_resume(struct usb_function *);

/* Reverse-map hardware endpoint/interface numbers to local indices. */
static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
/* The endpoints structures *************************************************/
/* Per-endpoint state; "P:" notes which lock protects a field. */
struct ffs_ep {
	struct usb_ep *ep; /* P: ffs->eps_lock */
	struct usb_request *req; /* P: epfile->mutex */

	/* [0]: full speed, [1]: high speed, [2]: super speed */
	struct usb_endpoint_descriptor *descs[3];

	u8 num;			/* endpoint number from the descriptors */

	int status; /* P: epfile->mutex */
};
101
/* Per-endpoint character device (one /dev node per non-control endpoint). */
struct ffs_epfile {
	/* Protects ep->ep and ep->req. */
	struct mutex mutex;
	struct list_head memory_list;	/* mmap()ed regions for zero-copy I/O */
	struct ffs_data *ffs;		/* owning function instance */
	struct ffs_ep *ep; /* P: ffs->eps_lock */
	/*
	 * Buffer for holding data from partial reads which may happen since
	 * we’re rounding user read requests to a multiple of a max packet size.
	 *
	 * The pointer is initialised with NULL value and may be set by
	 * __ffs_epfile_read_data function to point to a temporary buffer.
	 *
	 * In normal operation, calls to __ffs_epfile_read_buffered will consume
	 * data from said buffer and eventually free it. Importantly, while the
	 * function is using the buffer, it sets the pointer to NULL. This is
	 * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered
	 * can never run concurrently (they are synchronised by epfile->mutex)
	 * so the latter will not assign a new value to the pointer.
	 *
	 * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is
	 * valid) and sets the pointer to READ_BUFFER_DROP value. This special
	 * value is crux of the synchronisation between ffs_func_eps_disable and
	 * __ffs_epfile_read_data.
	 *
	 * Once __ffs_epfile_read_data is about to finish it will try to set the
	 * pointer back to its old value (as described above), but seeing as the
	 * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free
	 * the buffer.
	 *
	 * == State transitions ==
	 *
	 * • ptr == NULL: (initial state)
	 *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP
	 *   ◦ __ffs_epfile_read_buffered:    nop
	 *   ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf
	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
	 * • ptr == DROP:
	 *   ◦ __ffs_epfile_read_buffer_free: nop
	 *   ◦ __ffs_epfile_read_buffered:    go to ptr == NULL
	 *   ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop
	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
	 * • ptr == buf:
	 *   ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP
	 *   ◦ __ffs_epfile_read_buffered:    go to ptr == NULL and reading
	 *   ◦ __ffs_epfile_read_data:        n/a, __ffs_epfile_read_buffered
	 *                                    is always called first
	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
	 * • ptr == NULL and reading:
	 *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading
	 *   ◦ __ffs_epfile_read_buffered:    n/a, mutex is held
	 *   ◦ __ffs_epfile_read_data:        n/a, mutex is held
	 *   ◦ reading finishes and …
	 *     … all data read:               free buf, go to ptr == NULL
	 *     … otherwise:                   go to ptr == buf and reading
	 * • ptr == DROP and reading:
	 *   ◦ __ffs_epfile_read_buffer_free: nop
	 *   ◦ __ffs_epfile_read_buffered:    n/a, mutex is held
	 *   ◦ __ffs_epfile_read_data:        n/a, mutex is held
	 *   ◦ reading finishes:              free buf, go to ptr == DROP
	 */
	struct ffs_buffer *read_buffer;
#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))

	char name[MAX_NAMELEN];		/* device node name */
	dev_t devno;			/* allocated char device number */
	struct cdev cdev;		/* char device backing this epfile */
	struct device *device;		/* sysfs/devtmpfs device */

	unsigned char in;	/* P: ffs->eps_lock */
	unsigned char isoc;	/* P: ffs->eps_lock */

	struct kfifo reqEventFifo;	/* completed-request events for user space */
	wait_queue_head_t wait_que;	/* waiters polling reqEventFifo */

	unsigned char _pad;
};
179
/* Spill buffer for partial endpoint reads (see ffs_epfile::read_buffer). */
struct ffs_buffer {
	size_t length;		/* bytes still unconsumed, starting at data */
	char *data;		/* read cursor into storage[] */
	char storage[];		/* flexible array holding the actual bytes */
};
185
/* ffs_io_data structure ***************************************************/

/*
 * Description of one read/write request submitted through the ioctl
 * interface.  The leading fields mirror the user-space struct IoData
 * (ffs_ep0_ioctl() memcpy()s one over the other) — NOTE(review): this
 * relies on the two layouts matching; confirm against struct IoData.
 */
struct ffs_io_data {
	uint32_t aio;		/* non-zero: asynchronous submission */
	uint32_t read;		/* non-zero: OUT transfer (device read) */
	uint32_t len;		/* requested transfer length in bytes */
	uint32_t timeout;	/* sync wait timeout, 0 = wait forever */
	uint64_t buf;		/* user VA inside an mmap()ed region */
	uint32_t actual;	/* bytes actually transferred (completion) */
	int status;		/* request status (completion) */
	struct tasklet_struct task;	/* bottom half for async completion */
	struct usb_ep *ep;		/* endpoint the request was queued on */
	struct usb_request *req;	/* the in-flight usb_request */
	struct ffs_epfile *epfile;	/* owning epfile (endpoint I/O only) */
	struct ffs_data *ffs;		/* owning instance (ep0 I/O only) */
};

/* Per-speed counters gathered while parsing user-supplied descriptors. */
struct ffs_desc_helper {
	struct ffs_data *ffs;
	unsigned interfaces_count;
	unsigned eps_count;
};
208
static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);

/* Devices management *******************************************************/

/* Serialises access to the global ffs_dev registry below. */
DEFINE_MUTEX(ffs_lock_adapter);
EXPORT_SYMBOL_GPL(ffs_lock_adapter);

static struct ffs_dev *_ffs_find_dev(const char *name);
static struct ffs_dev *_ffs_alloc_dev(void);
static void _ffs_free_dev(struct ffs_dev *dev);
static void *ffs_acquire_dev(const char *dev_name);
static void ffs_release_dev(struct ffs_data *ffs_data);
static int ffs_ready(struct ffs_data *ffs);
static void ffs_closed(struct ffs_data *ffs);

/* Misc helper functions ****************************************************/

static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
	__attribute__((warn_unused_result, nonnull));
static char *ffs_prepare_buffer(const char __user *buf, size_t len)
	__attribute__((warn_unused_result, nonnull));

/* Device class for the functionfs/<name> nodes. */
struct class *ffs_class;
233
/*
 * devtmpfs naming callback: place our nodes under functionfs/<dev-name>
 * and make them world read/write.
 */
static char *ffs_devnode(const struct device *dev, umode_t *mode)
{
	const char *base = dev_name(dev);

	if (mode != NULL)
		*mode = 0666;

	return kasprintf(GFP_KERNEL, "functionfs/%s", base);
}
240
241 /* Control file aka ep0 *****************************************************/
/*
 * Find the mmap()ed region of @ffs that fully contains the user range
 * [@buf, @buf + @len).  Walks ffs->memory_list under ffs->mem_lock and
 * returns the matching entry, or NULL when no registered region covers
 * the whole range.
 */
static struct ffs_memory *generic_find_ep0_memory_area(struct ffs_data *ffs, uint64_t buf, uint32_t len)
{
	struct ffs_memory *found = NULL;
	struct ffs_memory *mem = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ffs->mem_lock, flags);
	list_for_each_entry(mem, &ffs->memory_list, memlist) {
		uint64_t end = mem->vm_start + mem->size;

		if (buf < mem->vm_start || buf >= end)
			continue;
		/* Range starts inside; require it to also end inside. */
		if (len > end - buf)
			continue;
		found = mem;
		break;
	}
	spin_unlock_irqrestore(&ffs->mem_lock, flags);

	return found;
}
262
/*
 * Completion callback for synchronous ep0 requests: wake the waiter in
 * __ffs_ep0_queue_wait()/ffs_ep0_iorw() and mark the setup as handled.
 */
static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct ffs_data *ffs = req->context;

	complete(&ffs->ep0req_completion);

	/* The control transfer is done; no setup is pending any more. */
	ffs->setup_state = FFS_NO_SETUP;
}
271
/*
 * Completion callback for asynchronous ep0 requests submitted through
 * FUNCTIONFS_ENDPOINT_READ/WRITE with aio set.  Publishes the result to
 * user space via the event fifo, then frees the request and io_data.
 */
static void ffs_ep0_async_io_complete(struct usb_ep *_ep, struct usb_request *req)
{
	struct ffs_io_data *io_data = req->context;
	struct ffs_data *ffs = io_data->ffs;
	ENTER();

	io_data->status = io_data->req->status;
	io_data->actual = io_data->req->actual;
	/*
	 * NOTE(review): copies sizeof(struct UsbFnReqEvent) bytes starting
	 * at io_data->buf — assumes buf/actual/status are laid out exactly
	 * like struct UsbFnReqEvent; confirm against that definition.
	 */
	kfifo_in(&ffs->reqEventFifo, &io_data->buf, sizeof(struct UsbFnReqEvent));
	wake_up_all(&ffs->wait_que);

	/* Unlink from the list ffs_ep0_iorw() chained this request onto. */
	list_del(&req->list);
	usb_ep_free_request(io_data->ep, io_data->req);
	kfree(io_data);

}
288
/*
 * Queue @data (@len bytes) on ep0 and sleep until the transfer finishes.
 * Called with ffs->ev.waitq.lock held; the lock is dropped before
 * queueing (hence __releases).  Returns bytes transferred, the request's
 * error status, or -EINTR if the wait was interrupted (in which case the
 * request is dequeued first).
 */
static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
	__releases(&ffs->ev.waitq.lock)
{
	struct usb_request *req = ffs->ep0req;
	int ret;

	/* Send a ZLP terminator if we return less than the host asked for. */
	req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);

	spin_unlock_irq(&ffs->ev.waitq.lock);

	req->buf = data;
	req->length = len;

	/*
	 * UDC layer requires to provide a buffer even for ZLP, but should
	 * not use it at all. Let's provide some poisoned pointer to catch
	 * possible bug in the driver.
	 */
	if (req->buf == NULL)
		req->buf = (void *)0xDEADBABE;

	reinit_completion(&ffs->ep0req_completion);

	ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
	if (unlikely(ret)) {
		/* Interrupted: take the request back before reporting. */
		usb_ep_dequeue(ffs->gadget->ep0, req);
		return -EINTR;
	}

	ffs->setup_state = FFS_NO_SETUP;
	return req->status ? req->status : req->actual;
}
325
__ffs_ep0_stall(struct ffs_data *ffs)326 static int __ffs_ep0_stall(struct ffs_data *ffs)
327 {
328 if (ffs->ev.can_stall) {
329 pr_vdebug("ep0 stall\n");
330 usb_ep_set_halt(ffs->gadget->ep0);
331 ffs->setup_state = FFS_NO_SETUP;
332 return -EL2HLT;
333 } else {
334 pr_debug("bogus ep0 stall!\n");
335 return -ESRCH;
336 }
337 }
338
/*
 * write(2) on the ep0 node.  Behaviour depends on ffs->state:
 *  - FFS_READ_DESCRIPTORS / FFS_READ_STRINGS: user space uploads the
 *    descriptor resp. string blobs; a successful strings upload creates
 *    the endpoint files and activates the function.
 *  - FFS_ACTIVE: sends the data stage of the pending IN setup request.
 */
static ssize_t ffs_ep0_write(struct file *file, const char __user *buf, size_t len, loff_t *ptr)
{
	struct ffs_data *ffs = file->private_data;
	ssize_t ret;
	char *data = NULL;

	ENTER();

	/* Fast check if setup was canceled */
	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
		return -EIDRM;

	/* Acquire mutex */
	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))
		return ret;

	/* Check state */
	switch (ffs->state) {
	case FFS_READ_DESCRIPTORS:
	case FFS_READ_STRINGS:
		/* Copy data; anything shorter than a header is malformed. */
		if (unlikely(len < 16)) {
			ret = -EINVAL;
			break;
		}

		data = ffs_prepare_buffer(buf, len);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		/* Handle data; __ffs_data_got_* take ownership of data. */
		if (ffs->state == FFS_READ_DESCRIPTORS) {
			pr_info("read descriptors\n");
			ret = __ffs_data_got_descs(ffs, data, len);
			if (unlikely(ret < 0))
				break;

			ffs->state = FFS_READ_STRINGS;
			ret = len;
		} else {
			pr_info("read strings\n");
			ret = __ffs_data_got_strings(ffs, data, len);
			if (unlikely(ret < 0))
				break;

			ret = ffs_epfiles_create(ffs);
			if (unlikely(ret)) {
				ffs->state = FFS_CLOSING;
				break;
			}

			ffs->state = FFS_ACTIVE;
			/* Drop the mutex before calling out to ffs_ready(). */
			mutex_unlock(&ffs->mutex);

			ret = ffs_ready(ffs);
			if (unlikely(ret < 0)) {
				ffs->state = FFS_CLOSING;
				return ret;
			}

			return len;
		}
		break;

	case FFS_ACTIVE:
		data = NULL;
		/*
		 * We're called from user space, we can use _irq
		 * rather then _irqsave
		 */
		spin_lock_irq(&ffs->ev.waitq.lock);
		switch (ffs_setup_state_clear_cancelled(ffs)) {
		case FFS_SETUP_CANCELLED:
			ret = -EIDRM;
			goto done_spin;

		case FFS_NO_SETUP:
			ret = -ESRCH;
			goto done_spin;

		case FFS_SETUP_PENDING:
			break;
		}

		/* FFS_SETUP_PENDING: only an IN setup has a writable data stage. */
		if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
			spin_unlock_irq(&ffs->ev.waitq.lock);
			ret = __ffs_ep0_stall(ffs);
			break;
		}

		/* FFS_SETUP_PENDING and not stall; clamp to what the host asked for. */
		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));

		spin_unlock_irq(&ffs->ev.waitq.lock);

		data = ffs_prepare_buffer(buf, len);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		spin_lock_irq(&ffs->ev.waitq.lock);

		/*
		 * We are guaranteed to be still in FFS_ACTIVE state
		 * but the state of setup could have changed from
		 * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
		 * to check for that. If that happened we copied data
		 * from user space in vain but it's unlikely.
		 *
		 * For sure we are not in FFS_NO_SETUP since this is
		 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
		 * transition can be performed and it's protected by
		 * mutex.
		 */
		if (ffs_setup_state_clear_cancelled(ffs) ==
		    FFS_SETUP_CANCELLED) {
			ret = -EIDRM;
done_spin:
			spin_unlock_irq(&ffs->ev.waitq.lock);
		} else {
			/* unlocks spinlock */
			ret = __ffs_ep0_queue_wait(ffs, data, len);
		}
		kfree(data);
		break;

	default:
		ret = -EBADFD;
		break;
	}

	mutex_unlock(&ffs->mutex);
	return ret;
}
478
/* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf, size_t n)
	__releases(&ffs->ev.waitq.lock)
{
	/*
	 * n cannot be bigger than ffs->ev.count, which cannot be bigger than
	 * size of ffs->ev.types array (which is four) so that's how much space
	 * we reserve.
	 */
	struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
	const size_t size = n * sizeof *events;
	unsigned i = 0;

	memset(events, 0, size);

	/* Materialise the first n queued event types into full events. */
	do {
		events[i].type = ffs->ev.types[i];
		if (events[i].type == FUNCTIONFS_SETUP) {
			events[i].u.setup = ffs->ev.setup;
			/* Delivering a SETUP event arms the data/status stage. */
			ffs->setup_state = FFS_SETUP_PENDING;
		}
	} while (++i < n);

	/* Shift any events we did not consume to the front of the queue. */
	ffs->ev.count -= n;
	if (ffs->ev.count)
		memmove(ffs->ev.types, ffs->ev.types + n, ffs->ev.count * sizeof *ffs->ev.types);

	spin_unlock_irq(&ffs->ev.waitq.lock);
	mutex_unlock(&ffs->mutex);

	return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
}
511
/*
 * read(2) on the ep0 node.  With no setup pending it blocks for and
 * returns queued functionfs events; with an OUT setup pending it reads
 * the setup's data stage; an IN setup is stalled instead.
 */
static ssize_t ffs_ep0_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
{
	struct ffs_data *ffs = file->private_data;
	char *data = NULL;
	size_t n;
	int ret;

	ENTER();

	/* Fast check if setup was canceled */
	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
		return -EIDRM;

	/* Acquire mutex */
	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))
		return ret;

	/* Check state */
	if (ffs->state != FFS_ACTIVE) {
		ret = -EBADFD;
		goto done_mutex;
	}

	/*
	 * We're called from user space, we can use _irq rather then
	 * _irqsave
	 */
	spin_lock_irq(&ffs->ev.waitq.lock);

	switch (ffs_setup_state_clear_cancelled(ffs)) {
	case FFS_SETUP_CANCELLED:
		ret = -EIDRM;
		break;

	case FFS_NO_SETUP:
		/* Event-read path: need room for at least one event. */
		n = len / sizeof(struct usb_functionfs_event);
		if (unlikely(!n)) {
			ret = -EINVAL;
			break;
		}

		if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
			ret = -EAGAIN;
			break;
		}

		if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
							ffs->ev.count)) {
			ret = -EINTR;
			break;
		}

		/* unlocks spinlock */
		return __ffs_ep0_read_events(ffs, buf,
					     min(n, (size_t)ffs->ev.count));

	case FFS_SETUP_PENDING:
		/* A host-to-device setup has no readable data stage: stall. */
		if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
			spin_unlock_irq(&ffs->ev.waitq.lock);
			ret = __ffs_ep0_stall(ffs);
			goto done_mutex;
		}

		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));

		spin_unlock_irq(&ffs->ev.waitq.lock);

		if (likely(len)) {
			data = kmalloc(len, GFP_KERNEL);
			if (unlikely(!data)) {
				ret = -ENOMEM;
				goto done_mutex;
			}
		}

		spin_lock_irq(&ffs->ev.waitq.lock);

		/* See ffs_ep0_write() */
		if (ffs_setup_state_clear_cancelled(ffs) ==
		    FFS_SETUP_CANCELLED) {
			ret = -EIDRM;
			break;
		}

		/* unlocks spinlock */
		ret = __ffs_ep0_queue_wait(ffs, data, len);
		if (likely(ret > 0) && unlikely(copy_to_user(buf, data, len)))
			ret = -EFAULT;
		goto done_mutex;

	default:
		ret = -EBADFD;
		break;
	}

	spin_unlock_irq(&ffs->ev.waitq.lock);
done_mutex:
	mutex_unlock(&ffs->mutex);
	kfree(data);
	return ret;
}
614
ffs_ep0_open(struct inode *inode, struct file *file)615 static int ffs_ep0_open(struct inode *inode, struct file *file)
616 {
617 struct ffs_data *ffs = container_of(inode->i_cdev, struct ffs_data, cdev);
618 ENTER();
619
620 if (unlikely(ffs->state == FFS_CLOSING))
621 return -EBUSY;
622
623 file->private_data = ffs;
624 return 0;
625 }
626
/* release(2) on the ep0 node: nothing to tear down here; the ffs_data
 * lifetime is managed by ffs_data_get()/ffs_data_put(), not this fd. */
static int ffs_ep0_release(struct inode *inode, struct file *file)
{
	ENTER();
	return 0;
}
632
/*
 * Perform one ep0 transfer described by @io_data, using a buffer inside
 * a previously mmap()ed region.  Synchronous path queues ffs->ep0req and
 * waits (optionally with timeout); asynchronous path allocates a fresh
 * request and returns -EIOCBQUEUED, completion being reported through
 * ffs_ep0_async_io_complete().
 */
static ssize_t ffs_ep0_iorw(struct file *file, struct ffs_io_data *io_data)
{
	struct ffs_data *ffs = file->private_data;
	struct usb_request *req = NULL;
	ssize_t ret, data_len = io_data->len;
	bool interrupted = false;
	struct ffs_memory *ffsm = NULL;

	/* Are we still active? */
	if (WARN_ON(ffs->state != FFS_ACTIVE))
		return -ENODEV;
	/* The user buffer must lie entirely inside an mmap()ed region. */
	ffsm = generic_find_ep0_memory_area(ffs, io_data->buf, data_len);
	if (ffsm == NULL)
	{
		return -ENODEV;
	}
	if (!io_data->aio) {
		reinit_completion(&ffs->ep0req_completion);

		req = ffs->ep0req;
		/* Translate the user VA into our kernel mapping of the region. */
		req->buf = (void *)(ffsm->mem + io_data->buf - ffsm->vm_start);
		req->length = data_len;
		req->complete = ffs_ep0_complete;

		ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
		if (unlikely(ret < 0))
			goto error;

		if (io_data->timeout > 0) {
			ret = wait_for_completion_interruptible_timeout(&ffs->ep0req_completion, io_data->timeout);
			if (ret < 0) {
				/*
				 * To avoid race condition with ffs_epfile_io_complete,
				 * dequeue the request first then check
				 * status. usb_ep_dequeue API should guarantee no race
				 * condition with req->complete callback.
				 */
				usb_ep_dequeue(ffs->gadget->ep0, req);
				wait_for_completion(&ffs->ep0req_completion);
				interrupted = req->status < 0;
			} else if (ret == 0) {
				/* Timed out: cancel and report busy. */
				ret = -EBUSY;
				usb_ep_dequeue(ffs->gadget->ep0, req);
				wait_for_completion(&ffs->ep0req_completion);
				goto error;
			}
		} else {
			ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
			if (ret < 0) {
				usb_ep_dequeue(ffs->gadget->ep0, req);
				wait_for_completion(&ffs->ep0req_completion);
				interrupted = req->status < 0;
			}
		}

		if (interrupted) {
			ret = -EINTR;
		} else {
			ret = req->actual;
		}
		goto error;
	}
	else if (!(req = usb_ep_alloc_request(ffs->gadget->ep0, GFP_ATOMIC))) {
		ret = -ENOMEM;
	}
	else {
		req->buf = (void *)(ffsm->mem + io_data->buf - ffsm->vm_start);
		req->length = data_len;

		io_data->ep = ffs->gadget->ep0;
		io_data->req = req;
		io_data->ffs = ffs;

		req->context = io_data;
		req->complete = ffs_ep0_async_io_complete;
		/*
		 * NOTE(review): the request is chained onto ep0req->list with
		 * no lock held, while the completion handler does list_del()
		 * — looks racy with concurrent submissions; verify.
		 */
		list_add(&req->list, &ffs->ep0req->list);
		ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
		if (unlikely(ret)) {
			usb_ep_free_request(ffs->gadget->ep0, req);
			goto error;
		}

		ret = -EIOCBQUEUED;
	}

error:
	return ret;
}
721
ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)722 static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
723 {
724 struct ffs_data *ffs = file->private_data;
725 long ret = 0;
726 unsigned int copied = 0;
727 struct ffs_memory *ffsm = NULL;
728 struct generic_memory mem;
729
730 ENTER();
731
732 switch (code) {
733 case FUNCTIONFS_ENDPOINT_QUEUE_INIT:
734 ret = kfifo_alloc(&ffs->reqEventFifo, MAX_REQUEST * sizeof(struct UsbFnReqEvent), GFP_KERNEL);
735 break;
736 case FUNCTIONFS_ENDPOINT_QUEUE_DEL:
737 kfifo_free(&ffs->reqEventFifo);
738 break;
739 case FUNCTIONFS_ENDPOINT_RELEASE_BUF:
740 if (copy_from_user(&mem, (void __user *)value, sizeof(mem)))
741 {
742 pr_info("copy from user failed\n");
743 return -EFAULT;
744 }
745 ffsm = generic_find_ep0_memory_area(ffs, mem.buf, mem.size);
746 if (ffsm == NULL)
747 {
748 return -EFAULT;
749 }
750 list_del(&ffsm->memlist);
751 kfree((void *)ffsm->mem);
752 kfree(ffsm);
753 break;
754 case FUNCTIONFS_ENDPOINT_READ:
755 case FUNCTIONFS_ENDPOINT_WRITE:
756 {
757 struct IoData myIoData;
758 struct ffs_io_data io_data, *p = &io_data;
759 ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData));
760 if (unlikely(ret)) {
761 return -EFAULT;
762 }
763 if (myIoData.aio) {
764 p = kmalloc(sizeof(io_data), GFP_KERNEL);
765 if (unlikely(!p))
766 return -ENOMEM;
767 } else {
768 memset(p, 0, sizeof(*p));
769 }
770 memcpy(p, &myIoData, sizeof(struct IoData));
771
772 ret = ffs_ep0_iorw(file, p);
773 if (ret == -EIOCBQUEUED) {
774 return 0;
775 }
776 if (p->aio)
777 kfree(p);
778 return ret;
779 }
780 case FUNCTIONFS_ENDPOINT_RW_CANCEL:
781 {
782 struct usb_request *req;
783 struct IoData myIoData;
784 ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData));
785 if (unlikely(ret)) {
786 return -EFAULT;
787 }
788 ffsm = generic_find_ep0_memory_area(ffs, myIoData.buf, myIoData.len);
789 if (ffsm == NULL)
790 {
791 return -EFAULT;
792 }
793 list_for_each_entry(req, &ffs->ep0req->list, list) {
794 if (req->buf == (void *)(ffsm->mem + myIoData.buf - ffsm->vm_start)) {
795 usb_ep_dequeue(ffs->gadget->ep0, req);
796 return 0;
797 }
798 }
799 return -EFAULT;
800 }
801 case FUNCTIONFS_ENDPOINT_GET_REQ_STATUS:
802 {
803 struct usb_request *req;
804 struct IoData myIoData;
805 ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData));
806 if (unlikely(ret)) {
807 return -EFAULT;
808 }
809 ffsm = generic_find_ep0_memory_area(ffs, myIoData.buf, myIoData.len);
810 if (ffsm == NULL)
811 {
812 return -EFAULT;
813 }
814 list_for_each_entry(req, &ffs->ep0req->list, list) {
815 if (req->buf == (void *)(ffsm->mem + myIoData.buf - ffsm->vm_start)) {
816 return req->status;
817 }
818 }
819 return -EFAULT;
820 }
821 case FUNCTIONFS_ENDPOINT_GET_EP0_EVENT:
822 if (!kfifo_is_empty(&ffs->reqEventFifo)) {
823 ret = kfifo_to_user(&ffs->reqEventFifo, (void __user *)value,
824 sizeof(struct UsbFnReqEvent), &copied) == 0 ? copied : -1;
825 if (ret > 0) {
826 ffs->setup_state = FFS_NO_SETUP;
827 return ret;
828 }
829 }
830
831 return -EFAULT;
832 }
833
834 return ret;
835 }
836
#ifdef CONFIG_COMPAT
/* 32-bit compat ioctl: all commands use layout-stable arguments, so the
 * native handler can be reused directly. */
static long ffs_ep0_compat_ioctl(struct file *file, unsigned code,
				 unsigned long value)
{
	return ffs_ep0_ioctl(file, code, value);
}
#endif
844
ffs_ep0_poll(struct file *file, poll_table *wait)845 static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
846 {
847 struct ffs_data *ffs = file->private_data;
848 __poll_t mask = EPOLLWRNORM;
849 int ret;
850
851 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
852 if (unlikely(ret < 0))
853 return mask;
854
855 switch (ffs->state) {
856 case FFS_READ_DESCRIPTORS:
857 case FFS_READ_STRINGS:
858 mask |= EPOLLOUT;
859 break;
860
861 case FFS_ACTIVE:
862 switch (ffs->setup_state) {
863 case FFS_NO_SETUP:
864 poll_wait(file, &ffs->ev.waitq, wait);
865 if (ffs->ev.count)
866 mask |= EPOLLIN;
867 break;
868
869 case FFS_SETUP_PENDING:
870 case FFS_SETUP_CANCELLED:
871 poll_wait(file, &ffs->wait_que, wait);
872 if (!kfifo_is_empty(&ffs->reqEventFifo))
873 {
874 mask |= EPOLLOUT;
875 }
876 break;
877 }
878 case FFS_CLOSING:
879 break;
880 case FFS_DEACTIVATED:
881 break;
882 }
883
884 mutex_unlock(&ffs->mutex);
885
886 return mask;
887 }
888
ffs_ep0_mmap(struct file *file, struct vm_area_struct *vma)889 static int ffs_ep0_mmap(struct file *file, struct vm_area_struct *vma)
890 {
891 struct ffs_data *ffs = file->private_data;
892 size_t size = vma->vm_end - vma->vm_start;
893 unsigned long flags;
894 struct ffs_memory *ffsm = NULL;
895 void *virt_mem = NULL;
896
897 if (ffs == NULL) {
898 pr_info("Invalid private parameter!\n");
899 return -EINVAL;
900 }
901 virt_mem = kmalloc(size, GFP_KERNEL);
902 if (virt_mem == NULL)
903 {
904 pr_info("%s alloc memory failed!\n", __FUNCTION__);
905 return -ENOMEM;
906 }
907 ffsm = kmalloc(sizeof(struct ffs_memory), GFP_KERNEL);
908 if (ffsm == NULL)
909 {
910 pr_info("%s alloc memory failed!\n", __FUNCTION__);
911 goto error_free_mem;
912 }
913 if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(virt_mem)>>PAGE_SHIFT,
914 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
915 goto error_free_ffsm;
916 }
917 ffsm->mem = (uint64_t)virt_mem;
918 ffsm->size = size;
919 ffsm->vm_start = vma->vm_start;
920 INIT_LIST_HEAD(&ffsm->memlist);
921 spin_lock_irqsave(&ffs->mem_lock, flags);
922 list_add_tail(&ffsm->memlist, &ffs->memory_list);
923 spin_unlock_irqrestore(&ffs->mem_lock, flags);
924 return 0;
925 error_free_ffsm:
926 kfree(ffsm);
927 error_free_mem:
928 kfree(virt_mem);
929 return -1;
930 }
931
/* File operations for the ep0 (control) character device node. */
static const struct file_operations ffs_ep0_operations = {
	.owner =	THIS_MODULE,
	.llseek =	no_llseek,
	.open =		ffs_ep0_open,
	.write =	ffs_ep0_write,
	.read =		ffs_ep0_read,
	.release =	ffs_ep0_release,
	.unlocked_ioctl =	ffs_ep0_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ffs_ep0_compat_ioctl,
#endif
	.poll =		ffs_ep0_poll,
	.mmap =		ffs_ep0_mmap,
};
946
947 /* "Normal" endpoints operations ********************************************/
/*
 * Find the mmap()ed region of @epfile fully containing [@buf, @buf+@len).
 * Returns the matching entry or NULL.  Unlike the ep0 variant this takes
 * no lock itself — NOTE(review): the caller appears responsible for
 * holding ffs->eps_lock while the list is walked; verify call sites.
 */
static struct ffs_memory *generic_find_memory_area(struct ffs_epfile *epfile, uint64_t buf, uint32_t len)
{
	struct ffs_memory *found = NULL, *mem = NULL;

	list_for_each_entry(mem, &epfile->memory_list, memlist) {
		uint64_t end = mem->vm_start + mem->size;

		if (buf < mem->vm_start || buf >= end)
			continue;
		/* Range starts inside; require it to also end inside. */
		if (len > end - buf)
			continue;
		found = mem;
		break;
	}

	return found;
}
964
/*
 * Completion callback for synchronous endpoint requests: stash the result
 * in the endpoint state and wake the waiter (req->context is the waiter's
 * completion; it is NULL when no one is waiting).
 */
static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
{
	ENTER();
	if (likely(req->context)) {
		struct ffs_ep *ep = _ep->driver_data;
		/* Collapse status/actual into one value: error, or byte count. */
		ep->status = req->status ? req->status : req->actual;
		complete(req->context);
	}
}
974
/*
 * Tasklet bottom half for async endpoint completions: publish the result
 * to the epfile's event fifo, release the request and io_data, then wake
 * pollers.  Runs in softirq context under eps_lock.
 */
static void epfile_task_proc(unsigned long context)
{
	struct ffs_io_data *io_data = (struct ffs_io_data *)context;
	struct ffs_epfile *epfile = io_data->epfile;
	unsigned long flags;

	spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
	io_data->status = io_data->req->status;
	io_data->actual = io_data->req->actual;
	/*
	 * NOTE(review): copies sizeof(struct UsbFnReqEvent) bytes starting at
	 * io_data->buf — assumes buf/actual/status match that layout; verify.
	 */
	kfifo_in(&epfile->reqEventFifo, &io_data->buf, sizeof(struct UsbFnReqEvent));
	list_del(&io_data->req->list);
	usb_ep_free_request(io_data->ep, io_data->req);
	kfree(io_data);
	spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
	wake_up_all(&epfile->wait_que);
}
991
/*
 * Completion callback for async endpoint requests: defer the real work
 * to a tasklet since this may run in interrupt context.  The tasklet is
 * (re)initialised on every completion because each io_data embeds its
 * own tasklet_struct.
 */
static void ffs_epfile_async_io_complete(struct usb_ep *_ep, struct usb_request *req)
{
	struct ffs_io_data *io_data = req->context;

	tasklet_init(&io_data->task, epfile_task_proc, (uintptr_t)io_data);
	tasklet_schedule(&io_data->task);

}
1000
ffs_epfile_open(struct inode *inode, struct file *file)1001 static int ffs_epfile_open(struct inode *inode, struct file *file)
1002 {
1003 struct ffs_epfile *epfile = container_of(inode->i_cdev, struct ffs_epfile, cdev);
1004 ENTER();
1005 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1006 return -ENODEV;
1007
1008 file->private_data = epfile;
1009 return 0;
1010 }
1011
/* release(2) on an endpoint node: nothing to tear down per-fd; epfile
 * lifetime is tied to the function, not to this file handle. */
static int ffs_epfile_release(struct inode *inode, struct file *file)
{
	ENTER();
	return 0;
}
1017
ffs_epfile_mmap(struct file *file, struct vm_area_struct *vma)1018 static int ffs_epfile_mmap(struct file *file, struct vm_area_struct *vma)
1019 {
1020 struct ffs_epfile *epfile = file->private_data;
1021 size_t size = vma->vm_end - vma->vm_start;
1022 struct ffs_memory *ffsm = NULL;
1023 unsigned long flags;
1024 void *virt_mem = NULL;
1025
1026 if (epfile == NULL)
1027 {
1028 pr_info("Invalid private parameter!\n");
1029 return -EINVAL;
1030 }
1031 virt_mem = kmalloc(size, GFP_KERNEL);
1032 if (virt_mem == NULL)
1033 {
1034 pr_info("%s alloc memory failed!\n", __FUNCTION__);
1035 return -ENOMEM;
1036 }
1037 ffsm = kmalloc(sizeof(struct ffs_memory), GFP_KERNEL);
1038 if (ffsm == NULL)
1039 {
1040 pr_info("%s alloc memory failed!\n", __FUNCTION__);
1041 goto error_free_mem;
1042 }
1043 if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(virt_mem)>>PAGE_SHIFT,
1044 vma->vm_end - vma->vm_start, vma->vm_page_prot))
1045 {
1046 goto error_free_ffsm;
1047 }
1048 ffsm->mem = (uint64_t)virt_mem;
1049 ffsm->size = size;
1050 ffsm->vm_start = vma->vm_start;
1051 INIT_LIST_HEAD(&ffsm->memlist);
1052 spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
1053 list_add_tail(&ffsm->memlist, &epfile->memory_list);
1054 spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
1055
1056 return 0;
1057 error_free_ffsm:
1058 kfree(ffsm);
1059 error_free_mem:
1060 kfree(virt_mem);
1061
1062 return -1;
1063 }
1064
ffs_epfile_iorw(struct file *file, struct ffs_io_data *io_data)1065 static ssize_t ffs_epfile_iorw(struct file *file, struct ffs_io_data *io_data)
1066 {
1067 struct ffs_epfile *epfile = file->private_data;
1068 struct usb_request *req = NULL;
1069 struct ffs_ep *ep = NULL;
1070 struct ffs_memory *ffsm = NULL;
1071 ssize_t ret, data_len = -EINVAL;
1072 int halt;
1073
1074 /* Are we still active? */
1075 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1076 return -ENODEV;
1077
1078 /* Wait for endpoint to be enabled */
1079 ep = epfile->ep;
1080 if (!ep) {
1081 if (file->f_flags & O_NONBLOCK)
1082 return -EAGAIN;
1083
1084 ret = wait_event_interruptible(
1085 epfile->ffs->wait, (ep = epfile->ep));
1086 if (ret)
1087 return -EINTR;
1088 }
1089
1090 /* Do we halt? */
1091 halt = (!io_data->read == !epfile->in);
1092 if (halt && epfile->isoc)
1093 return -EINVAL;
1094
1095 /* We will be using request and read_buffer */
1096 ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
1097 if (unlikely(ret))
1098 goto error;
1099
1100 /* Allocate & copy */
1101 if (!halt) {
1102 struct usb_gadget *gadget;
1103 /*
1104 * if we _do_ wait above, the epfile->ffs->gadget might be NULL
1105 * before the waiting completes, so do not assign to 'gadget'
1106 * earlier
1107 */
1108 gadget = epfile->ffs->gadget;
1109
1110 spin_lock_irq(&epfile->ffs->eps_lock);
1111 /* In the meantime, endpoint got disabled or changed. */
1112 if (epfile->ep != ep) {
1113 ret = -ESHUTDOWN;
1114 goto error_lock;
1115 }
1116 data_len = io_data->len;
1117 /*
1118 * Controller may require buffer size to be aligned to
1119 * maxpacketsize of an out endpoint.
1120 */
1121 if (io_data->read)
1122 data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
1123 spin_unlock_irq(&epfile->ffs->eps_lock);
1124 }
1125
1126 spin_lock_irq(&epfile->ffs->eps_lock);
1127 ffsm = generic_find_memory_area(epfile, io_data->buf, io_data->len);
1128 if (ffsm == NULL)
1129 {
1130 return -EFAULT;
1131 }
1132 if (epfile->ep != ep) {
1133 /* In the meantime, endpoint got disabled or changed. */
1134 ret = -ESHUTDOWN;
1135 }
1136 else if (halt) {
1137 ret = usb_ep_set_halt(ep->ep);
1138 if (!ret)
1139 ret = -EBADMSG;
1140 }
1141 else if (!io_data->aio) {
1142 DECLARE_COMPLETION_ONSTACK(done);
1143 bool interrupted = false;
1144
1145 req = ep->req;
1146 req->buf = (void *)(ffsm->mem + io_data->buf - ffsm->vm_start);
1147 req->length = data_len;
1148
1149 req->context = &done;
1150 req->complete = ffs_epfile_io_complete;
1151
1152 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
1153 if (unlikely(ret < 0))
1154 goto error_lock;
1155
1156 spin_unlock_irq(&epfile->ffs->eps_lock);
1157 if (io_data->timeout > 0) {
1158 ret = wait_for_completion_interruptible_timeout(&done, io_data->timeout);
1159 if (ret < 0) {
1160 /*
1161 * To avoid race condition with ffs_epfile_io_complete,
1162 * dequeue the request first then check
1163 * status. usb_ep_dequeue API should guarantee no race
1164 * condition with req->complete callback.
1165 */
1166 usb_ep_dequeue(ep->ep, req);
1167 wait_for_completion(&done);
1168 interrupted = ep->status < 0;
1169 } else if (ret == 0) {
1170 ret = -EBUSY;
1171 usb_ep_dequeue(ep->ep, req);
1172 wait_for_completion(&done);
1173 goto error_mutex;
1174 }
1175 } else {
1176 ret = wait_for_completion_interruptible(&done);
1177 if (ret < 0) {
1178 usb_ep_dequeue(ep->ep, req);
1179 wait_for_completion(&done);
1180 interrupted = ep->status < 0;
1181 }
1182 }
1183
1184 if (interrupted) {
1185 ret = -EINTR;
1186 } else {
1187 ret = req->actual;
1188 }
1189 goto error_mutex;
1190 }
1191 else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
1192 ret = -ENOMEM;
1193 }
1194 else {
1195 req->buf = (void *)(ffsm->mem + io_data->buf - ffsm->vm_start);
1196 req->length = data_len;
1197
1198 io_data->ep = ep->ep;
1199 io_data->req = req;
1200 io_data->epfile = epfile;
1201
1202 req->context = io_data;
1203 req->complete = ffs_epfile_async_io_complete;
1204 list_add(&req->list, &ep->req->list);
1205 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
1206 if (unlikely(ret)) {
1207 usb_ep_free_request(ep->ep, req);
1208 goto error_lock;
1209 }
1210
1211 ret = -EIOCBQUEUED;
1212 }
1213
1214 error_lock:
1215 spin_unlock_irq(&epfile->ffs->eps_lock);
1216 error_mutex:
1217 mutex_unlock(&epfile->mutex);
1218 error:
1219 return ret;
1220 }
1221
ffs_epfile_ioctl(struct file *file, unsigned code, unsigned long value)1222 static long ffs_epfile_ioctl(struct file *file, unsigned code, unsigned long value)
1223 {
1224 struct ffs_epfile *epfile = file->private_data;
1225 struct ffs_ep *ep = epfile->ep;
1226 int ret = 0;
1227 struct generic_memory mem;
1228 struct ffs_memory *ffsm = NULL;
1229
1230 ENTER();
1231
1232 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1233 return -ENODEV;
1234
1235 spin_lock_irq(&epfile->ffs->eps_lock);
1236
1237 switch (code) {
1238 case FUNCTIONFS_ENDPOINT_QUEUE_INIT:
1239 ret = kfifo_alloc(&epfile->reqEventFifo, MAX_REQUEST * sizeof(struct UsbFnReqEvent), GFP_KERNEL);
1240 break;
1241 case FUNCTIONFS_ENDPOINT_QUEUE_DEL:
1242 kfifo_free(&epfile->reqEventFifo);
1243 break;
1244 case FUNCTIONFS_ENDPOINT_RELEASE_BUF:
1245 if (copy_from_user(&mem, (void __user *)value, sizeof(mem)))
1246 {
1247 pr_info("copy from user failed\n");
1248 return -EFAULT;
1249 }
1250 ffsm = generic_find_memory_area(epfile, mem.buf, mem.size);
1251 if (ffsm == NULL)
1252 {
1253 return -EFAULT;
1254 }
1255 list_del(&ffsm->memlist);
1256 kfree((void *)ffsm->mem);
1257 kfree(ffsm);
1258 break;
1259 case FUNCTIONFS_ENDPOINT_READ:
1260 case FUNCTIONFS_ENDPOINT_WRITE:
1261 {
1262 struct IoData myIoData;
1263 struct ffs_io_data io_data, *p = &io_data;
1264 ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData));
1265 if (unlikely(ret)) {
1266 spin_unlock_irq(&epfile->ffs->eps_lock);
1267 return -EFAULT;
1268 }
1269 if (myIoData.aio) {
1270 p = kmalloc(sizeof(io_data), GFP_KERNEL);
1271 if (unlikely(!p)) {
1272 spin_unlock_irq(&epfile->ffs->eps_lock);
1273 return -ENOMEM;
1274 }
1275 } else {
1276 memset(p, 0, sizeof(*p));
1277 }
1278 memcpy(p, &myIoData, sizeof(struct IoData));
1279
1280 spin_unlock_irq(&epfile->ffs->eps_lock);
1281 ret = ffs_epfile_iorw(file, p);
1282 if (ret == -EIOCBQUEUED) {
1283 return 0;
1284 }
1285 if (p->aio)
1286 kfree(p);
1287 return ret;
1288 }
1289 case FUNCTIONFS_ENDPOINT_RW_CANCEL:
1290 {
1291 struct usb_request *req;
1292 struct IoData myIoData;
1293 if (!ep) {
1294 spin_unlock_irq(&epfile->ffs->eps_lock);
1295 return -EFAULT;
1296 }
1297 ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData));
1298 if (unlikely(ret)) {
1299 spin_unlock_irq(&epfile->ffs->eps_lock);
1300 return -EFAULT;
1301 }
1302 ffsm = generic_find_memory_area(epfile, myIoData.buf, myIoData.len);
1303 if (ffsm == NULL)
1304 {
1305 return -EFAULT;
1306 }
1307 list_for_each_entry(req, &epfile->ep->req->list, list) {
1308 if (req->buf == (void *)(ffsm->mem + myIoData.buf - ffsm->vm_start)) {
1309 usb_ep_dequeue(epfile->ep->ep, req);
1310 spin_unlock_irq(&epfile->ffs->eps_lock);
1311 return 0;
1312 }
1313 }
1314 spin_unlock_irq(&epfile->ffs->eps_lock);
1315 return -EFAULT;
1316 }
1317 case FUNCTIONFS_ENDPOINT_GET_REQ_STATUS:
1318 {
1319 struct usb_request *req;
1320 struct IoData myIoData;
1321 if (!ep) {
1322 spin_unlock_irq(&epfile->ffs->eps_lock);
1323 return -EFAULT;
1324 }
1325 ret = copy_from_user(&myIoData,(void __user *)value, sizeof(struct IoData));
1326 if (unlikely(ret)) {
1327 spin_unlock_irq(&epfile->ffs->eps_lock);
1328 return -EFAULT;
1329 }
1330 ffsm = generic_find_memory_area(epfile, myIoData.buf, myIoData.len);
1331 if (ffsm == NULL)
1332 {
1333 return -EFAULT;
1334 }
1335 list_for_each_entry(req, &epfile->ep->req->list, list) {
1336 if (req->buf == (void *)(ffsm->mem + myIoData.buf - ffsm->vm_start)) {
1337 spin_unlock_irq(&epfile->ffs->eps_lock);
1338 return req->status;
1339 }
1340 }
1341 spin_unlock_irq(&epfile->ffs->eps_lock);
1342 return -EFAULT;
1343 }
1344 case FUNCTIONFS_FIFO_STATUS:
1345 ret = usb_ep_fifo_status(epfile->ep->ep);
1346 break;
1347 case FUNCTIONFS_FIFO_FLUSH:
1348 usb_ep_fifo_flush(epfile->ep->ep);
1349 ret = 0;
1350 break;
1351 case FUNCTIONFS_CLEAR_HALT:
1352 ret = usb_ep_clear_halt(epfile->ep->ep);
1353 break;
1354 case FUNCTIONFS_ENDPOINT_REVMAP:
1355 ret = epfile->ep->num;
1356 break;
1357 case FUNCTIONFS_ENDPOINT_DESC:
1358 {
1359 int desc_idx;
1360 int i;
1361 struct usb_endpoint_descriptor *desc;
1362
1363 switch (epfile->ffs->speed) {
1364 case USB_SPEED_SUPER:
1365 desc_idx = 2;
1366 break;
1367 case USB_SPEED_HIGH:
1368 desc_idx = 1;
1369 break;
1370 default:
1371 desc_idx = 1;
1372 }
1373 for (i = 0; i < epfile->ffs->eps_count; i++) {
1374 if (epfile->ffs->epfiles + i == epfile)
1375 break;
1376 }
1377 ep = epfile->ffs->eps + i;
1378 desc = ep->descs[desc_idx];
1379 spin_unlock_irq(&epfile->ffs->eps_lock);
1380 ret = copy_to_user((void __user *)value, desc, desc->bLength);
1381 if (ret)
1382 ret = -EFAULT;
1383 return ret;
1384 }
1385 default:
1386 ret = -ENOTTY;
1387 }
1388 spin_unlock_irq(&epfile->ffs->eps_lock);
1389
1390 return ret;
1391 }
1392
ffs_epfile_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)1393 static ssize_t ffs_epfile_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
1394 {
1395 int status = 0;
1396 unsigned int copied = 0;
1397 unsigned long flags;
1398 struct ffs_epfile *epfile = file->private_data;
1399 ENTER();
1400 if (kfifo_is_empty(&epfile->reqEventFifo)) {
1401 return 0;
1402 }
1403 spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
1404 status = kfifo_to_user(&epfile->reqEventFifo, buf, count, &copied) == 0 ? copied : -1;
1405 spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
1406
1407 return status;
1408 }
1409
/*
 * write() on an endpoint file is a no-op: the data path goes through the
 * mmap()ed buffers and ioctls.  Claim the whole count so callers using
 * plain write() do not loop or error out.
 */
static ssize_t ffs_epfile_write(struct file *file, const char __user *buf, size_t count, loff_t *f_pos)
{
	return count;
}
1414
ffs_epfile_poll(struct file *file, struct poll_table_struct * wait)1415 static unsigned int ffs_epfile_poll(struct file *file, struct poll_table_struct * wait)
1416 {
1417 unsigned int mask = 0;
1418 struct ffs_epfile *epfile = file->private_data;
1419 ENTER();
1420 poll_wait(file, &epfile->wait_que, wait);
1421 if (!kfifo_is_empty(&epfile->reqEventFifo)) {
1422 mask |= POLLIN;
1423 }
1424 return mask;
1425 }
1426
1427 #ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl entry point: forwards directly to the native
 * handler.  NOTE(review): this assumes every ioctl argument struct has an
 * identical 32/64-bit layout — verify struct IoData/FuncNew packing.
 */
static long ffs_epfile_compat_ioctl(struct file *file, unsigned code,
		unsigned long value)
{
	return ffs_epfile_ioctl(file, code, value);
}
1433 #endif
1434
/* File operations for the per-endpoint character devices ("<dev>.epN"). */
static const struct file_operations ffs_epfile_operations = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.mmap = ffs_epfile_mmap,
	.read = ffs_epfile_read,
	.write = ffs_epfile_write,
	.poll = ffs_epfile_poll,
	.open = ffs_epfile_open,
	.release = ffs_epfile_release,
	.unlocked_ioctl = ffs_epfile_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ffs_epfile_compat_ioctl,
#endif
};
1449
1450 /* ffs_data and ffs_function construction and destruction code **************/
1451 static void ffs_data_clear(struct ffs_data *ffs);
1452 static void ffs_data_reset(struct ffs_data *ffs);
1453 static dev_t g_dev;
1454 #define MAX_EP_DEV 10
usbfn_ioctl(struct file *file, unsigned int cmd, unsigned long value)1455 static long usbfn_ioctl(struct file *file, unsigned int cmd, unsigned long value)
1456 {
1457 long ret;
1458 ENTER();
1459 switch(cmd)
1460 {
1461 case FUNCTIONFS_NEWFN:
1462 {
1463 struct ffs_dev *ffs_dev;
1464 struct ffs_data *ffs;
1465 struct FuncNew newfn;
1466 char nameEp0[MAX_NAMELEN];
1467 ret = copy_from_user(&newfn, (void __user *)value, sizeof(struct FuncNew ));
1468 if (unlikely(ret)) {
1469 return -EFAULT;
1470 }
1471 ffs = ffs_data_new(newfn.name);
1472 if (unlikely(!ffs)) {
1473 return (-ENOMEM);
1474 }
1475
1476 if (newfn.nameLen > MAX_NAMELEN) {
1477 return -EPERM;
1478 }
1479 memcpy(ffs->dev_name, newfn.name, newfn.nameLen);
1480
1481 if (unlikely(!ffs->dev_name)) {
1482 ffs_data_put(ffs);
1483 return (-ENOMEM);
1484 }
1485
1486 if (sprintf(nameEp0, "%s.ep%u", ffs->dev_name, 0) < 0) {
1487 ffs_data_put(ffs);
1488 return -EFAULT;
1489 }
1490 ffs_dev = ffs_acquire_dev(newfn.name);
1491 if (IS_ERR(ffs_dev)) {
1492 ffs_data_put(ffs);
1493 return (-ENODEV);
1494 }
1495 ffs->private_data = ffs_dev;
1496
1497 ret = alloc_chrdev_region(&g_dev, 0, MAX_EP_DEV, nameEp0);
1498 if (ret < 0) {
1499 ffs_release_dev(ffs);
1500 ffs_data_put(ffs);
1501 return -EBUSY;
1502 }
1503 cdev_init(&ffs->cdev, &ffs_ep0_operations);
1504 ffs->devno = MKDEV(MAJOR(g_dev), 0);
1505 ret = cdev_add(&ffs->cdev, ffs->devno, 1);
1506 if (ret) {
1507 ffs_release_dev(ffs);
1508 ffs_data_put(ffs);
1509 return -EBUSY;
1510 }
1511
1512 ffs->fn_device = device_create(ffs_class, NULL, ffs->devno, NULL, nameEp0);
1513 if (IS_ERR(ffs->fn_device)) {
1514 cdev_del(&ffs->cdev);
1515 ffs_release_dev(ffs);
1516 ffs_data_put(ffs);
1517 return -EBUSY;
1518 }
1519 return 0;
1520 }
1521 case FUNCTIONFS_DELFN:
1522 {
1523 struct FuncNew newfn;
1524 struct ffs_data *ffs;
1525 struct ffs_dev *ffs_dev;
1526 ret = copy_from_user(&newfn, (void __user *)value, sizeof(struct FuncNew ));
1527 if (unlikely(ret)) {
1528 return -EFAULT;
1529 }
1530
1531 ffs_dev = _ffs_find_dev(newfn.name);
1532 if (IS_ERR(ffs_dev)) {
1533 return -EFAULT;
1534 }
1535 ffs = ffs_dev->ffs_data;
1536 device_destroy(ffs_class, ffs->devno);
1537 cdev_del(&ffs->cdev);
1538 unregister_chrdev_region(g_dev, MAX_EP_DEV);
1539 ffs_release_dev(ffs);
1540 ffs_data_clear(ffs);
1541 destroy_workqueue(ffs->io_completion_wq);
1542 kfree(ffs);
1543 return 0;
1544 }
1545 default:
1546 ret = -ENOTTY;
1547 }
1548
1549 return ret;
1550 }
1551
/* /dev/usbfn open hook: no per-open state, always succeeds. */
static int usbfn_open(struct inode *inode, struct file *file)
{
	return 0;
}
1556
/* /dev/usbfn release hook: nothing was allocated in open. */
static int usbfn_release(struct inode *inode, struct file *file)
{
	return 0;
}
1561
/*
 * File operations for the /dev/usbfn misc control device.
 * NOTE(review): compat_ioctl reuses the native handler directly; this is
 * only safe if every ioctl struct has identical 32/64-bit layout — verify.
 */
static struct file_operations usbfn_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = usbfn_ioctl,
	.open = usbfn_open,
	.release = usbfn_release,
#ifdef CONFIG_COMPAT
	.compat_ioctl = usbfn_ioctl,
#endif
};
1571
/* Misc device exposing the function create/delete control node /dev/usbfn. */
static struct miscdevice usbfn_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "usbfn",
	.fops = &usbfn_fops,
};
1577
1578 /* Driver's main init/cleanup functions *************************************/
functionfs_init(void)1579 static int functionfs_init(void)
1580 {
1581 int ret;
1582
1583 ENTER();
1584 ret = misc_register(&usbfn_misc);
1585 if (likely(!ret))
1586 pr_info("file system registered\n");
1587 else
1588 pr_err("failed registering file system (%d)\n", ret);
1589
1590 //ffs_class = class_create(THIS_MODULE, "functionfs");
1591 ffs_class = class_create("functionfs");
1592 if (IS_ERR(ffs_class))
1593 return PTR_ERR(ffs_class);
1594
1595 ffs_class->devnode = ffs_devnode;
1596
1597 return ret;
1598 }
1599
/* Module exit: undo functionfs_init() in reverse order. */
static void functionfs_cleanup(void)
{
	ENTER();
	class_destroy(ffs_class);
	misc_deregister(&usbfn_misc);
}
1606
/* Take an additional reference on an ffs_data instance. */
static void ffs_data_get(struct ffs_data *ffs)
{
	ENTER();
	refcount_inc(&ffs->ref);
}
1612
/*
 * Drop a reference on an ffs_data instance; on the last put, tear down
 * all state and free it.  The BUG_ON asserts no one is still sleeping on
 * any of the instance's wait queues at free time (the completion-wait
 * check differs by kernel version: completions use swait since 5.0).
 */
static void ffs_data_put(struct ffs_data *ffs)
{
	ENTER();
	if (unlikely(refcount_dec_and_test(&ffs->ref))) {
		pr_info("%s(): freeing\n", __func__);
		ffs_data_clear(ffs);
		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)
		       swait_active(&ffs->ep0req_completion.wait) ||
#else
		       waitqueue_active(&ffs->ep0req_completion.wait) ||
#endif
		       waitqueue_active(&ffs->wait) ||
		       waitqueue_active(&ffs->wait_que));
		destroy_workqueue(ffs->io_completion_wq);
		kfree(ffs);
	}
}
1631
ffs_data_new(const char *dev_name)1632 static struct ffs_data *ffs_data_new(const char *dev_name)
1633 {
1634 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1635 if (unlikely(!ffs))
1636 return NULL;
1637
1638 ENTER();
1639
1640 ffs->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name);
1641 if (!ffs->io_completion_wq) {
1642 kfree(ffs);
1643 return NULL;
1644 }
1645
1646 refcount_set(&ffs->ref, 1);
1647 atomic_set(&ffs->opened, 0);
1648 ffs->state = FFS_READ_DESCRIPTORS;
1649 mutex_init(&ffs->mutex);
1650 spin_lock_init(&ffs->eps_lock);
1651 spin_lock_init(&ffs->mem_lock);
1652 init_waitqueue_head(&ffs->ev.waitq);
1653 init_waitqueue_head(&ffs->wait);
1654 init_waitqueue_head(&ffs->wait_que);
1655 init_completion(&ffs->ep0req_completion);
1656 INIT_LIST_HEAD(&ffs->memory_list);
1657 ffs->ev.can_stall = 1;
1658
1659 return ffs;
1660 }
1661
/*
 * Release everything an ffs_data owns except the structure itself:
 * notify closure, destroy endpoint files, drop the eventfd and free the
 * raw descriptor/string buffers.  Must be called with the gadget already
 * unbound (asserted by the BUG_ON).
 */
static void ffs_data_clear(struct ffs_data *ffs)
{
	ENTER();

	ffs_closed(ffs);

	BUG_ON(ffs->gadget);

	if (ffs->epfiles)
		ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);

	if (ffs->ffs_eventfd)
		eventfd_ctx_put(ffs->ffs_eventfd);

	kfree(ffs->raw_descs_data);
	kfree(ffs->raw_strings);
	kfree(ffs->stringtabs);
}
1680
/*
 * Clear all owned resources and return the instance to its post-new
 * state (FFS_READ_DESCRIPTORS, no descriptors/strings/endpoints), ready
 * to accept a fresh descriptor upload.
 */
static void ffs_data_reset(struct ffs_data *ffs)
{
	ENTER();

	ffs_data_clear(ffs);

	/* Pointers freed by ffs_data_clear(); reset to avoid dangling use. */
	ffs->epfiles = NULL;
	ffs->raw_descs_data = NULL;
	ffs->raw_descs = NULL;
	ffs->raw_strings = NULL;
	ffs->stringtabs = NULL;

	ffs->raw_descs_length = 0;
	ffs->fs_descs_count = 0;
	ffs->hs_descs_count = 0;
	ffs->ss_descs_count = 0;

	ffs->strings_count = 0;
	ffs->interfaces_count = 0;
	ffs->eps_count = 0;

	ffs->ev.count = 0;

	ffs->state = FFS_READ_DESCRIPTORS;
	ffs->setup_state = FFS_NO_SETUP;
	ffs->flags = 0;
}
1708
/*
 * Bind an active ffs_data to a composite device: allocate USB string
 * ids, set up the ep0 request and record the gadget.  Takes a reference
 * on ffs that functionfs_unbind() releases.  Returns 0 or a negative
 * errno (-EBADFD if not FFS_ACTIVE or already bound).
 */
static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
{
	struct usb_gadget_strings **lang;
	int first_id;

	ENTER();

	if (WARN_ON(ffs->state != FFS_ACTIVE
		 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
		return -EBADFD;

	first_id = usb_string_ids_n(cdev, ffs->strings_count);
	if (unlikely(first_id < 0))
		return first_id;

	ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
	if (unlikely(!ffs->ep0req))
		return -ENOMEM;
	ffs->ep0req->complete = ffs_ep0_complete;
	ffs->ep0req->context = ffs;
	INIT_LIST_HEAD(&ffs->ep0req->list);

	/* Assign sequential ids to every string in every language table. */
	lang = ffs->stringtabs;
	if (lang) {
		for (; *lang; ++lang) {
			struct usb_string *str = (*lang)->strings;
			int id = first_id;
			for (; str->s; ++id, ++str)
				str->id = id;
		}
	}

	ffs->gadget = cdev->gadget;
	/* NOTE(review): speed is sampled once at bind time — it may not yet
	 * reflect the final negotiated speed; confirm callers' expectations. */
	ffs->speed = cdev->gadget->speed;
	ffs_data_get(ffs);
	return 0;
}
1746
functionfs_unbind(struct ffs_data *ffs)1747 static void functionfs_unbind(struct ffs_data *ffs)
1748 {
1749 ENTER();
1750
1751 if (!WARN_ON(!ffs->gadget)) {
1752 usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
1753 ffs->ep0req = NULL;
1754 ffs->gadget = NULL;
1755 clear_bit(FFS_FL_BOUND, &ffs->flags);
1756 ffs_data_put(ffs);
1757 }
1758 }
1759
ffs_epfiles_create(struct ffs_data *ffs)1760 static int ffs_epfiles_create(struct ffs_data *ffs)
1761 {
1762 struct ffs_epfile *epfile = NULL, *epfiles = NULL;
1763 unsigned int i, count ,ret;
1764
1765 ENTER();
1766
1767 count = ffs->eps_count;
1768 epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
1769 if (!epfiles)
1770 return -ENOMEM;
1771
1772 epfile = epfiles;
1773 for (i = 1; i <= count; ++i, ++epfile) {
1774 epfile->ffs = ffs;
1775 mutex_init(&epfile->mutex);
1776 INIT_LIST_HEAD(&epfile->memory_list);
1777 init_waitqueue_head(&epfile->wait_que);
1778 if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR) {
1779 if (sprintf(epfile->name, "%s.ep%02x", ffs->dev_name, ffs->eps_addrmap[i]) < 0) {
1780 return -EFAULT;
1781 }
1782 } else {
1783 if (sprintf(epfile->name, "%s.ep%u", ffs->dev_name, i) < 0) {
1784 return -EFAULT;
1785 }
1786 }
1787
1788 cdev_init(&epfile->cdev, &ffs_epfile_operations);
1789 epfile->devno=MKDEV(MAJOR(ffs->devno), i);
1790 ret = cdev_add(&epfile->cdev, epfile->devno, 1);
1791 if (ret)
1792 {
1793 ffs_epfiles_destroy(epfiles, i - 1);
1794 return -EBUSY;
1795 }
1796
1797 epfile->device = device_create(ffs_class, NULL, epfile->devno, NULL, epfile->name);
1798 if (IS_ERR(epfile->device))
1799 {
1800 cdev_del(&epfile->cdev);
1801 ffs_epfiles_destroy(epfiles, i - 1);
1802 return -EBUSY;
1803 }
1804 }
1805
1806 ffs->epfiles = epfiles;
1807 return 0;
1808 }
1809
ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)1810 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
1811 {
1812 struct ffs_epfile *epfile = epfiles;
1813
1814 ENTER();
1815
1816 for (; count; --count, ++epfile) {
1817 BUG_ON(mutex_is_locked(&epfile->mutex));
1818 device_destroy(ffs_class, epfile->devno);
1819 cdev_del(&epfile->cdev);
1820 }
1821
1822 kfree(epfiles);
1823 }
1824
ffs_func_eps_disable(struct ffs_function *func)1825 static void ffs_func_eps_disable(struct ffs_function *func)
1826 {
1827 struct ffs_ep *ep = func->eps;
1828 struct ffs_epfile *epfile = func->ffs->epfiles;
1829 unsigned count = func->ffs->eps_count;
1830 unsigned long flags;
1831
1832 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1833 while (count--) {
1834 /* pending requests get nuked */
1835 if (likely(ep->ep))
1836 usb_ep_disable(ep->ep);
1837 ++ep;
1838
1839 if (epfile) {
1840 epfile->ep = NULL;
1841 ++epfile;
1842 }
1843 }
1844 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1845 }
1846
/*
 * Configure and enable every endpoint of the function for the current
 * connection speed, attaching each to its epfile.  Stops at the first
 * failure (endpoints already enabled stay enabled).  Wakes any I/O
 * waiting in ffs_epfile_iorw() for an endpoint to appear.  Returns 0 or
 * the first error from config_ep_by_speed()/usb_ep_enable().
 */
static int ffs_func_eps_enable(struct ffs_function *func)
{
	struct ffs_data *ffs = func->ffs;
	struct ffs_ep *ep = func->eps;
	struct ffs_epfile *epfile = ffs->epfiles;
	unsigned count = ffs->eps_count;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&func->ffs->eps_lock, flags);
	while(count--) {
		ep->ep->driver_data = ep;

		/* Select fs/hs/ss descriptors matching the negotiated speed. */
		ret = config_ep_by_speed(func->gadget, &func->function, ep->ep);
		if (ret) {
			pr_err("%s: config_ep_by_speed(%s) returned %d\n",
					__func__, ep->ep->name, ret);
			break;
		}

		ret = usb_ep_enable(ep->ep);
		if (likely(!ret)) {
			/* Publish the endpoint to its epfile for I/O. */
			epfile->ep = ep;
			epfile->in = usb_endpoint_dir_in(ep->ep->desc);
			epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc);
		} else {
			break;
		}

		++ep;
		++epfile;
	}

	/* Unblock readers/writers waiting for an enabled endpoint. */
	wake_up_interruptible(&ffs->wait);
	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);

	return ret;
}
1885
1886 /* Parsing and building descriptors and strings *****************************/
1887
1888 /*
1889 * This validates if data pointed by data is a valid USB descriptor as
1890 * well as record how many interfaces, endpoints and strings are
1891 * required by given configuration. Returns address after the
1892 * descriptor or NULL if data is invalid.
1893 */
1894 enum ffs_entity_type {
1895 FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
1896 };
1897
1898 enum ffs_os_desc_type {
1899 FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP
1900 };
1901
1902 typedef int (*ffs_entity_callback)(enum ffs_entity_type entity, u8 *valuep,
1903 struct usb_descriptor_header *desc,
1904 void *priv);
1905
1906 typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
1907 struct usb_os_desc_header *h, void *data,
1908 unsigned len, void *priv);
1909
/*
 * Validate a single USB descriptor at `data` (at most `len` bytes) and
 * report each interface/string/endpoint it references through `entity`.
 * Returns the descriptor's length (bytes consumed) or a negative errno.
 */
static int __must_check ffs_do_single_desc(char *data, unsigned len,
		ffs_entity_callback entity,
		void *priv)
{
	struct usb_descriptor_header *_ds = (void *)data;
	u8 length;
	int ret;

	ENTER();

	/* At least two bytes are required: length and type */
	if (len < 2) {
		pr_vdebug("descriptor too short\n");
		return -EINVAL;
	}

	/* If we have at least as many bytes as the descriptor takes? */
	length = _ds->bLength;
	if (len < length) {
		pr_vdebug("descriptor longer then available data\n");
		return -EINVAL;
	}

	/*
	 * __entity() validates one referenced value (interface number,
	 * string id, endpoint address) and forwards it to the callback,
	 * returning from this function on any error.
	 */
#define __entity_check_INTERFACE(val)  1
#define __entity_check_STRING(val)     (val)
#define __entity_check_ENDPOINT(val)   ((val) & USB_ENDPOINT_NUMBER_MASK)
#define __entity(type, val) do {					\
		pr_vdebug("entity " #type "(%02x)\n", (val));		\
		if (unlikely(!__entity_check_ ##type(val))) {		\
			pr_vdebug("invalid entity's value\n");		\
			return -EINVAL;					\
		}							\
		ret = entity(FFS_ ##type, &val, _ds, priv);		\
		if (unlikely(ret < 0)) {				\
			pr_debug("entity " #type "(%02x); ret = %d\n",	\
				 (val), ret);				\
			return ret;					\
		}							\
	} while (0)

	/* Parse descriptor depending on type. */
	switch (_ds->bDescriptorType) {
	case USB_DT_DEVICE:
	case USB_DT_CONFIG:
	case USB_DT_STRING:
	case USB_DT_DEVICE_QUALIFIER:
		/* function can't have any of those */
		pr_vdebug("descriptor reserved for gadget: %d\n",
		      _ds->bDescriptorType);
		return -EINVAL;

	case USB_DT_INTERFACE: {
		struct usb_interface_descriptor *ds = (void *)_ds;
		pr_vdebug("interface descriptor\n");
		if (length != sizeof *ds)
			goto inv_length;

		__entity(INTERFACE, ds->bInterfaceNumber);
		if (ds->iInterface)
			__entity(STRING, ds->iInterface);
	}
		break;

	case USB_DT_ENDPOINT: {
		struct usb_endpoint_descriptor *ds = (void *)_ds;
		pr_vdebug("endpoint descriptor\n");
		if (length != USB_DT_ENDPOINT_SIZE &&
		    length != USB_DT_ENDPOINT_AUDIO_SIZE)
			goto inv_length;
		__entity(ENDPOINT, ds->bEndpointAddress);
	}
		break;

	case HID_DT_HID:
		pr_vdebug("hid descriptor\n");
		if (length != sizeof(struct hid_descriptor))
			goto inv_length;
		break;

	case USB_DT_OTG:
		if (length != sizeof(struct usb_otg_descriptor))
			goto inv_length;
		break;

	case USB_DT_INTERFACE_ASSOCIATION: {
		struct usb_interface_assoc_descriptor *ds = (void *)_ds;
		pr_vdebug("interface association descriptor\n");
		if (length != sizeof *ds)
			goto inv_length;
		if (ds->iFunction)
			__entity(STRING, ds->iFunction);
	}
		break;

	case USB_DT_SS_ENDPOINT_COMP:
		pr_vdebug("EP SS companion descriptor\n");
		if (length != sizeof(struct usb_ss_ep_comp_descriptor))
			goto inv_length;
		break;

	case USB_DT_OTHER_SPEED_CONFIG:
	case USB_DT_INTERFACE_POWER:
	case USB_DT_DEBUG:
	case USB_DT_SECURITY:
	case USB_DT_CS_RADIO_CONTROL:
		/* Accepted but not interpreted: length was already checked. */
		pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
		break;
	default:
		/* We should never be here */
		pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
		break;
inv_length:
		pr_vdebug("invalid length: %d (descriptor %d)\n",
			  _ds->bLength, _ds->bDescriptorType);
		return -EINVAL;
	}

#undef __entity
#undef __entity_check_DESCRIPTOR
#undef __entity_check_INTERFACE
#undef __entity_check_STRING
#undef __entity_check_ENDPOINT

	return length;
}
2035
/*
 * Walk `count` descriptors in `data`, invoking `entity` once per
 * descriptor boundary (FFS_DESCRIPTOR with the running index in valuep)
 * and once per referenced entity via ffs_do_single_desc().  After the
 * last descriptor the callback is invoked one final time with
 * data == NULL as an end-of-list marker.  Returns the number of bytes
 * consumed or a negative errno.
 */
static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
		ffs_entity_callback entity, void *priv)
{
	const unsigned _len = len;
	uintptr_t num = 0;

	ENTER();

	for (;;) {
		int ret;

		/* data == NULL signals "all descriptors seen" to the callback. */
		if (num == count)
			data = NULL;

		/* Record "descriptor" entity */
		ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
		if (unlikely(ret < 0)) {
			pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
				 num, ret);
			return ret;
		}

		if (!data)
			return _len - len;

		ret = ffs_do_single_desc(data, len, entity, priv);
		if (unlikely(ret < 0)) {
			pr_debug("%s returns %d\n", __func__, ret);
			return ret;
		}

		len -= ret;
		data += ret;
		++num;
	}
}
2072
/*
 * First-pass entity callback: counts interfaces, strings and endpoints
 * referenced by the uploaded descriptors, accumulating the totals in the
 * ffs_desc_helper.  Also records (or cross-checks, once one speed's
 * descriptors were parsed) the endpoint address map.
 */
static int __ffs_data_do_entity(enum ffs_entity_type type,
		u8 *valuep, struct usb_descriptor_header *desc,
		void *priv)
{
	struct ffs_desc_helper *helper = priv;
	struct usb_endpoint_descriptor *d = NULL;

	ENTER();

	switch (type) {
	case FFS_DESCRIPTOR:
		break;

	case FFS_INTERFACE:
		/*
		 * Interfaces are indexed from zero so if we
		 * encountered interface "n" then there are at least
		 * "n+1" interfaces.
		 */
		if (*valuep >= helper->interfaces_count)
			helper->interfaces_count = *valuep + 1;
		break;

	case FFS_STRING:
		/*
		 * Strings are indexed from 1 (0 is reserved
		 * for languages list)
		 */
		if (*valuep > helper->ffs->strings_count)
			helper->ffs->strings_count = *valuep;
		break;

	case FFS_ENDPOINT:
		d = (void *)desc;
		/* eps_count is 1-based and doubles as the addrmap index. */
		helper->eps_count++;
		if (helper->eps_count >= FFS_MAX_EPS_COUNT)
			return -EINVAL;
		/* Check if descriptors for any speed were already parsed */
		if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
			helper->ffs->eps_addrmap[helper->eps_count] =
				d->bEndpointAddress;
		else if (helper->ffs->eps_addrmap[helper->eps_count] !=
			 d->bEndpointAddress)
			return -EINVAL;
		break;
	}

	return 0;
}
2122
__ffs_do_os_desc_header(enum ffs_os_desc_type *next_type, struct usb_os_desc_header *desc)2123 static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
2124 struct usb_os_desc_header *desc)
2125 {
2126 u16 bcd_version = le16_to_cpu(desc->bcdVersion);
2127 u16 w_index = le16_to_cpu(desc->wIndex);
2128
2129 if (bcd_version != 1) {
2130 pr_vdebug("unsupported os descriptors version: %d",
2131 bcd_version);
2132 return -EINVAL;
2133 }
2134 switch (w_index) {
2135 case 0x4:
2136 *next_type = FFS_OS_DESC_EXT_COMPAT;
2137 break;
2138 case 0x5:
2139 *next_type = FFS_OS_DESC_EXT_PROP;
2140 break;
2141 default:
2142 pr_vdebug("unsupported os descriptor type: %d", w_index);
2143 return -EINVAL;
2144 }
2145
2146 return sizeof(*desc);
2147 }
2148
2149 /*
2150 * Process all extended compatibility/extended property descriptors
2151 * of a feature descriptor
2152 */
ffs_do_single_os_desc(char *data, unsigned len, enum ffs_os_desc_type type, u16 feature_count, ffs_os_desc_callback entity, void *priv, struct usb_os_desc_header *h)2153 static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
2154 enum ffs_os_desc_type type,
2155 u16 feature_count,
2156 ffs_os_desc_callback entity,
2157 void *priv,
2158 struct usb_os_desc_header *h)
2159 {
2160 int ret;
2161 const unsigned _len = len;
2162
2163 ENTER();
2164
2165 /* loop over all ext compat/ext prop descriptors */
2166 while (feature_count--) {
2167 ret = entity(type, h, data, len, priv);
2168 if (unlikely(ret < 0)) {
2169 pr_debug("bad OS descriptor, type: %d\n", type);
2170 return ret;
2171 }
2172 data += ret;
2173 len -= ret;
2174 }
2175 return _len - len;
2176 }
2177
2178 /* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
/* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
static int __must_check ffs_do_os_descs(unsigned count,
		char *data, unsigned len,
		ffs_os_desc_callback entity, void *priv)
{
	const unsigned _len = len;
	unsigned long num = 0;

	ENTER();

	for (num = 0; num < count; ++num) {
		int ret;
		enum ffs_os_desc_type type;
		u16 feature_count;
		struct usb_os_desc_header *desc = (void *)data;

		if (len < sizeof(*desc))
			return -EINVAL;

		/*
		 * Record "descriptor" entity.
		 * Process dwLength, bcdVersion, wIndex, get b/wCount.
		 * Move the data pointer to the beginning of extended
		 * compatibilities proper or extended properties proper
		 * portions of the data
		 */
		if (le32_to_cpu(desc->dwLength) > len)
			return -EINVAL;

		ret = __ffs_do_os_desc_header(&type, desc);
		if (unlikely(ret < 0)) {
			pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
				 num, ret);
			return ret;
		}
		/*
		 * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??"
		 */
		feature_count = le16_to_cpu(desc->wCount);
		/* Ext Compat descriptors use an 8-bit bCount; reject overflow. */
		if (type == FFS_OS_DESC_EXT_COMPAT &&
		    (feature_count > 255 || desc->Reserved))
			return -EINVAL;
		len -= ret;
		data += ret;

		/*
		 * Process all function/property descriptors
		 * of this Feature Descriptor
		 */
		ret = ffs_do_single_os_desc(data, len, type,
					    feature_count, entity, priv, desc);
		if (unlikely(ret < 0)) {
			pr_debug("%s returns %d\n", __func__, ret);
			return ret;
		}

		len -= ret;
		data += ret;
	}
	/* Bytes consumed across all feature descriptors. */
	return _len - len;
}
2239
2240 /**
2241 * Validate contents of the buffer from userspace related to OS descriptors.
2242 */
/**
 * Validate contents of the buffer from userspace related to OS descriptors.
 */
/*
 * ffs_os_desc_callback used while parsing the userspace descriptor blob:
 * validates a single Extended Compat ID function descriptor or Extended
 * Properties descriptor, and accumulates the counters/lengths that size
 * the allocation later performed in _ffs_func_bind().
 * Returns the descriptor's length on success, -EINVAL on malformed input.
 */
static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
				 struct usb_os_desc_header *h, void *data,
				 unsigned len, void *priv)
{
	struct ffs_data *ffs = priv;
	u8 length;

	ENTER();

	switch (type) {
	case FFS_OS_DESC_EXT_COMPAT: {
		/* Fixed-size entry; target interface must exist. */
		struct usb_ext_compat_desc *d = data;
		int i;

		if (len < sizeof(*d) ||
		    d->bFirstInterfaceNumber >= ffs->interfaces_count)
			return -EINVAL;
		if (d->Reserved1 != 1) {
			/*
			 * According to the spec, Reserved1 must be set to 1
			 * but older kernels incorrectly rejected non-zero
			 * values. We fix it here to avoid returning EINVAL
			 * in response to values we used to accept.
			 */
			pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
			d->Reserved1 = 1;
		}
		for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
			if (d->Reserved2[i])
				return -EINVAL;

		length = sizeof(struct usb_ext_compat_desc);
	}
		break;
	case FFS_OS_DESC_EXT_PROP: {
		struct usb_ext_prop_desc *d = data;
		u32 type, pdl;
		u16 pnl;

		if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
			return -EINVAL;
		/*
		 * NOTE(review): length is u8 while dwSize is 32-bit, so
		 * sizes above 255 are truncated here — confirm intended.
		 */
		length = le32_to_cpu(d->dwSize);
		if (len < length)
			return -EINVAL;
		type = le32_to_cpu(d->dwPropertyDataType);
		if (type < USB_EXT_PROP_UNICODE ||
		    type > USB_EXT_PROP_UNICODE_MULTI) {
			pr_vdebug("unsupported os descriptor property type: %d",
				  type);
			return -EINVAL;
		}
		pnl = le16_to_cpu(d->wPropertyNameLength);
		/* 14 == size of the fixed part of an ext prop descriptor */
		if (length < 14 + pnl) {
			pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
				  length, pnl, type);
			return -EINVAL;
		}
		/* dwPropertyDataLength sits right after the property name. */
		pdl = le32_to_cpu(*(__le32 *)((u8 *)data + 10 + pnl));
		if (length != 14 + pnl + pdl) {
			pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
				  length, pnl, pdl, type);
			return -EINVAL;
		}
		/* Totals consumed by the vla allocation in _ffs_func_bind(). */
		++ffs->ms_os_descs_ext_prop_count;
		/* property name reported to the host as "WCHAR"s */
		ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
		ffs->ms_os_descs_ext_prop_data_len += pdl;
	}
		break;
	default:
		pr_vdebug("unknown descriptor: %d\n", type);
		return -EINVAL;
	}
	return length;
}
2318
/*
 * Parse and take ownership of a FunctionFS descriptors blob written to ep0
 * (FUNCTIONFS_DESCRIPTORS_MAGIC or _MAGIC_V2 layout).  On success the blob
 * is retained in ffs->raw_descs_data; on any failure @_data is freed here.
 * Called with ffs->mutex held (see the forward declaration at file top).
 *
 * NOTE(review): the magic/length words are read before any explicit
 * "len >= 8" check — assumes the ep0 write path guarantees a minimum
 * length; confirm against the caller.
 */
static int __ffs_data_got_descs(struct ffs_data *ffs,
				char *const _data, size_t len)
{
	char *data = _data, *raw_descs = NULL;
	unsigned os_descs_count = 0, counts[3], flags;
	int ret = -EINVAL, i;
	struct ffs_desc_helper helper;

	ENTER();

	/* Second 32-bit word must equal the total blob length. */
	if (get_unaligned_le32(data + 4) != len)
		goto error;

	switch (get_unaligned_le32(data)) {
	case FUNCTIONFS_DESCRIPTORS_MAGIC:
		/* Legacy v1 header: FS and HS descriptors, no flags word. */
		flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
		data += 8;
		len  -= 8;
		break;
	case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
		/* v2 header carries an explicit feature-flags word. */
		flags = get_unaligned_le32(data + 8);
		ffs->user_flags = flags;
		if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
			      FUNCTIONFS_HAS_HS_DESC |
			      FUNCTIONFS_HAS_SS_DESC |
			      FUNCTIONFS_HAS_MS_OS_DESC |
			      FUNCTIONFS_VIRTUAL_ADDR |
			      FUNCTIONFS_EVENTFD |
			      FUNCTIONFS_ALL_CTRL_RECIP |
			      FUNCTIONFS_CONFIG0_SETUP)) {
			ret = -ENOSYS;
			goto error;
		}
		data += 12;
		len  -= 12;
		break;
	default:
		goto error;
	}

	if (flags & FUNCTIONFS_EVENTFD) {
		/* Optional eventfd fd follows the header. */
		if (len < 4)
			goto error;
		ffs->ffs_eventfd =
			eventfd_ctx_fdget((int)get_unaligned_le32(data));
		if (IS_ERR(ffs->ffs_eventfd)) {
			ret = PTR_ERR(ffs->ffs_eventfd);
			ffs->ffs_eventfd = NULL;
			goto error;
		}
		data += 4;
		len  -= 4;
	}

	/* Read fs_count, hs_count and ss_count (if present) */
	for (i = 0; i < 3; ++i) {
		if (!(flags & (1 << i))) {
			counts[i] = 0;
		} else if (len < 4) {
			goto error;
		} else {
			counts[i] = get_unaligned_le32(data);
			data += 4;
			len  -= 4;
		}
	}
	/*
	 * The loop above leaves i == 3; bit 3 is FUNCTIONFS_HAS_MS_OS_DESC,
	 * so this reads the MS OS descriptors count when announced.
	 */
	if (flags & (1 << i)) {
		if (len < 4) {
			goto error;
		}
		os_descs_count = get_unaligned_le32(data);
		data += 4;
		len -= 4;
	}

	/* Read descriptors */
	raw_descs = data;
	helper.ffs = ffs;
	for (i = 0; i < 3; ++i) {
		if (!counts[i])
			continue;
		helper.interfaces_count = 0;
		helper.eps_count = 0;
		ret = ffs_do_descs(counts[i], data, len,
				   __ffs_data_do_entity, &helper);
		if (ret < 0)
			goto error;
		if (!ffs->eps_count && !ffs->interfaces_count) {
			/* First speed seen defines the expected counts. */
			ffs->eps_count = helper.eps_count;
			ffs->interfaces_count = helper.interfaces_count;
		} else {
			/* Every other speed must describe the same topology. */
			if (ffs->eps_count != helper.eps_count) {
				ret = -EINVAL;
				goto error;
			}
			if (ffs->interfaces_count != helper.interfaces_count) {
				ret = -EINVAL;
				goto error;
			}
		}
		data += ret;
		len  -= ret;
	}
	if (os_descs_count) {
		ret = ffs_do_os_descs(os_descs_count, data, len,
				      __ffs_data_do_os_desc, ffs);
		if (ret < 0)
			goto error;
		data += ret;
		len -= ret;
	}

	/* Reject an empty descriptor section or trailing garbage. */
	if (raw_descs == data || len) {
		ret = -EINVAL;
		goto error;
	}

	/* Commit: ffs now owns _data. */
	ffs->raw_descs_data	= _data;
	ffs->raw_descs		= raw_descs;
	ffs->raw_descs_length	= data - raw_descs;
	ffs->fs_descs_count	= counts[0];
	ffs->hs_descs_count	= counts[1];
	ffs->ss_descs_count	= counts[2];
	ffs->ms_os_descs_count	= os_descs_count;

	return 0;

error:
	kfree(_data);
	return ret;
}
2450
/*
 * Parse and take ownership of a FUNCTIONFS_STRINGS_MAGIC blob written to
 * ep0.  Builds the NULL-terminated usb_gadget_strings table (one entry per
 * language) that the composite framework consumes.  On success @_data is
 * retained in ffs->raw_strings; on failure it is freed here.
 *
 * Fix: the inner string loop used to decrement the *shared* str_count, so
 * with lang_count > 1 the counter was exhausted on the first language and
 * then wrapped (u32 underflow) on the next, walking off the buffer.  Each
 * language now consumes its own str_per_lang copy of the count.
 */
static int __ffs_data_got_strings(struct ffs_data *ffs,
				  char *const _data, size_t len)
{
	u32 str_count, needed_count, lang_count;
	struct usb_gadget_strings **stringtabs = NULL, *t = NULL;
	const char *data = _data;
	struct usb_string *s = NULL;

	ENTER();

	/* Header: magic, total length, string count, language count. */
	if (unlikely(len < 16 ||
		     get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
		     get_unaligned_le32(data + 4) != len))
		goto error;
	str_count  = get_unaligned_le32(data + 8);
	lang_count = get_unaligned_le32(data + 12);

	/* if one is zero the other must be zero */
	if (unlikely(!str_count != !lang_count))
		goto error;

	/* Do we have at least as many strings as descriptors need? */
	needed_count = ffs->strings_count;
	if (unlikely(str_count < needed_count))
		goto error;

	/*
	 * If we don't need any strings just return and free all
	 * memory.
	 */
	if (!needed_count) {
		kfree(_data);
		return 0;
	}

	/* Allocate everything in one chunk so there's less maintenance. */
	{
		unsigned i = 0;
		vla_group(d);
		vla_item(d, struct usb_gadget_strings *, stringtabs,
			 lang_count + 1);
		vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
		vla_item(d, struct usb_string, strings,
			 lang_count*(needed_count+1));

		char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);

		if (unlikely(!vlabuf)) {
			kfree(_data);
			return -ENOMEM;
		}

		/* Initialize the VLA pointers */
		stringtabs = vla_ptr(vlabuf, d, stringtabs);
		t = vla_ptr(vlabuf, d, stringtab);
		i = lang_count;
		do {
			*stringtabs++ = t++;
		} while (--i);
		*stringtabs = NULL;	/* NULL-terminate the table array */

		/* stringtabs = vlabuf = d_stringtabs for later kfree */
		stringtabs = vla_ptr(vlabuf, d, stringtabs);
		t = vla_ptr(vlabuf, d, stringtab);
		s = vla_ptr(vlabuf, d, strings);
	}

	/* For each language */
	data += 16;
	len -= 16;

	do { /* lang_count > 0 so we can use do-while */
		unsigned needed = needed_count;
		/* Each language carries its own run of str_count strings. */
		u32 str_per_lang = str_count;

		if (unlikely(len < 3))
			goto error_free;
		t->language = get_unaligned_le16(data);
		t->strings  = s;
		++t;

		data += 2;
		len -= 2;

		/* For each string */
		do { /* str_count > 0 so we can use do-while */
			size_t length = strnlen(data, len);

			/* An unterminated string means a malformed blob. */
			if (unlikely(length == len))
				goto error_free;

			/*
			 * User may provide more strings then we need,
			 * if that's the case we simply ignore the
			 * rest
			 */
			if (likely(needed)) {
				/*
				 * s->id will be set while adding
				 * function to configuration so for
				 * now just leave garbage here.
				 */
				s->s = data;
				--needed;
				++s;
			}

			data += length + 1;
			len -= length + 1;
		} while (--str_per_lang);

		s->id = 0;   /* terminator */
		s->s = NULL;
		++s;

	} while (--lang_count);

	/* Some garbage left? */
	if (unlikely(len))
		goto error_free;

	/* Done! */
	ffs->stringtabs = stringtabs;
	ffs->raw_strings = _data;

	return 0;

error_free:
	kfree(stringtabs);
error:
	kfree(_data);
	return -EINVAL;
}
2583
2584 /* Events handling and management *******************************************/
/* Events handling and management *******************************************/
/*
 * Queue a FunctionFS event for delivery through ep0, first purging queued
 * events that the new one supersedes.  Caller must hold
 * ffs->ev.waitq.lock (see the ffs_event_add() wrapper below).
 */
static void __ffs_event_add(struct ffs_data *ffs,
			    enum usb_functionfs_event_type type)
{
	enum usb_functionfs_event_type rem_type1, rem_type2 = type;
	int neg = 0;

	/*
	 * Abort any unhandled setup
	 *
	 * We do not need to worry about some cmpxchg() changing value
	 * of ffs->setup_state without holding the lock because when
	 * state is FFS_SETUP_PENDING cmpxchg() in several places in
	 * the source does nothing.
	 */
	if (ffs->setup_state == FFS_SETUP_PENDING)
		ffs->setup_state = FFS_SETUP_CANCELLED;

	/*
	 * Logic of this function guarantees that there are at most four pending
	 * evens on ffs->ev.types queue.  This is important because the queue
	 * has space for four elements only and __ffs_ep0_read_events function
	 * depends on that limit as well.  If more event types are added, those
	 * limits have to be revisited or guaranteed to still hold.
	 */
	switch (type) {
	case FUNCTIONFS_RESUME:
		rem_type2 = FUNCTIONFS_SUSPEND;
		/* FALL THROUGH */
	case FUNCTIONFS_SUSPEND:
	case FUNCTIONFS_SETUP:
		rem_type1 = type;
		/* Discard all similar events */
		break;

	case FUNCTIONFS_BIND:
	case FUNCTIONFS_UNBIND:
	case FUNCTIONFS_DISABLE:
	case FUNCTIONFS_ENABLE:
		/* Discard everything other then power management. */
		rem_type1 = FUNCTIONFS_SUSPEND;
		rem_type2 = FUNCTIONFS_RESUME;
		neg = 1;
		break;

	default:
		WARN(1, "%d: unknown event, this should not happen\n", type);
		return;
	}

	{
		/*
		 * Compact the queue in place: keep an event iff the
		 * (matches rem_type1/rem_type2) test equals @neg.
		 */
		u8 *ev  = ffs->ev.types, *out = ev;
		unsigned n = ffs->ev.count;
		for (; n; --n, ++ev)
			if ((*ev == rem_type1 || *ev == rem_type2) == neg)
				*out++ = *ev;
			else
				pr_vdebug("purging event %d\n", *ev);
		ffs->ev.count = out - ffs->ev.types;
	}

	/* Append the new event and wake any ep0 reader / eventfd watcher. */
	pr_vdebug("adding event %d\n", type);
	ffs->ev.types[ffs->ev.count++] = type;
	wake_up_locked(&ffs->ev.waitq);
	if (ffs->ffs_eventfd)
		eventfd_signal(ffs->ffs_eventfd, 1);
}
2651
ffs_event_add(struct ffs_data *ffs, enum usb_functionfs_event_type type)2652 static void ffs_event_add(struct ffs_data *ffs,
2653 enum usb_functionfs_event_type type)
2654 {
2655 unsigned long flags;
2656 spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
2657 __ffs_event_add(ffs, type);
2658 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
2659 }
2660
2661 /* Bind/unbind USB function hooks *******************************************/
2662
/*
 * Translate a user-visible endpoint address into its index in
 * ffs->eps_addrmap, or -ENOENT when the address is unknown.
 */
static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
{
	int idx = 1;	/* slot 0 is ep0; user endpoints start at 1 */

	while (idx < ARRAY_SIZE(ffs->eps_addrmap)) {
		if (ffs->eps_addrmap[idx] == endpoint_address)
			return idx;
		++idx;
	}

	return -ENOENT;
}
2672
/*
 * ffs_entity_callback for the first _ffs_func_bind() pass: records every
 * descriptor pointer into the speed-specific descriptor array and, for
 * endpoint descriptors, claims a usb_ep via autoconfig and allocates its
 * request.  @valuep carries the descriptor's index within the array.
 */
static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
				    struct usb_descriptor_header *desc,
				    void *priv)
{
	struct usb_endpoint_descriptor *ds = (void *)desc;
	struct ffs_function *func = priv;
	struct ffs_ep *ffs_ep = NULL;
	unsigned ep_desc_id;
	int idx;
	static const char *speed_names[] = { "full", "high", "super" };

	if (type != FFS_DESCRIPTOR)
		return 0;

	/*
	 * If ss_descriptors is not NULL, we are reading super speed
	 * descriptors; if hs_descriptors is not NULL, we are reading high
	 * speed descriptors; otherwise, we are reading full speed
	 * descriptors.
	 */
	if (func->function.ss_descriptors) {
		ep_desc_id = 2;
		func->function.ss_descriptors[(uintptr_t)valuep] = desc;
	} else if (func->function.hs_descriptors) {
		ep_desc_id = 1;
		func->function.hs_descriptors[(uintptr_t)valuep] = desc;
	} else {
		ep_desc_id = 0;
		func->function.fs_descriptors[(uintptr_t)valuep] = desc;
	}

	/* Only endpoint descriptors need the processing below. */
	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return 0;

	/* Map the user-visible endpoint address to our ep array slot. */
	idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
	if (idx < 0)
		return idx;

	ffs_ep = func->eps + idx;

	/* At most one descriptor per endpoint and speed. */
	if (unlikely(ffs_ep->descs[ep_desc_id])) {
		pr_err("two %sspeed descriptors for EP %d\n",
		       speed_names[ep_desc_id],
		       ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
		return -EINVAL;
	}
	ffs_ep->descs[ep_desc_id] = ds;

	ffs_dump_mem(": Original ep desc", ds, ds->bLength);
	if (ffs_ep->ep) {
		/*
		 * Endpoint already claimed at a lower speed: reuse its
		 * (possibly rewritten) address and fall back to the
		 * full-speed wMaxPacketSize when none was given.
		 */
		ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
		if (!ds->wMaxPacketSize)
			ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
	} else {
		struct usb_request *req = NULL;
		struct usb_ep *ep = NULL;
		u8 bEndpointAddress;

		/*
		 * We back up bEndpointAddress because autoconfig overwrites
		 * it with physical endpoint address.
		 */
		bEndpointAddress = ds->bEndpointAddress;
		pr_vdebug("autoconfig\n");
		ep = usb_ep_autoconfig(func->gadget, ds);
		if (unlikely(!ep))
			return -ENOTSUPP;
		ep->driver_data = func->eps + idx;

		req = usb_ep_alloc_request(ep, GFP_KERNEL);
		if (unlikely(!req))
			return -ENOMEM;

		ffs_ep->ep  = ep;
		ffs_ep->req = req;
		INIT_LIST_HEAD(&ffs_ep->req->list);
		/* Map rewritten ep number back to our index (1-based). */
		func->eps_revmap[ds->bEndpointAddress &
				 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
		/*
		 * If we use virtual address mapping, we restore
		 * original bEndpointAddress value.
		 */
		if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
			ds->bEndpointAddress = bEndpointAddress;
	}
	ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);

	return 0;
}
2762
/*
 * ffs_entity_callback for the second _ffs_func_bind() pass: rewrites
 * interface numbers, string IDs and endpoint addresses inside the copied
 * descriptors to the values allocated by the composite framework.
 * @valuep points at the byte to rewrite in the descriptor itself.
 */
static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
				   struct usb_descriptor_header *desc,
				   void *priv)
{
	struct ffs_function *func = priv;
	unsigned idx;
	u8 newValue;

	switch (type) {
	default:
	case FFS_DESCRIPTOR:
		/* Handled in previous pass by __ffs_func_bind_do_descs() */
		return 0;

	case FFS_INTERFACE:
		idx = *valuep;
		/* Lazily allocate a real interface id on first sight. */
		if (func->interfaces_nums[idx] < 0) {
			int id = usb_interface_id(func->conf, &func->function);
			if (unlikely(id < 0))
				return id;
			func->interfaces_nums[idx] = id;
		}
		newValue = func->interfaces_nums[idx];
		break;

	case FFS_STRING:
		/* String' IDs are allocated when fsf_data is bound to cdev */
		newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
		break;

	case FFS_ENDPOINT:
		/*
		 * USB_DT_ENDPOINT are handled in
		 * __ffs_func_bind_do_descs().
		 */
		if (desc->bDescriptorType == USB_DT_ENDPOINT)
			return 0;

		idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
		if (unlikely(!func->eps[idx].ep))
			return -EINVAL;

		{
			/* Use whichever speed's descriptor is populated. */
			struct usb_endpoint_descriptor **descs;
			descs = func->eps[idx].descs;
			newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
		}
		break;
	}

	pr_vdebug("%02x -> %02x\n", *valuep, newValue);
	*valuep = newValue;
	return 0;
}
2817
/*
 * ffs_os_desc_callback for _ffs_func_bind(): fills the per-interface
 * usb_os_desc_table from the (already validated) raw MS OS descriptors.
 * Ext prop storage is carved out of the bump allocators
 * (ms_os_descs_ext_prop_*_avail) sized during validation in
 * __ffs_data_do_os_desc().  Returns the number of raw bytes consumed.
 */
static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
				      struct usb_os_desc_header *h, void *data,
				      unsigned len, void *priv)
{
	struct ffs_function *func = priv;
	u8 length = 0;

	switch (type) {
	case FFS_OS_DESC_EXT_COMPAT: {
		struct usb_ext_compat_desc *desc = data;
		struct usb_os_desc_table *t;

		t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
		t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
		/* CompatibleID and SubCompatibleID are copied as one blob. */
		memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
		       ARRAY_SIZE(desc->CompatibleID) + ARRAY_SIZE(desc->SubCompatibleID));
		length = sizeof(*desc);
	}
		break;
	case FFS_OS_DESC_EXT_PROP: {
		struct usb_ext_prop_desc *desc = data;
		struct usb_os_desc_table *t;
		struct usb_os_desc_ext_prop *ext_prop;
		char *ext_prop_name;
		char *ext_prop_data;

		t = &func->function.os_desc_table[h->interface];
		t->if_id = func->interfaces_nums[h->interface];

		/* Carve an ext_prop record out of the bump allocator. */
		ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
		func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);

		ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
		ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
		ext_prop->data_len = le32_to_cpu(*(__le32 *)
			usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
		/* 14 == fixed part of an ext prop descriptor (raw bytes). */
		length = ext_prop->name_len + ext_prop->data_len + 14;

		ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
		func->ffs->ms_os_descs_ext_prop_name_avail +=
			ext_prop->name_len;

		ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
		func->ffs->ms_os_descs_ext_prop_data_avail +=
			ext_prop->data_len;
		memcpy(ext_prop_data, usb_ext_prop_data_ptr(data, ext_prop->name_len),
		       ext_prop->data_len);
		/* unicode data reported to the host as "WCHAR"s */
		switch (ext_prop->type) {
		case USB_EXT_PROP_UNICODE:
		case USB_EXT_PROP_UNICODE_ENV:
		case USB_EXT_PROP_UNICODE_LINK:
		case USB_EXT_PROP_UNICODE_MULTI:
			ext_prop->data_len *= 2;
			break;
		}
		ext_prop->data = ext_prop_data;

		memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
		       ext_prop->name_len);
		/* property name reported to the host as "WCHAR"s */
		ext_prop->name_len *= 2;
		ext_prop->name = ext_prop_name;

		t->os_desc->ext_prop_len +=
			ext_prop->name_len + ext_prop->data_len + 14;
		++t->os_desc->ext_prop_count;
		list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
	}
		break;
	default:
		pr_vdebug("unknown descriptor: %d\n", type);
	}

	return length;
}
2894
/*
 * Resolve the f_fs_opts for @f, check that userspace has written the
 * descriptors, and (for the first function instance) bind ffs_data to the
 * composite dev.  Returns the opts on success or ERR_PTR on failure; on
 * success a reference is accounted in ffs_opts->refcnt, which
 * ffs_func_bind() releases again if the rest of binding fails.
 */
static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
						       struct usb_configuration *c)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct f_fs_opts *ffs_opts =
		container_of(f->fi, struct f_fs_opts, func_inst);
	int ret;

	ENTER();

	/*
	 * Legacy gadget triggers binding in functionfs_ready_callback,
	 * which already uses locking; taking the same lock here would
	 * cause a deadlock.
	 *
	 * Configfs-enabled gadgets however do need ffs_dev_lock.
	 */
	if (!ffs_opts->no_configfs)
		ffs_dev_lock();
	/* Descriptors must have been supplied via ep0 before binding. */
	ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
	func->ffs = ffs_opts->dev->ffs_data;
	if (!ffs_opts->no_configfs)
		ffs_dev_unlock();
	if (ret)
		return ERR_PTR(ret);

	func->conf = c;
	func->gadget = c->cdev->gadget;

	/*
	 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
	 * configurations are bound in sequence with list_for_each_entry,
	 * in each configuration its functions are bound in sequence
	 * with list_for_each_entry, so we assume no race condition
	 * with regard to ffs_opts->bound access
	 */
	if (!ffs_opts->refcnt) {
		ret = functionfs_bind(func->ffs, c->cdev);
		if (ret)
			return ERR_PTR(ret);
	}
	ffs_opts->refcnt++;
	func->function.strings = func->ffs->stringtabs;

	return ffs_opts;
}
2941
_ffs_func_bind(struct usb_configuration *c, struct usb_function *f)2942 static int _ffs_func_bind(struct usb_configuration *c, struct usb_function *f)
2943 {
2944 struct ffs_function *func = ffs_func_from_usb(f);
2945 struct ffs_data *ffs = func->ffs;
2946
2947 const int full = !!func->ffs->fs_descs_count;
2948 const int high = !!func->ffs->hs_descs_count;
2949 const int super = !!func->ffs->ss_descs_count;
2950
2951 int fs_len, hs_len, ss_len, ret, i;
2952 struct ffs_ep *eps_ptr = NULL;
2953 struct usb_descriptor_header *des_head = NULL;
2954 struct usb_interface_descriptor *intf_ctl = NULL;
2955 struct usb_interface_descriptor *intf_data = NULL;
2956 /* Make it a single chunk, less management later on */
2957 vla_group(d);
2958 vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
2959 vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
2960 full ? ffs->fs_descs_count + 1 : 0);
2961 vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
2962 high ? ffs->hs_descs_count + 1 : 0);
2963 vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
2964 super ? ffs->ss_descs_count + 1 : 0);
2965 vla_item_with_sz(d, short, inums, ffs->interfaces_count);
2966 vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
2967 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2968 vla_item_with_sz(d, char[16], ext_compat,
2969 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2970 vla_item_with_sz(d, struct usb_os_desc, os_desc,
2971 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2972 vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
2973 ffs->ms_os_descs_ext_prop_count);
2974 vla_item_with_sz(d, char, ext_prop_name,
2975 ffs->ms_os_descs_ext_prop_name_len);
2976 vla_item_with_sz(d, char, ext_prop_data,
2977 ffs->ms_os_descs_ext_prop_data_len);
2978 vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
2979 char *vlabuf = NULL;
2980
2981 ENTER();
2982
2983 /* Has descriptors only for speeds gadget does not support */
2984 if (unlikely(!(full | high | super)))
2985 return -ENOTSUPP;
2986
2987 /* Allocate a single chunk, less management later on */
2988 vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
2989 if (unlikely(!vlabuf))
2990 return -ENOMEM;
2991
2992 ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
2993 ffs->ms_os_descs_ext_prop_name_avail =
2994 vla_ptr(vlabuf, d, ext_prop_name);
2995 ffs->ms_os_descs_ext_prop_data_avail =
2996 vla_ptr(vlabuf, d, ext_prop_data);
2997
2998 /* Copy descriptors */
2999 memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs, ffs->raw_descs_length);
3000
3001 memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
3002
3003 eps_ptr = vla_ptr(vlabuf, d, eps);
3004 for (i = 0; i < ffs->eps_count; i++)
3005 eps_ptr[i].num = -1;
3006
3007 /* Save pointers
3008 * d_eps == vlabuf, func->eps used to kfree vlabuf later
3009 */
3010 func->eps = vla_ptr(vlabuf, d, eps);
3011 func->interfaces_nums = vla_ptr(vlabuf, d, inums);
3012
3013 /*
3014 * Go through all the endpoint descriptors and allocate
3015 * endpoints first, so that later we can rewrite the endpoint
3016 * numbers without worrying that it may be described later on.
3017 */
3018 if (likely(full)) {
3019 func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
3020 fs_len = ffs_do_descs(ffs->fs_descs_count,
3021 vla_ptr(vlabuf, d, raw_descs),
3022 d_raw_descs__sz,
3023 __ffs_func_bind_do_descs, func);
3024 if (unlikely(fs_len < 0)) {
3025 ret = fs_len;
3026 goto error;
3027 }
3028 } else {
3029 fs_len = 0;
3030 }
3031 if (likely(high)) {
3032 func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
3033 hs_len = ffs_do_descs(ffs->hs_descs_count,
3034 vla_ptr(vlabuf, d, raw_descs) + fs_len,
3035 d_raw_descs__sz - fs_len,
3036 __ffs_func_bind_do_descs, func);
3037 if (unlikely(hs_len < 0)) {
3038 ret = hs_len;
3039 goto error;
3040 }
3041 } else {
3042 hs_len = 0;
3043 }
3044 if (likely(super)) {
3045 func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
3046 ss_len = ffs_do_descs(ffs->ss_descs_count,
3047 vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
3048 d_raw_descs__sz - fs_len - hs_len,
3049 __ffs_func_bind_do_descs, func);
3050 if (unlikely(ss_len < 0)) {
3051 ret = ss_len;
3052 goto error;
3053 }
3054 } else {
3055 ss_len = 0;
3056 }
3057 /*
3058 * Now handle interface numbers allocation and interface and
3059 * endpoint numbers rewriting. We can do that in one go
3060 * now.
3061 */
3062 ret = ffs_do_descs(ffs->fs_descs_count +
3063 (high ? ffs->hs_descs_count : 0) +
3064 (super ? ffs->ss_descs_count : 0),
3065 vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
3066 __ffs_func_bind_do_nums, func);
3067 if (unlikely(ret < 0))
3068 goto error;
3069
3070 func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
3071 if (c->cdev->use_os_string) {
3072 for (i = 0; i < ffs->interfaces_count; ++i) {
3073 struct usb_os_desc *desc;
3074
3075 desc = func->function.os_desc_table[i].os_desc =
3076 vla_ptr(vlabuf, d, os_desc) +
3077 i * sizeof(struct usb_os_desc);
3078 desc->ext_compat_id =
3079 vla_ptr(vlabuf, d, ext_compat) + i * 16;
3080 INIT_LIST_HEAD(&desc->ext_prop);
3081 }
3082 ret = ffs_do_os_descs(ffs->ms_os_descs_count,
3083 vla_ptr(vlabuf, d, raw_descs) +
3084 fs_len + hs_len + ss_len,
3085 d_raw_descs__sz - fs_len - hs_len -
3086 ss_len,
3087 __ffs_func_bind_do_os_desc, func);
3088 if (unlikely(ret < 0))
3089 goto error;
3090 }
3091 func->function.os_desc_n =
3092 c->cdev->use_os_string ? ffs->interfaces_count : 0;
3093
3094 for (i = 0; i< func->ffs->fs_descs_count; i++) {
3095 des_head = func->function.fs_descriptors[i];
3096 if (des_head->bDescriptorType == USB_DT_INTERFACE) {
3097 struct usb_interface_descriptor *intf = (struct usb_interface_descriptor *)des_head;
3098 if (intf->bNumEndpoints > 0) {
3099 if (intf_ctl == NULL) {
3100 intf_ctl = intf;
3101 } else {
3102 intf_data = intf;
3103 break;
3104 }
3105 }
3106 }
3107 }
3108 for (i = 0; i< func->ffs->fs_descs_count; i++) {
3109 des_head = func->function.fs_descriptors[i];
3110 if (des_head->bDescriptorType == USB_DT_INTERFACE_ASSOCIATION) {
3111 struct usb_interface_assoc_descriptor *a_dec = (struct usb_interface_assoc_descriptor *)des_head;
3112 a_dec->bFirstInterface = intf_ctl->bInterfaceNumber;
3113 } else if (des_head->bDescriptorType == USB_DT_CS_INTERFACE) {
3114 struct usb_cdc_header_desc *cs_des = (struct usb_cdc_header_desc *)des_head;
3115 if (cs_des->bDescriptorSubType == USB_CDC_CALL_MANAGEMENT_TYPE) {
3116 struct usb_cdc_call_mgmt_descriptor *mgmt_des = (struct usb_cdc_call_mgmt_descriptor *)des_head;
3117 mgmt_des->bDataInterface = intf_data->bInterfaceNumber;
3118 } else if (cs_des->bDescriptorSubType == USB_CDC_UNION_TYPE) {
3119 struct usb_cdc_union_desc *union_des = (struct usb_cdc_union_desc *)des_head;
3120 union_des->bMasterInterface0 = intf_ctl->bInterfaceNumber;
3121 union_des->bSlaveInterface0 = intf_data->bInterfaceNumber;
3122 } else if (cs_des->bDescriptorSubType == USB_CDC_ETHERNET_TYPE) {
3123 struct usb_cdc_ether_desc *ether_des = (struct usb_cdc_ether_desc *)des_head;
3124 ether_des->iMACAddress = intf_ctl->iInterface + 1;
3125 }
3126 }
3127 }
3128 for (i = 0; i< func->ffs->hs_descs_count; i++) {
3129 des_head = func->function.hs_descriptors[i];
3130 if (des_head->bDescriptorType == USB_DT_INTERFACE_ASSOCIATION) {
3131 struct usb_interface_assoc_descriptor *a_dec = (struct usb_interface_assoc_descriptor *)des_head;
3132 a_dec->bFirstInterface = intf_ctl->bInterfaceNumber;
3133 } else if (des_head->bDescriptorType == USB_DT_CS_INTERFACE) {
3134 struct usb_cdc_header_desc *cs_des = (struct usb_cdc_header_desc *)des_head;
3135 if (cs_des->bDescriptorSubType == USB_CDC_CALL_MANAGEMENT_TYPE) {
3136 struct usb_cdc_call_mgmt_descriptor *mgmt_des = (struct usb_cdc_call_mgmt_descriptor *)des_head;
3137 mgmt_des->bDataInterface = intf_data->bInterfaceNumber;
3138 } else if (cs_des->bDescriptorSubType == USB_CDC_UNION_TYPE) {
3139 struct usb_cdc_union_desc *union_des = (struct usb_cdc_union_desc *)des_head;
3140 union_des->bMasterInterface0 = intf_ctl->bInterfaceNumber;
3141 union_des->bSlaveInterface0 = intf_data->bInterfaceNumber;
3142 } else if (cs_des->bDescriptorSubType == USB_CDC_ETHERNET_TYPE) {
3143 struct usb_cdc_ether_desc *ether_des = (struct usb_cdc_ether_desc *)des_head;
3144 ether_des->iMACAddress = intf_ctl->iInterface + 1;
3145 }
3146 }
3147 }
3148 for (i = 0; i< func->ffs->ss_descs_count; i++) {
3149 des_head = func->function.ss_descriptors[i];
3150 if (des_head->bDescriptorType == USB_DT_INTERFACE_ASSOCIATION) {
3151 struct usb_interface_assoc_descriptor *a_dec = (struct usb_interface_assoc_descriptor *)des_head;
3152 a_dec->bFirstInterface = intf_ctl->bInterfaceNumber;
3153 } else if (des_head->bDescriptorType == USB_DT_CS_INTERFACE) {
3154 struct usb_cdc_header_desc *cs_des = (struct usb_cdc_header_desc *)des_head;
3155 if (cs_des->bDescriptorSubType == USB_CDC_CALL_MANAGEMENT_TYPE) {
3156 struct usb_cdc_call_mgmt_descriptor *mgmt_des = (struct usb_cdc_call_mgmt_descriptor *)des_head;
3157 mgmt_des->bDataInterface = intf_data->bInterfaceNumber;
3158 } else if (cs_des->bDescriptorSubType == USB_CDC_UNION_TYPE) {
3159 struct usb_cdc_union_desc *union_des = (struct usb_cdc_union_desc *)des_head;
3160 union_des->bMasterInterface0 = intf_ctl->bInterfaceNumber;
3161 union_des->bSlaveInterface0 = intf_data->bInterfaceNumber;
3162 } else if (cs_des->bDescriptorSubType == USB_CDC_ETHERNET_TYPE) {
3163 struct usb_cdc_ether_desc *ether_des = (struct usb_cdc_ether_desc *)des_head;
3164 ether_des->iMACAddress = intf_ctl->iInterface + 1;
3165 }
3166 }
3167 }
3168 /* And we're done */
3169 ffs->eps = func->eps;
3170 ffs_event_add(ffs, FUNCTIONFS_BIND);
3171 return 0;
3172
3173 error:
3174 /* XXX Do we need to release all claimed endpoints here? */
3175 return ret;
3176 }
3177
ffs_func_bind(struct usb_configuration *c, struct usb_function *f)3178 static int ffs_func_bind(struct usb_configuration *c, struct usb_function *f)
3179 {
3180 struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
3181 struct ffs_function *func = ffs_func_from_usb(f);
3182 int ret;
3183
3184 if (IS_ERR(ffs_opts))
3185 return PTR_ERR(ffs_opts);
3186
3187 ret = _ffs_func_bind(c, f);
3188 if (ret && !--ffs_opts->refcnt)
3189 functionfs_unbind(func->ffs);
3190
3191 return ret;
3192 }
3193
3194 /* Other USB function hooks *************************************************/
ffs_reset_work(struct work_struct *work)3195 static void ffs_reset_work(struct work_struct *work)
3196 {
3197 struct ffs_data *ffs = container_of(work,
3198 struct ffs_data, reset_work);
3199 ffs_data_reset(ffs);
3200 }
3201
/*
 * set_alt hook: (re)activate the function for @interface/@alt, or tear
 * it down when @alt is (unsigned)-1 (used by ffs_func_disable()).
 *
 * Returns 0 on success, a negative errno when the interface is not ours
 * or the instance is not in the FFS_ACTIVE state.
 */
static int ffs_func_set_alt(struct usb_function *f,
		unsigned interface, unsigned alt)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	int ret = 0, intf;

	if (alt != (unsigned)-1) {
		/* Verify the interface number actually maps to this function. */
		intf = ffs_func_revmap_intf(func, interface);
		if (unlikely(intf < 0))
			return intf;
	}

	/* Quiesce whatever function is currently active first. */
	if (ffs->func)
		ffs_func_eps_disable(ffs->func);

	if (ffs->state == FFS_DEACTIVATED) {
		/*
		 * Userspace went away but the instance lingered; schedule a
		 * full reset from process context and report the function
		 * unavailable for now.
		 */
		ffs->state = FFS_CLOSING;
		INIT_WORK(&ffs->reset_work, ffs_reset_work);
		schedule_work(&ffs->reset_work);
		return -ENODEV;
	}

	if (ffs->state != FFS_ACTIVE)
		return -ENODEV;

	if (alt == (unsigned)-1) {
		/* Disable request: detach and notify userspace. */
		ffs->func = NULL;
		ffs_event_add(ffs, FUNCTIONFS_DISABLE);
		return 0;
	}

	/* Enable: claim the instance, bring up endpoints, notify userspace. */
	ffs->func = func;
	ret = ffs_func_eps_enable(func);
	if (likely(ret >= 0))
		ffs_event_add(ffs, FUNCTIONFS_ENABLE);
	return ret;
}
3240
/* disable hook: implemented as "select alternate setting -1". */
static void ffs_func_disable(struct usb_function *f)
{
	ffs_func_set_alt(f, 0, (unsigned)-1);
}
3245
/*
 * setup hook: forward a control request to userspace as a
 * FUNCTIONFS_SETUP event.
 *
 * Interface and endpoint recipients are reverse-mapped into the
 * function-local numbering before being queued; other recipients are
 * accepted only when FUNCTIONFS_ALL_CTRL_RECIP is set.
 *
 * Returns USB_GADGET_DELAYED_STATUS for zero-length requests (userspace
 * completes the status stage), 0 otherwise, negative errno on error.
 */
static int ffs_func_setup(struct usb_function *f, const struct usb_ctrlrequest *creq)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	unsigned long flags;
	int ret;

	ENTER();

	pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
	pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
	pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
	pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex));
	pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength));

	/*
	 * Most requests directed to interface go through here
	 * (notable exceptions are set/get interface) so we need to
	 * handle them. All other either handled by composite or
	 * passed to usb_configuration->setup() (if one is set). No
	 * matter, we will handle requests directed to endpoint here
	 * as well (as it's straightforward). Other request recipient
	 * types are only handled when the user flag FUNCTIONFS_ALL_CTRL_RECIP
	 * is being used.
	 */
	if (ffs->state != FFS_ACTIVE)
		return -ENODEV;

	switch (creq->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_INTERFACE:
		/* Reject requests for interfaces that are not ours. */
		ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
		if (unlikely(ret < 0))
			return ret;
		break;

	case USB_RECIP_ENDPOINT:
		ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
		if (unlikely(ret < 0))
			return ret;
		/* Userspace may want the virtual (descriptor) address back. */
		if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
			ret = func->ffs->eps_addrmap[ret];
		break;

	default:
		if (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP)
			ret = le16_to_cpu(creq->wIndex);
		else
			return -EOPNOTSUPP;
	}

	/* Queue the (rewritten) setup packet under the event waitqueue lock. */
	spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
	ffs->ev.setup = *creq;
	ffs->ev.setup.wIndex = cpu_to_le16(ret);
	__ffs_event_add(ffs, FUNCTIONFS_SETUP);
	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);

	return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
}
3304
/*
 * req_match hook: tell the composite layer whether this function wants
 * to handle @creq.  Interface/endpoint recipients match when the index
 * reverse-maps to this function; other recipients only match when the
 * FUNCTIONFS_ALL_CTRL_RECIP user flag is set.
 */
static bool ffs_func_req_match(struct usb_function *f,
			       const struct usb_ctrlrequest *creq,
			       bool config0)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	u16 windex = le16_to_cpu(creq->wIndex);

	/* Config-0 setups are only taken when userspace opted in. */
	if (config0 && !(func->ffs->user_flags & FUNCTIONFS_CONFIG0_SETUP))
		return false;

	switch (creq->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_INTERFACE:
		return ffs_func_revmap_intf(func, windex) >= 0;
	case USB_RECIP_ENDPOINT:
		return ffs_func_revmap_ep(func, windex) >= 0;
	default:
		return !!(func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP);
	}
}
3326
ffs_func_suspend(struct usb_function *f)3327 static void ffs_func_suspend(struct usb_function *f)
3328 {
3329 ENTER();
3330 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
3331 }
3332
ffs_func_resume(struct usb_function *f)3333 static void ffs_func_resume(struct usb_function *f)
3334 {
3335 ENTER();
3336 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
3337 }
3338
3339 /* Endpoint and interface numbers reverse mapping ***************************/
/*
 * Map a USB endpoint address back to this function's endpoint number.
 * Returns the (non-zero) number, or -EDOM when the address is not ours.
 */
static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
{
	u8 mapped = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];

	return mapped ? mapped : -EDOM;
}
3345
/*
 * Map a global interface number back to this function's interface
 * index.  Returns the index, or -EDOM when @intf is not ours.
 */
static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
{
	short *nums = func->interfaces_nums;
	unsigned i, count = func->ffs->interfaces_count;

	for (i = 0; i < count; i++) {
		/* Negative entries mark unassigned slots. */
		if (nums[i] >= 0 && nums[i] == intf)
			return i;
	}

	return -EDOM;
}
3358
/* Devices management *******************************************************/

/* All registered ffs_dev instances; protected by ffs_dev_lock()/unlock(). */
static LIST_HEAD(ffs_devices);
3361
/*
 * Look up a registered device by name.  Returns NULL when @name is NULL
 * or no device matches.
 *
 * NOTE(review): hitting a device with a NULL name aborts the whole walk
 * instead of skipping it — presumably unnamed devices cannot precede
 * named ones in this list; confirm against how dev->name is populated.
 *
 * ffs_lock must be taken by the caller of this function.
 */
static struct ffs_dev *_ffs_do_find_dev(const char *name)
{
	struct ffs_dev *dev = NULL;

	if (!name)
		return NULL;

	list_for_each_entry(dev, &ffs_devices, entry) {
		if (!dev->name)
			return NULL;
		if (strcmp(dev->name, name) == 0)
			return dev;
	}

	return NULL;
}
3378
3379 /*
3380 * ffs_lock must be taken by the caller of this function
3381 */
_ffs_get_single_dev(void)3382 static struct ffs_dev *_ffs_get_single_dev(void)
3383 {
3384 struct ffs_dev *dev = NULL;
3385
3386 if (list_is_singular(&ffs_devices)) {
3387 dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
3388 if (dev->single)
3389 return dev;
3390 }
3391
3392 return NULL;
3393 }
3394
3395 /*
3396 * ffs_lock must be taken by the caller of this function
3397 */
/*
 * Find the device for @name, preferring a "single" device (which
 * matches any name).
 *
 * ffs_lock must be taken by the caller of this function.
 */
static struct ffs_dev *_ffs_find_dev(const char *name)
{
	struct ffs_dev *dev = _ffs_get_single_dev();

	return dev ? dev : _ffs_do_find_dev(name);
}
3408
3409 /* Configfs support *********************************************************/
to_ffs_opts(struct config_item *item)3410 static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
3411 {
3412 return container_of(to_config_group(item), struct f_fs_opts,
3413 func_inst.group);
3414 }
3415
ffs_attr_release(struct config_item *item)3416 static void ffs_attr_release(struct config_item *item)
3417 {
3418 struct f_fs_opts *opts = to_ffs_opts(item);
3419
3420 usb_put_function_instance(&opts->func_inst);
3421 }
3422
/* configfs item operations: only release is required. */
static struct configfs_item_operations ffs_item_ops = {
	.release = ffs_attr_release,
};

/* configfs type describing a FunctionFS function directory. */
static const struct config_item_type ffs_func_type = {
	.ct_item_ops = &ffs_item_ops,
	.ct_owner = THIS_MODULE,
};
3431
3432 /* Function registration interface ******************************************/
ffs_free_inst(struct usb_function_instance *f)3433 static void ffs_free_inst(struct usb_function_instance *f)
3434 {
3435 struct f_fs_opts *opts;
3436
3437 opts = to_f_fs_opts(f);
3438 ffs_dev_lock();
3439 _ffs_free_dev(opts->dev);
3440 ffs_dev_unlock();
3441 kfree(opts);
3442 }
3443
ffs_set_inst_name(struct usb_function_instance *fi, const char *name)3444 static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
3445 {
3446 char name_dev[MAX_NAMELEN] = {0};
3447 if (snprintf(name_dev, MAX_NAMELEN - 1, "%s.%s", FUNCTION_GENERIC, name) < 0) {
3448 return -EFAULT;
3449 }
3450 if (strlen(name_dev) >= sizeof_field(struct ffs_dev, name))
3451 return -ENAMETOOLONG;
3452 return ffs_name_dev_adapter(to_f_fs_opts(fi)->dev, name_dev);
3453 }
3454
ffs_alloc_inst(void)3455 static struct usb_function_instance *ffs_alloc_inst(void)
3456 {
3457 struct f_fs_opts *opts = NULL;
3458 struct ffs_dev *dev = NULL;
3459
3460 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
3461 if (!opts)
3462 return ERR_PTR(-ENOMEM);
3463
3464 opts->func_inst.set_inst_name = ffs_set_inst_name;
3465 opts->func_inst.free_func_inst = ffs_free_inst;
3466 ffs_dev_lock();
3467 dev = _ffs_alloc_dev();
3468 ffs_dev_unlock();
3469 if (IS_ERR(dev)) {
3470 kfree(opts);
3471 return ERR_CAST(dev);
3472 }
3473 opts->dev = dev;
3474 dev->opts = opts;
3475
3476 config_group_init_type_name(&opts->func_inst.group, "",
3477 &ffs_func_type);
3478 return &opts->func_inst;
3479 }
3480
/* free_func hook: the usb_function is embedded in an ffs_function. */
static void ffs_free(struct usb_function *f)
{
	struct ffs_function *func = ffs_func_from_usb(f);

	kfree(func);
}
3485
/*
 * unbind hook: detach this function from the gadget, drop the options
 * reference (unbinding userspace on the last one) and free the per-bind
 * endpoint requests and descriptor storage.
 */
static void ffs_func_unbind(struct usb_configuration *c,
		struct usb_function *f)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	struct f_fs_opts *opts =
		container_of(f->fi, struct f_fs_opts, func_inst);
	struct ffs_ep *ep = func->eps;
	unsigned count = ffs->eps_count;
	unsigned long flags;

	ENTER();
	/* Stop I/O first if this is the currently active function. */
	if (ffs->func == func) {
		ffs_func_eps_disable(func);
		ffs->func = NULL;
	}

	if (!--opts->refcnt)
		functionfs_unbind(ffs);

	/* cleanup after autoconfig */
	spin_lock_irqsave(&func->ffs->eps_lock, flags);
	while (count--) {
		/* Requests are per-bind; the endpoints belong to the UDC. */
		if (ep->ep && ep->req)
			usb_ep_free_request(ep->ep, ep->req);
		ep->req = NULL;
		++ep;
	}
	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
	kfree(func->eps);
	func->eps = NULL;
	/*
	 * eps, descriptors and interfaces_nums are allocated in the
	 * same chunk so only one free is required.
	 */
	func->function.fs_descriptors = NULL;
	func->function.hs_descriptors = NULL;
	func->function.ss_descriptors = NULL;
	func->interfaces_nums = NULL;

	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
}
3528
/*
 * get_alt hook: interface 0 only has altsetting 0; every other
 * interface reports altsetting 1.
 */
static int ffs_func_get_alt(struct usb_function *f, unsigned intf)
{
	return intf ? 1 : 0;
}
3535
ffs_alloc(struct usb_function_instance *fi)3536 static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
3537 {
3538 struct ffs_function *func = NULL;
3539
3540 ENTER();
3541
3542 func = kzalloc(sizeof(*func), GFP_KERNEL);
3543 if (unlikely(!func))
3544 return ERR_PTR(-ENOMEM);
3545
3546 func->function.name = "FunctionFS Adapter";
3547
3548 func->function.bind = ffs_func_bind;
3549 func->function.unbind = ffs_func_unbind;
3550 func->function.set_alt = ffs_func_set_alt;
3551 func->function.get_alt = ffs_func_get_alt;
3552 func->function.disable = ffs_func_disable;
3553 func->function.setup = ffs_func_setup;
3554 func->function.req_match = ffs_func_req_match;
3555 func->function.suspend = ffs_func_suspend;
3556 func->function.resume = ffs_func_resume;
3557 func->function.free_func = ffs_free;
3558
3559 return &func->function;
3560 }
3561
3562 /*
3563 * ffs_lock must be taken by the caller of this function
3564 */
_ffs_alloc_dev(void)3565 static struct ffs_dev *_ffs_alloc_dev(void)
3566 {
3567 struct ffs_dev *dev = NULL;
3568 int ret;
3569
3570 if (_ffs_get_single_dev())
3571 return ERR_PTR(-EBUSY);
3572
3573 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3574 if (!dev)
3575 return ERR_PTR(-ENOMEM);
3576
3577 if (list_empty(&ffs_devices)) {
3578 ret = functionfs_init();
3579 if (ret) {
3580 kfree(dev);
3581 return ERR_PTR(ret);
3582 }
3583 }
3584
3585 list_add(&dev->entry, &ffs_devices);
3586
3587 return dev;
3588 }
3589
ffs_name_dev_adapter(struct ffs_dev *dev, const char *name)3590 int ffs_name_dev_adapter(struct ffs_dev *dev, const char *name)
3591 {
3592 struct ffs_dev *existing = NULL;
3593 int ret = 0;
3594
3595 ffs_dev_lock();
3596
3597 existing = _ffs_do_find_dev(name);
3598 if (!existing)
3599 strlcpy(dev->name, name, ARRAY_SIZE(dev->name));
3600 else if (existing != dev)
3601 ret = -EBUSY;
3602
3603 ffs_dev_unlock();
3604
3605 return ret;
3606 }
3607 EXPORT_SYMBOL_GPL(ffs_name_dev_adapter);
3608
ffs_single_dev_adapter(struct ffs_dev *dev)3609 int ffs_single_dev_adapter(struct ffs_dev *dev)
3610 {
3611 int ret;
3612
3613 ret = 0;
3614 ffs_dev_lock();
3615
3616 if (!list_is_singular(&ffs_devices))
3617 ret = -EBUSY;
3618 else
3619 dev->single = true;
3620
3621 ffs_dev_unlock();
3622 return ret;
3623 }
3624 EXPORT_SYMBOL_GPL(ffs_single_dev_adapter);
3625 /*
3626 * ffs_lock must be taken by the caller of this function
3627 */
/*
 * Unregister and free @dev.  The last device also tears down the shared
 * functionfs state.
 *
 * ffs_lock must be taken by the caller of this function.
 */
static void _ffs_free_dev(struct ffs_dev *dev)
{
	list_del(&dev->entry);

	/* Clear the private_data pointer to stop incorrect dev access */
	if (dev->ffs_data)
		dev->ffs_data->private_data = NULL;

	kfree(dev);
	/* dev was already unlinked above, so the emptiness check is safe. */
	if (list_empty(&ffs_devices))
		functionfs_cleanup();
}
3640
ffs_acquire_dev(const char *dev_name)3641 static void *ffs_acquire_dev(const char *dev_name)
3642 {
3643 struct ffs_dev *ffs_dev = NULL;
3644
3645 ENTER();
3646 ffs_dev_lock();
3647
3648 ffs_dev = _ffs_find_dev(dev_name);
3649 if (!ffs_dev)
3650 ffs_dev = ERR_PTR(-ENOENT);
3651 else if (ffs_dev->mounted)
3652 ffs_dev = ERR_PTR(-EBUSY);
3653 else if (ffs_dev->ffs_acquire_dev_callback &&
3654 ffs_dev->ffs_acquire_dev_callback(ffs_dev))
3655 ffs_dev = ERR_PTR(-ENOENT);
3656 else
3657 ffs_dev->mounted = true;
3658
3659 ffs_dev_unlock();
3660 return ffs_dev;
3661 }
3662
ffs_release_dev(struct ffs_data *ffs_data)3663 static void ffs_release_dev(struct ffs_data *ffs_data)
3664 {
3665 struct ffs_dev *ffs_dev = NULL;
3666
3667 ENTER();
3668 ffs_dev_lock();
3669
3670 ffs_dev = ffs_data->private_data;
3671 if (ffs_dev) {
3672 ffs_dev->mounted = false;
3673
3674 if (ffs_dev->ffs_release_dev_callback)
3675 ffs_dev->ffs_release_dev_callback(ffs_dev);
3676 }
3677
3678 ffs_dev_unlock();
3679 }
3680
/*
 * Called when userspace has supplied descriptors and strings: mark the
 * device's descriptors ready and run its ready callback.
 *
 * Returns 0 on success, -EINVAL without a backing device, -EBUSY if the
 * descriptors were already marked ready, or the callback's error.
 */
static int ffs_ready(struct ffs_data *ffs)
{
	struct ffs_dev *ffs_obj = NULL;
	int ret = 0;

	ENTER();
	ffs_dev_lock();

	ffs_obj = ffs->private_data;
	if (!ffs_obj) {
		ret = -EINVAL;
		goto done;
	}
	if (WARN_ON(ffs_obj->desc_ready)) {
		ret = -EBUSY;
		goto done;
	}

	ffs_obj->desc_ready = true;
	ffs_obj->ffs_data = ffs;

	if (ffs_obj->ffs_ready_callback) {
		ret = ffs_obj->ffs_ready_callback(ffs);
		if (ret)
			goto done;
	}

	/* Only a successful ready transition arms the closed callback. */
	set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
done:
	ffs_dev_unlock();
	return ret;
}
3713
/*
 * Called when userspace closes the instance: run the closed callback
 * (if armed by ffs_ready()) and, when the function was bound through
 * configfs, unregister the owning gadget.
 */
static void ffs_closed(struct ffs_data *ffs)
{
	struct ffs_dev *ffs_obj = NULL;
	struct f_fs_opts *opts = NULL;
	struct config_item *ci = NULL;

	ENTER();
	ffs_dev_lock();

	ffs_obj = ffs->private_data;
	if (!ffs_obj)
		goto done;

	ffs_obj->desc_ready = false;
	ffs_obj->ffs_data = NULL;

	/* test_and_clear guarantees the callback fires at most once. */
	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
	    ffs_obj->ffs_closed_callback)
		ffs_obj->ffs_closed_callback(ffs);

	if (ffs_obj->opts)
		opts = ffs_obj->opts;
	else
		goto done;

	/* Bail out unless a live configfs hierarchy backs this instance. */
	if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
	    || !kref_read(&opts->func_inst.group.cg_item.ci_kref))
		goto done;

	ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
	/* Drop the lock before unregistering: that path may sleep/re-lock. */
	ffs_dev_unlock();

	if (test_bit(FFS_FL_BOUND, &ffs->flags))
		unregister_gadget_item(ci);
	return;
done:
	ffs_dev_unlock();
}
3752
3753 /* Misc helper functions ****************************************************/
ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)3754 static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
3755 {
3756 return nonblock
3757 ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
3758 : mutex_lock_interruptible(mutex);
3759 }
3760
/*
 * Copy @len bytes from userspace into a freshly allocated kernel
 * buffer.
 *
 * Returns NULL when @len is zero, an ERR_PTR (-ENOMEM/-EFAULT) on
 * failure, otherwise the buffer; the caller owns it and must kfree().
 */
static char *ffs_prepare_buffer(const char __user *buf, size_t len)
{
	char *data;

	if (unlikely(!len))
		return NULL;

	/*
	 * memdup_user() replaces the open-coded kmalloc + copy_from_user
	 * pair (same -ENOMEM/-EFAULT results, no manual unwind needed);
	 * mainline f_fs.c uses the same helper here.
	 */
	data = memdup_user(buf, len);
	if (IS_ERR(data))
		return data;

	pr_vdebug("Buffer from user space:\n");
	ffs_dump_mem("", data, len);

	return data;
}
3782
3783 DECLARE_USB_FUNCTION_INIT(f_generic, ffs_alloc_inst, ffs_alloc);
3784 MODULE_LICENSE("GPL");
3785