1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * inode.c -- user mode filesystem api for usb gadget controllers
4 *
5 * Copyright (C) 2003-2004 David Brownell
6 * Copyright (C) 2003 Agilent Technologies
7 */
8
9
10/* #define VERBOSE_DEBUG */
11
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/fs.h>
15#include <linux/fs_context.h>
16#include <linux/pagemap.h>
17#include <linux/uts.h>
18#include <linux/wait.h>
19#include <linux/compiler.h>
20#include <linux/uaccess.h>
21#include <linux/sched.h>
22#include <linux/slab.h>
23#include <linux/poll.h>
24#include <linux/kthread.h>
25#include <linux/aio.h>
26#include <linux/uio.h>
27#include <linux/refcount.h>
28#include <linux/delay.h>
29#include <linux/device.h>
30#include <linux/moduleparam.h>
31
32#include <linux/usb/gadgetfs.h>
33#include <linux/usb/gadget.h>
34
35
36/*
37 * The gadgetfs API maps each endpoint to a file descriptor so that you
38 * can use standard synchronous read/write calls for I/O.  There's some
39 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
40 * drivers show how this works in practice.  You can also use AIO to
41 * eliminate I/O gaps between requests, to help when streaming data.
42 *
43 * Key parts that must be USB-specific are protocols defining how the
44 * read/write operations relate to the hardware state machines.  There
45 * are two types of files.  One type is for the device, implementing ep0.
46 * The other type is for each IN or OUT endpoint.  In both cases, the
47 * user mode driver must configure the hardware before using it.
48 *
49 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
50 *   (by writing configuration and device descriptors).  Afterwards it
51 *   may serve as a source of device events, used to handle all control
52 *   requests other than basic enumeration.
53 *
54 * - Then, after a SET_CONFIGURATION control request, ep_config() is
55 *   called when each /dev/gadget/ep* file is configured (by writing
56 *   endpoint descriptors).  Afterwards these files are used to write()
57 *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
58 *   direction" request is issued (like reading an IN endpoint).
59 *
60 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
61 * not possible on all hardware.  For example, precise fault handling with
62 * respect to data left in endpoint fifos after aborted operations; or
63 * selective clearing of endpoint halts, to implement SET_INTERFACE.
64 */
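
/* A user mode driver might drive this, very roughly, as follows (sketch
 * only; paths, descriptor contents and event handling are up to the
 * driver):
 *
 *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
 *     status = write (fd, descriptors, sizeof descriptors)
 *     while (read (fd, &event, sizeof event) == sizeof event) {
 *         ... handle GADGETFS_CONNECT, GADGETFS_SETUP, etc ...
 *         ... open and configure /dev/gadget/ep* files as needed ...
 *     }
 *
 * The exact layout of those descriptor writes is documented before
 * dev_config() and ep_config() below.
 */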
65
66#define	DRIVER_DESC	"USB Gadget filesystem"
67#define	DRIVER_VERSION	"24 Aug 2004"
68
69static const char driver_desc [] = DRIVER_DESC;
70static const char shortname [] = "gadgetfs";
71
72MODULE_DESCRIPTION (DRIVER_DESC);
73MODULE_AUTHOR ("David Brownell");
74MODULE_LICENSE ("GPL");
75
76static int ep_open(struct inode *, struct file *);
77
78
79/*----------------------------------------------------------------------*/
80
81#define GADGETFS_MAGIC		0xaee71ee7
82
83/* /dev/gadget/$CHIP represents ep0 and the whole device */
84enum ep0_state {
85	/* DISABLED is the initial state. */
86	STATE_DEV_DISABLED = 0,
87
88	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
89	 * ep0/device i/o modes and binding to the controller.  Driver
90	 * must always write descriptors to initialize the device, then
91	 * the device becomes UNCONNECTED until enumeration.
92	 */
93	STATE_DEV_OPENED,
94
95	/* From then on, ep0 fd is in either of two basic modes:
96	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
97	 * - SETUP: read/write will transfer control data and succeed;
98	 *   or if "wrong direction", performs protocol stall
99	 */
100	STATE_DEV_UNCONNECTED,
101	STATE_DEV_CONNECTED,
102	STATE_DEV_SETUP,
103
104	/* UNBOUND means the driver closed ep0, so the device won't be
105	 * accessible again (DEV_DISABLED) until all fds are closed.
106	 */
107	STATE_DEV_UNBOUND,
108};
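
/* Typical lifecycle, as driven by the code below (sketch, not exhaustive):
 *
 *   DISABLED --open()--> OPENED --write(descriptors)--> UNCONNECTED
 *   UNCONNECTED <--> CONNECTED <--> SETUP   (enumeration, control traffic)
 *   closing ep0 unbinds the gadget: UNBOUND, then eventually DISABLED
 */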
109
110/* enough for the whole queue: most events invalidate others */
111#define	N_EVENT			5
112
113#define RBUF_SIZE		256
114
115struct dev_data {
116	spinlock_t			lock;
117	refcount_t			count;
118	int				udc_usage;
119	enum ep0_state			state;		/* P: lock */
120	struct usb_gadgetfs_event	event [N_EVENT];
121	unsigned			ev_next;
122	struct fasync_struct		*fasync;
123	u8				current_config;
124
125	/* drivers reading ep0 MUST handle control requests (SETUP)
126	 * reported that way; else the host will time out.
127	 */
128	unsigned			usermode_setup : 1,
129					setup_in : 1,
130					setup_can_stall : 1,
131					setup_out_ready : 1,
132					setup_out_error : 1,
133					setup_abort : 1,
134					gadget_registered : 1;
135	unsigned			setup_wLength;
136
137	/* the rest is basically write-once */
138	struct usb_config_descriptor	*config, *hs_config;
139	struct usb_device_descriptor	*dev;
140	struct usb_request		*req;
141	struct usb_gadget		*gadget;
142	struct list_head		epfiles;
143	void				*buf;
144	wait_queue_head_t		wait;
145	struct super_block		*sb;
146	struct dentry			*dentry;
147
148	/* except this scratch i/o buffer for ep0 */
149	u8				rbuf[RBUF_SIZE];
150};
151
152static inline void get_dev (struct dev_data *data)
153{
154	refcount_inc (&data->count);
155}
156
157static void put_dev (struct dev_data *data)
158{
159	if (likely (!refcount_dec_and_test (&data->count)))
160		return;
161	/* needs no more cleanup */
162	BUG_ON (waitqueue_active (&data->wait));
163	kfree (data);
164}
165
166static struct dev_data *dev_new (void)
167{
168	struct dev_data		*dev;
169
170	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
171	if (!dev)
172		return NULL;
173	dev->state = STATE_DEV_DISABLED;
174	refcount_set (&dev->count, 1);
175	spin_lock_init (&dev->lock);
176	INIT_LIST_HEAD (&dev->epfiles);
177	init_waitqueue_head (&dev->wait);
178	return dev;
179}
180
181/*----------------------------------------------------------------------*/
182
183/* other /dev/gadget/$ENDPOINT files represent endpoints */
184enum ep_state {
185	STATE_EP_DISABLED = 0,
186	STATE_EP_READY,
187	STATE_EP_ENABLED,
188	STATE_EP_UNBOUND,
189};
190
191struct ep_data {
192	struct mutex			lock;
193	enum ep_state			state;
194	refcount_t			count;
195	struct dev_data			*dev;
196	/* must hold dev->lock before accessing ep or req */
197	struct usb_ep			*ep;
198	struct usb_request		*req;
199	ssize_t				status;
200	char				name [16];
201	struct usb_endpoint_descriptor	desc, hs_desc;
202	struct list_head		epfiles;
203	wait_queue_head_t		wait;
204	struct dentry			*dentry;
205};
206
207static inline void get_ep (struct ep_data *data)
208{
209	refcount_inc (&data->count);
210}
211
212static void put_ep (struct ep_data *data)
213{
214	if (likely (!refcount_dec_and_test (&data->count)))
215		return;
216	put_dev (data->dev);
217	/* needs no more cleanup */
218	BUG_ON (!list_empty (&data->epfiles));
219	BUG_ON (waitqueue_active (&data->wait));
220	kfree (data);
221}
222
223/*----------------------------------------------------------------------*/
224
225/* most "how to use the hardware" policy choices are in userspace:
226 * mapping endpoint roles (which the driver needs) to the capabilities
227 * which the usb controller has.  most of those capabilities are exposed
228 * implicitly, starting with the driver name and then endpoint names.
229 */
230
231static const char *CHIP;
232static DEFINE_MUTEX(sb_mutex);		/* Serialize superblock operations */
233
234/*----------------------------------------------------------------------*/
235
236/* NOTE:  don't use dev_printk calls before binding to the gadget
237 * at the end of ep0 configuration, or after unbind.
238 */
239
240/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
241#define xprintk(d,level,fmt,args...) \
242	printk(level "%s: " fmt , shortname , ## args)
243
244#ifdef DEBUG
245#define DBG(dev,fmt,args...) \
246	xprintk(dev , KERN_DEBUG , fmt , ## args)
247#else
248#define DBG(dev,fmt,args...) \
249	do { } while (0)
250#endif /* DEBUG */
251
252#ifdef VERBOSE_DEBUG
253#define VDEBUG	DBG
254#else
255#define VDEBUG(dev,fmt,args...) \
256	do { } while (0)
#endif /* VERBOSE_DEBUG */
258
259#define ERROR(dev,fmt,args...) \
260	xprintk(dev , KERN_ERR , fmt , ## args)
261#define INFO(dev,fmt,args...) \
262	xprintk(dev , KERN_INFO , fmt , ## args)
263
264
265/*----------------------------------------------------------------------*/
266
267/* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
268 *
269 * After opening, configure non-control endpoints.  Then use normal
270 * stream read() and write() requests; and maybe ioctl() to get more
271 * precise FIFO status when recovering from cancellation.
272 */
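
/* For example, a user mode driver might do (sketch; endpoint file names
 * and descriptors depend on the controller and the driver's own design):
 *
 *     in = open ("/dev/gadget/ep1in", O_RDWR)
 *     write (in, ep_descriptors, sizeof ep_descriptors)    // see ep_config()
 *     write (in, data, length)                             // one IN transfer
 *
 *     out = open ("/dev/gadget/ep2out", O_RDWR)
 *     write (out, ep_descriptors, sizeof ep_descriptors)
 *     read (out, data, length)                             // one OUT transfer
 *     ioctl (out, GADGETFS_FIFO_STATUS)                    // bytes left in fifo
 */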
273
274static void epio_complete (struct usb_ep *ep, struct usb_request *req)
275{
276	struct ep_data	*epdata = ep->driver_data;
277
278	if (!req->context)
279		return;
280	if (req->status)
281		epdata->status = req->status;
282	else
283		epdata->status = req->actual;
284	complete ((struct completion *)req->context);
285}
286
/* lock the endpoint mutex, returning with it held once the endpoint
 * is usable.  callers still need dev->lock to use epdata->ep.
 */
290static int
291get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
292{
293	int	val;
294
295	if (f_flags & O_NONBLOCK) {
296		if (!mutex_trylock(&epdata->lock))
297			goto nonblock;
298		if (epdata->state != STATE_EP_ENABLED &&
299		    (!is_write || epdata->state != STATE_EP_READY)) {
300			mutex_unlock(&epdata->lock);
301nonblock:
302			val = -EAGAIN;
303		} else
304			val = 0;
305		return val;
306	}
307
308	val = mutex_lock_interruptible(&epdata->lock);
309	if (val < 0)
310		return val;
311
312	switch (epdata->state) {
313	case STATE_EP_ENABLED:
314		return 0;
315	case STATE_EP_READY:			/* not configured yet */
316		if (is_write)
317			return 0;
318		fallthrough;
319	case STATE_EP_UNBOUND:			/* clean disconnect */
320		break;
321	// case STATE_EP_DISABLED:		/* "can't happen" */
322	default:				/* error! */
323		pr_debug ("%s: ep %p not available, state %d\n",
324				shortname, epdata, epdata->state);
325	}
326	mutex_unlock(&epdata->lock);
327	return -ENODEV;
328}
329
330static ssize_t
331ep_io (struct ep_data *epdata, void *buf, unsigned len)
332{
333	DECLARE_COMPLETION_ONSTACK (done);
334	int value;
335
336	spin_lock_irq (&epdata->dev->lock);
337	if (likely (epdata->ep != NULL)) {
338		struct usb_request	*req = epdata->req;
339
340		req->context = &done;
341		req->complete = epio_complete;
342		req->buf = buf;
343		req->length = len;
344		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
345	} else
346		value = -ENODEV;
347	spin_unlock_irq (&epdata->dev->lock);
348
349	if (likely (value == 0)) {
350		value = wait_for_completion_interruptible(&done);
351		if (value != 0) {
352			spin_lock_irq (&epdata->dev->lock);
353			if (likely (epdata->ep != NULL)) {
354				DBG (epdata->dev, "%s i/o interrupted\n",
355						epdata->name);
356				usb_ep_dequeue (epdata->ep, epdata->req);
357				spin_unlock_irq (&epdata->dev->lock);
358
359				wait_for_completion(&done);
360				if (epdata->status == -ECONNRESET)
361					epdata->status = -EINTR;
362			} else {
363				spin_unlock_irq (&epdata->dev->lock);
364
365				DBG (epdata->dev, "endpoint gone\n");
366				wait_for_completion(&done);
367				epdata->status = -ENODEV;
368			}
369		}
370		return epdata->status;
371	}
372	return value;
373}
374
375static int
376ep_release (struct inode *inode, struct file *fd)
377{
378	struct ep_data		*data = fd->private_data;
379	int value;
380
381	value = mutex_lock_interruptible(&data->lock);
382	if (value < 0)
383		return value;
384
385	/* clean up if this can be reopened */
386	if (data->state != STATE_EP_UNBOUND) {
387		data->state = STATE_EP_DISABLED;
388		data->desc.bDescriptorType = 0;
389		data->hs_desc.bDescriptorType = 0;
390		usb_ep_disable(data->ep);
391	}
392	mutex_unlock(&data->lock);
393	put_ep (data);
394	return 0;
395}
396
397static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
398{
399	struct ep_data		*data = fd->private_data;
400	int			status;
401
402	if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
403		return status;
404
405	spin_lock_irq (&data->dev->lock);
406	if (likely (data->ep != NULL)) {
407		switch (code) {
408		case GADGETFS_FIFO_STATUS:
409			status = usb_ep_fifo_status (data->ep);
410			break;
411		case GADGETFS_FIFO_FLUSH:
412			usb_ep_fifo_flush (data->ep);
413			break;
414		case GADGETFS_CLEAR_HALT:
415			status = usb_ep_clear_halt (data->ep);
416			break;
417		default:
418			status = -ENOTTY;
419		}
420	} else
421		status = -ENODEV;
422	spin_unlock_irq (&data->dev->lock);
423	mutex_unlock(&data->lock);
424	return status;
425}
426
427/*----------------------------------------------------------------------*/
428
429/* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
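
/* A user mode driver can keep an endpoint busy by having several transfers
 * outstanding at once, e.g. with the libaio wrappers (sketch only; error
 * handling omitted, "out_fd" is an already configured OUT endpoint file):
 *
 *     io_context_t ctx = 0;
 *     struct iocb iocb, *list [] = { &iocb };
 *     struct io_event ev;
 *
 *     io_setup (4, &ctx);
 *     io_prep_pread (&iocb, out_fd, buf, sizeof buf, 0);
 *     io_submit (ctx, 1, list);
 *     ...
 *     io_getevents (ctx, 1, 1, &ev, NULL);
 */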
430
431struct kiocb_priv {
432	struct usb_request	*req;
433	struct ep_data		*epdata;
434	struct kiocb		*iocb;
435	struct mm_struct	*mm;
436	struct work_struct	work;
437	void			*buf;
438	struct iov_iter		to;
439	const void		*to_free;
440	unsigned		actual;
441};
442
443static int ep_aio_cancel(struct kiocb *iocb)
444{
445	struct kiocb_priv	*priv = iocb->private;
446	struct ep_data		*epdata;
447	int			value;
448
449	local_irq_disable();
450	epdata = priv->epdata;
451	// spin_lock(&epdata->dev->lock);
452	if (likely(epdata && epdata->ep && priv->req))
453		value = usb_ep_dequeue (epdata->ep, priv->req);
454	else
455		value = -EINVAL;
456	// spin_unlock(&epdata->dev->lock);
457	local_irq_enable();
458
459	return value;
460}
461
462static void ep_user_copy_worker(struct work_struct *work)
463{
464	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
465	struct mm_struct *mm = priv->mm;
466	struct kiocb *iocb = priv->iocb;
467	size_t ret;
468
469	kthread_use_mm(mm);
470	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
471	kthread_unuse_mm(mm);
472	if (!ret)
473		ret = -EFAULT;
474
475	/* completing the iocb can drop the ctx and mm, don't touch mm after */
476	iocb->ki_complete(iocb, ret, ret);
477
478	kfree(priv->buf);
479	kfree(priv->to_free);
480	kfree(priv);
481}
482
483static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
484{
485	struct kiocb		*iocb = req->context;
486	struct kiocb_priv	*priv = iocb->private;
487	struct ep_data		*epdata = priv->epdata;
488
489	/* lock against disconnect (and ideally, cancel) */
490	spin_lock(&epdata->dev->lock);
491	priv->req = NULL;
492	priv->epdata = NULL;
493
494	/* if this was a write or a read returning no data then we
495	 * don't need to copy anything to userspace, so we can
496	 * complete the aio request immediately.
497	 */
498	if (priv->to_free == NULL || unlikely(req->actual == 0)) {
499		kfree(req->buf);
500		kfree(priv->to_free);
501		kfree(priv);
502		iocb->private = NULL;
503		/* aio_complete() reports bytes-transferred _and_ faults */
504
505		iocb->ki_complete(iocb, req->actual ? req->actual : req->status,
506				req->status);
507	} else {
		/* ep_user_copy_worker() won't report both; we hide some faults */
509		if (unlikely(0 != req->status))
510			DBG(epdata->dev, "%s fault %d len %d\n",
511				ep->name, req->status, req->actual);
512
513		priv->buf = req->buf;
514		priv->actual = req->actual;
515		INIT_WORK(&priv->work, ep_user_copy_worker);
516		schedule_work(&priv->work);
517	}
518
519	usb_ep_free_request(ep, req);
520	spin_unlock(&epdata->dev->lock);
521	put_ep(epdata);
522}
523
524static ssize_t ep_aio(struct kiocb *iocb,
525		      struct kiocb_priv *priv,
526		      struct ep_data *epdata,
527		      char *buf,
528		      size_t len)
529{
530	struct usb_request *req;
531	ssize_t value;
532
533	iocb->private = priv;
534	priv->iocb = iocb;
535
536	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
537	get_ep(epdata);
538	priv->epdata = epdata;
539	priv->actual = 0;
540	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
541
542	/* each kiocb is coupled to one usb_request, but we can't
543	 * allocate or submit those if the host disconnected.
544	 */
545	spin_lock_irq(&epdata->dev->lock);
546	value = -ENODEV;
547	if (unlikely(epdata->ep == NULL))
548		goto fail;
549
550	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
551	value = -ENOMEM;
552	if (unlikely(!req))
553		goto fail;
554
555	priv->req = req;
556	req->buf = buf;
557	req->length = len;
558	req->complete = ep_aio_complete;
559	req->context = iocb;
560	value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
561	if (unlikely(0 != value)) {
562		usb_ep_free_request(epdata->ep, req);
563		goto fail;
564	}
565	spin_unlock_irq(&epdata->dev->lock);
566	return -EIOCBQUEUED;
567
568fail:
569	spin_unlock_irq(&epdata->dev->lock);
570	kfree(priv->to_free);
571	kfree(priv);
572	put_ep(epdata);
573	return value;
574}
575
576static ssize_t
577ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
578{
579	struct file *file = iocb->ki_filp;
580	struct ep_data *epdata = file->private_data;
581	size_t len = iov_iter_count(to);
582	ssize_t value;
583	char *buf;
584
585	if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
586		return value;
587
588	/* halt any endpoint by doing a "wrong direction" i/o call */
589	if (usb_endpoint_dir_in(&epdata->desc)) {
590		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
591		    !is_sync_kiocb(iocb)) {
592			mutex_unlock(&epdata->lock);
593			return -EINVAL;
594		}
595		DBG (epdata->dev, "%s halt\n", epdata->name);
596		spin_lock_irq(&epdata->dev->lock);
597		if (likely(epdata->ep != NULL))
598			usb_ep_set_halt(epdata->ep);
599		spin_unlock_irq(&epdata->dev->lock);
600		mutex_unlock(&epdata->lock);
601		return -EBADMSG;
602	}
603
604	buf = kmalloc(len, GFP_KERNEL);
605	if (unlikely(!buf)) {
606		mutex_unlock(&epdata->lock);
607		return -ENOMEM;
608	}
609	if (is_sync_kiocb(iocb)) {
610		value = ep_io(epdata, buf, len);
611		if (value >= 0 && (copy_to_iter(buf, value, to) != value))
612			value = -EFAULT;
613	} else {
614		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
615		value = -ENOMEM;
616		if (!priv)
617			goto fail;
618		priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
619		if (!priv->to_free) {
620			kfree(priv);
621			goto fail;
622		}
623		value = ep_aio(iocb, priv, epdata, buf, len);
624		if (value == -EIOCBQUEUED)
625			buf = NULL;
626	}
627fail:
628	kfree(buf);
629	mutex_unlock(&epdata->lock);
630	return value;
631}
632
633static ssize_t ep_config(struct ep_data *, const char *, size_t);
634
635static ssize_t
636ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
637{
638	struct file *file = iocb->ki_filp;
639	struct ep_data *epdata = file->private_data;
640	size_t len = iov_iter_count(from);
641	bool configured;
642	ssize_t value;
643	char *buf;
644
645	if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
646		return value;
647
648	configured = epdata->state == STATE_EP_ENABLED;
649
650	/* halt any endpoint by doing a "wrong direction" i/o call */
651	if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
652		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
653		    !is_sync_kiocb(iocb)) {
654			mutex_unlock(&epdata->lock);
655			return -EINVAL;
656		}
657		DBG (epdata->dev, "%s halt\n", epdata->name);
658		spin_lock_irq(&epdata->dev->lock);
659		if (likely(epdata->ep != NULL))
660			usb_ep_set_halt(epdata->ep);
661		spin_unlock_irq(&epdata->dev->lock);
662		mutex_unlock(&epdata->lock);
663		return -EBADMSG;
664	}
665
666	buf = kmalloc(len, GFP_KERNEL);
667	if (unlikely(!buf)) {
668		mutex_unlock(&epdata->lock);
669		return -ENOMEM;
670	}
671
672	if (unlikely(!copy_from_iter_full(buf, len, from))) {
673		value = -EFAULT;
674		goto out;
675	}
676
677	if (unlikely(!configured)) {
678		value = ep_config(epdata, buf, len);
679	} else if (is_sync_kiocb(iocb)) {
680		value = ep_io(epdata, buf, len);
681	} else {
682		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
683		value = -ENOMEM;
684		if (priv) {
685			value = ep_aio(iocb, priv, epdata, buf, len);
686			if (value == -EIOCBQUEUED)
687				buf = NULL;
688		}
689	}
690out:
691	kfree(buf);
692	mutex_unlock(&epdata->lock);
693	return value;
694}
695
696/*----------------------------------------------------------------------*/
697
698/* used after endpoint configuration */
699static const struct file_operations ep_io_operations = {
700	.owner =	THIS_MODULE,
701
702	.open =		ep_open,
703	.release =	ep_release,
704	.llseek =	no_llseek,
705	.unlocked_ioctl = ep_ioctl,
706	.read_iter =	ep_read_iter,
707	.write_iter =	ep_write_iter,
708};
709
710/* ENDPOINT INITIALIZATION
711 *
712 *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
713 *     status = write (fd, descriptors, sizeof descriptors)
714 *
715 * That write establishes the endpoint configuration, configuring
716 * the controller to process bulk, interrupt, or isochronous transfers
717 * at the right maxpacket size, and so on.
718 *
719 * The descriptors are message type 1, identified by a host order u32
720 * at the beginning of what's written.  Descriptor order is: full/low
721 * speed descriptor, then optional high speed descriptor.
722 */
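/* For example, a user mode driver might assemble that write like this
 * (sketch; descriptor contents are the driver's own, and only the first
 * USB_DT_ENDPOINT_SIZE bytes of each descriptor are included):
 *
 *     __u32 tag = 1;
 *     memcpy (buf, &tag, 4);
 *     memcpy (buf + 4, &fs_desc, USB_DT_ENDPOINT_SIZE);
 *     memcpy (buf + 4 + USB_DT_ENDPOINT_SIZE, &hs_desc, USB_DT_ENDPOINT_SIZE);
 *     write (fd, buf, 4 + 2 * USB_DT_ENDPOINT_SIZE);
 */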
723static ssize_t
724ep_config (struct ep_data *data, const char *buf, size_t len)
725{
726	struct usb_ep		*ep;
727	u32			tag;
728	int			value, length = len;
729
730	if (data->state != STATE_EP_READY) {
731		value = -EL2HLT;
732		goto fail;
733	}
734
735	value = len;
736	if (len < USB_DT_ENDPOINT_SIZE + 4)
737		goto fail0;
738
739	/* we might need to change message format someday */
740	memcpy(&tag, buf, 4);
741	if (tag != 1) {
742		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
743		goto fail0;
744	}
745	buf += 4;
746	len -= 4;
747
748	/* NOTE:  audio endpoint extensions not accepted here;
749	 * just don't include the extra bytes.
750	 */
751
752	/* full/low speed descriptor, then high speed */
753	memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
754	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
755			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
756		goto fail0;
757	if (len != USB_DT_ENDPOINT_SIZE) {
758		if (len != 2 * USB_DT_ENDPOINT_SIZE)
759			goto fail0;
760		memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
761			USB_DT_ENDPOINT_SIZE);
762		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
763				|| data->hs_desc.bDescriptorType
764					!= USB_DT_ENDPOINT) {
765			DBG(data->dev, "config %s, bad hs length or type\n",
766					data->name);
767			goto fail0;
768		}
769	}
770
771	spin_lock_irq (&data->dev->lock);
772	if (data->dev->state == STATE_DEV_UNBOUND) {
773		value = -ENOENT;
774		goto gone;
775	} else {
776		ep = data->ep;
777		if (ep == NULL) {
778			value = -ENODEV;
779			goto gone;
780		}
781	}
782	switch (data->dev->gadget->speed) {
783	case USB_SPEED_LOW:
784	case USB_SPEED_FULL:
785		ep->desc = &data->desc;
786		break;
787	case USB_SPEED_HIGH:
788		/* fails if caller didn't provide that descriptor... */
789		ep->desc = &data->hs_desc;
790		break;
791	default:
792		DBG(data->dev, "unconnected, %s init abandoned\n",
793				data->name);
794		value = -EINVAL;
795		goto gone;
796	}
797	value = usb_ep_enable(ep);
798	if (value == 0) {
799		data->state = STATE_EP_ENABLED;
800		value = length;
801	}
802gone:
803	spin_unlock_irq (&data->dev->lock);
804	if (value < 0) {
805fail:
806		data->desc.bDescriptorType = 0;
807		data->hs_desc.bDescriptorType = 0;
808	}
809	return value;
810fail0:
811	value = -EINVAL;
812	goto fail;
813}
814
815static int
816ep_open (struct inode *inode, struct file *fd)
817{
818	struct ep_data		*data = inode->i_private;
819	int			value = -EBUSY;
820
821	if (mutex_lock_interruptible(&data->lock) != 0)
822		return -EINTR;
823	spin_lock_irq (&data->dev->lock);
824	if (data->dev->state == STATE_DEV_UNBOUND)
825		value = -ENOENT;
826	else if (data->state == STATE_EP_DISABLED) {
827		value = 0;
828		data->state = STATE_EP_READY;
829		get_ep (data);
830		fd->private_data = data;
831		VDEBUG (data->dev, "%s ready\n", data->name);
832	} else
833		DBG (data->dev, "%s state %d\n",
834			data->name, data->state);
835	spin_unlock_irq (&data->dev->lock);
836	mutex_unlock(&data->lock);
837	return value;
838}
839
840/*----------------------------------------------------------------------*/
841
842/* EP0 IMPLEMENTATION can be partly in userspace.
843 *
844 * Drivers that use this facility receive various events, including
845 * control requests the kernel doesn't handle.  Drivers that don't
846 * use this facility may be too simple-minded for real applications.
847 */
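
/* A user mode ep0 loop might look roughly like this (sketch; only the
 * SETUP case is shown, and error handling is omitted):
 *
 *     struct usb_gadgetfs_event event [5];
 *     n = read (ep0, event, sizeof event) / sizeof event [0];
 *     for (i = 0; i < n; i++) {
 *         if (event [i].type != GADGETFS_SETUP)
 *             continue;
 *         if (event [i].u.setup.bRequestType & USB_DIR_IN)
 *             write (ep0, reply, length);   // send IN data
 *         else
 *             read (ep0, buffer, length);   // collect OUT data; length 0 acks
 *     }
 */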
848
849static inline void ep0_readable (struct dev_data *dev)
850{
851	wake_up (&dev->wait);
852	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
853}
854
855static void clean_req (struct usb_ep *ep, struct usb_request *req)
856{
857	struct dev_data		*dev = ep->driver_data;
858
859	if (req->buf != dev->rbuf) {
860		kfree(req->buf);
861		req->buf = dev->rbuf;
862	}
863	req->complete = epio_complete;
864	dev->setup_out_ready = 0;
865}
866
867static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
868{
869	struct dev_data		*dev = ep->driver_data;
870	unsigned long		flags;
871	int			free = 1;
872
873	/* for control OUT, data must still get to userspace */
874	spin_lock_irqsave(&dev->lock, flags);
875	if (!dev->setup_in) {
876		dev->setup_out_error = (req->status != 0);
877		if (!dev->setup_out_error)
878			free = 0;
879		dev->setup_out_ready = 1;
880		ep0_readable (dev);
881	}
882
883	/* clean up as appropriate */
884	if (free && req->buf != &dev->rbuf)
885		clean_req (ep, req);
886	req->complete = epio_complete;
887	spin_unlock_irqrestore(&dev->lock, flags);
888}
889
890static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
891{
892	struct dev_data	*dev = ep->driver_data;
893
894	if (dev->setup_out_ready) {
895		DBG (dev, "ep0 request busy!\n");
896		return -EBUSY;
897	}
898	if (len > sizeof (dev->rbuf))
899		req->buf = kmalloc(len, GFP_ATOMIC);
900	if (req->buf == NULL) {
901		req->buf = dev->rbuf;
902		return -ENOMEM;
903	}
904	req->complete = ep0_complete;
905	req->length = len;
906	req->zero = 0;
907	return 0;
908}
909
910static ssize_t
911ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
912{
913	struct dev_data			*dev = fd->private_data;
914	ssize_t				retval;
915	enum ep0_state			state;
916
917	spin_lock_irq (&dev->lock);
918	if (dev->state <= STATE_DEV_OPENED) {
919		retval = -EINVAL;
920		goto done;
921	}
922
923	/* report fd mode change before acting on it */
924	if (dev->setup_abort) {
925		dev->setup_abort = 0;
926		retval = -EIDRM;
927		goto done;
928	}
929
930	/* control DATA stage */
931	if ((state = dev->state) == STATE_DEV_SETUP) {
932
933		if (dev->setup_in) {		/* stall IN */
934			VDEBUG(dev, "ep0in stall\n");
935			(void) usb_ep_set_halt (dev->gadget->ep0);
936			retval = -EL2HLT;
937			dev->state = STATE_DEV_CONNECTED;
938
939		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
940			struct usb_ep		*ep = dev->gadget->ep0;
941			struct usb_request	*req = dev->req;
942
943			if ((retval = setup_req (ep, req, 0)) == 0) {
944				++dev->udc_usage;
945				spin_unlock_irq (&dev->lock);
946				retval = usb_ep_queue (ep, req, GFP_KERNEL);
947				spin_lock_irq (&dev->lock);
948				--dev->udc_usage;
949			}
950			dev->state = STATE_DEV_CONNECTED;
951
952			/* assume that was SET_CONFIGURATION */
953			if (dev->current_config) {
954				unsigned power;
955
956				if (gadget_is_dualspeed(dev->gadget)
957						&& (dev->gadget->speed
958							== USB_SPEED_HIGH))
959					power = dev->hs_config->bMaxPower;
960				else
961					power = dev->config->bMaxPower;
962				usb_gadget_vbus_draw(dev->gadget, 2 * power);
963			}
964
965		} else {			/* collect OUT data */
966			if ((fd->f_flags & O_NONBLOCK) != 0
967					&& !dev->setup_out_ready) {
968				retval = -EAGAIN;
969				goto done;
970			}
971			spin_unlock_irq (&dev->lock);
972			retval = wait_event_interruptible (dev->wait,
973					dev->setup_out_ready != 0);
974
975			/* FIXME state could change from under us */
976			spin_lock_irq (&dev->lock);
977			if (retval)
978				goto done;
979
980			if (dev->state != STATE_DEV_SETUP) {
981				retval = -ECANCELED;
982				goto done;
983			}
984			dev->state = STATE_DEV_CONNECTED;
985
986			if (dev->setup_out_error)
987				retval = -EIO;
988			else {
989				len = min (len, (size_t)dev->req->actual);
990				++dev->udc_usage;
991				spin_unlock_irq(&dev->lock);
992				if (copy_to_user (buf, dev->req->buf, len))
993					retval = -EFAULT;
994				else
995					retval = len;
996				spin_lock_irq(&dev->lock);
997				--dev->udc_usage;
998				clean_req (dev->gadget->ep0, dev->req);
999				/* NOTE userspace can't yet choose to stall */
1000			}
1001		}
1002		goto done;
1003	}
1004
1005	/* else normal: return event data */
1006	if (len < sizeof dev->event [0]) {
1007		retval = -EINVAL;
1008		goto done;
1009	}
1010	len -= len % sizeof (struct usb_gadgetfs_event);
1011	dev->usermode_setup = 1;
1012
1013scan:
1014	/* return queued events right away */
1015	if (dev->ev_next != 0) {
1016		unsigned		i, n;
1017
1018		n = len / sizeof (struct usb_gadgetfs_event);
1019		if (dev->ev_next < n)
1020			n = dev->ev_next;
1021
1022		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
1023		for (i = 0; i < n; i++) {
1024			if (dev->event [i].type == GADGETFS_SETUP) {
1025				dev->state = STATE_DEV_SETUP;
1026				n = i + 1;
1027				break;
1028			}
1029		}
1030		spin_unlock_irq (&dev->lock);
1031		len = n * sizeof (struct usb_gadgetfs_event);
1032		if (copy_to_user (buf, &dev->event, len))
1033			retval = -EFAULT;
1034		else
1035			retval = len;
1036		if (len > 0) {
1037			/* NOTE this doesn't guard against broken drivers;
1038			 * concurrent ep0 readers may lose events.
1039			 */
1040			spin_lock_irq (&dev->lock);
1041			if (dev->ev_next > n) {
1042				memmove(&dev->event[0], &dev->event[n],
1043					sizeof (struct usb_gadgetfs_event)
1044						* (dev->ev_next - n));
1045			}
1046			dev->ev_next -= n;
1047			spin_unlock_irq (&dev->lock);
1048		}
1049		return retval;
1050	}
1051	if (fd->f_flags & O_NONBLOCK) {
1052		retval = -EAGAIN;
1053		goto done;
1054	}
1055
1056	switch (state) {
1057	default:
1058		DBG (dev, "fail %s, state %d\n", __func__, state);
1059		retval = -ESRCH;
1060		break;
1061	case STATE_DEV_UNCONNECTED:
1062	case STATE_DEV_CONNECTED:
1063		spin_unlock_irq (&dev->lock);
1064		DBG (dev, "%s wait\n", __func__);
1065
1066		/* wait for events */
1067		retval = wait_event_interruptible (dev->wait,
1068				dev->ev_next != 0);
1069		if (retval < 0)
1070			return retval;
1071		spin_lock_irq (&dev->lock);
1072		goto scan;
1073	}
1074
1075done:
1076	spin_unlock_irq (&dev->lock);
1077	return retval;
1078}
1079
1080static struct usb_gadgetfs_event *
1081next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1082{
1083	struct usb_gadgetfs_event	*event;
1084	unsigned			i;
1085
1086	switch (type) {
1087	/* these events purge the queue */
1088	case GADGETFS_DISCONNECT:
1089		if (dev->state == STATE_DEV_SETUP)
1090			dev->setup_abort = 1;
1091		fallthrough;
1092	case GADGETFS_CONNECT:
1093		dev->ev_next = 0;
1094		break;
1095	case GADGETFS_SETUP:		/* previous request timed out */
1096	case GADGETFS_SUSPEND:		/* same effect */
1097		/* these events can't be repeated */
1098		for (i = 0; i != dev->ev_next; i++) {
1099			if (dev->event [i].type != type)
1100				continue;
1101			DBG(dev, "discard old event[%d] %d\n", i, type);
1102			dev->ev_next--;
1103			if (i == dev->ev_next)
1104				break;
1105			/* indices start at zero, for simplicity */
1106			memmove (&dev->event [i], &dev->event [i + 1],
1107				sizeof (struct usb_gadgetfs_event)
1108					* (dev->ev_next - i));
1109		}
1110		break;
1111	default:
1112		BUG ();
1113	}
1114	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1115	event = &dev->event [dev->ev_next++];
1116	BUG_ON (dev->ev_next > N_EVENT);
1117	memset (event, 0, sizeof *event);
1118	event->type = type;
1119	return event;
1120}
1121
1122static ssize_t
1123ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1124{
1125	struct dev_data		*dev = fd->private_data;
1126	ssize_t			retval = -ESRCH;
1127
1128	/* report fd mode change before acting on it */
1129	if (dev->setup_abort) {
1130		dev->setup_abort = 0;
1131		retval = -EIDRM;
1132
1133	/* data and/or status stage for control request */
1134	} else if (dev->state == STATE_DEV_SETUP) {
1135
1136		len = min_t(size_t, len, dev->setup_wLength);
1137		if (dev->setup_in) {
1138			retval = setup_req (dev->gadget->ep0, dev->req, len);
1139			if (retval == 0) {
1140				dev->state = STATE_DEV_CONNECTED;
1141				++dev->udc_usage;
1142				spin_unlock_irq (&dev->lock);
1143				if (copy_from_user (dev->req->buf, buf, len))
1144					retval = -EFAULT;
1145				else {
1146					if (len < dev->setup_wLength)
1147						dev->req->zero = 1;
1148					retval = usb_ep_queue (
1149						dev->gadget->ep0, dev->req,
1150						GFP_KERNEL);
1151				}
1152				spin_lock_irq(&dev->lock);
1153				--dev->udc_usage;
1154				if (retval < 0) {
1155					clean_req (dev->gadget->ep0, dev->req);
1156				} else
1157					retval = len;
1158
1159				return retval;
1160			}
1161
1162		/* can stall some OUT transfers */
1163		} else if (dev->setup_can_stall) {
1164			VDEBUG(dev, "ep0out stall\n");
1165			(void) usb_ep_set_halt (dev->gadget->ep0);
1166			retval = -EL2HLT;
1167			dev->state = STATE_DEV_CONNECTED;
1168		} else {
1169			DBG(dev, "bogus ep0out stall!\n");
1170		}
1171	} else
1172		DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1173
1174	return retval;
1175}
1176
1177static int
1178ep0_fasync (int f, struct file *fd, int on)
1179{
1180	struct dev_data		*dev = fd->private_data;
1181	// caller must F_SETOWN before signal delivery happens
1182	VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
1183	return fasync_helper (f, fd, on, &dev->fasync);
1184}
1185
1186static struct usb_gadget_driver gadgetfs_driver;
1187
1188static int
1189dev_release (struct inode *inode, struct file *fd)
1190{
1191	struct dev_data		*dev = fd->private_data;
1192
1193	/* closing ep0 === shutdown all */
1194
1195	if (dev->gadget_registered) {
1196		usb_gadget_unregister_driver (&gadgetfs_driver);
1197		dev->gadget_registered = false;
1198	}
1199
1200	/* at this point "good" hardware has disconnected the
1201	 * device from USB; the host won't see it any more.
1202	 * alternatively, all host requests will time out.
1203	 */
1204
1205	kfree (dev->buf);
1206	dev->buf = NULL;
1207
1208	/* other endpoints were all decoupled from this device */
1209	spin_lock_irq(&dev->lock);
1210	dev->state = STATE_DEV_DISABLED;
1211	spin_unlock_irq(&dev->lock);
1212
1213	put_dev (dev);
1214	return 0;
1215}
1216
1217static __poll_t
1218ep0_poll (struct file *fd, poll_table *wait)
1219{
	struct dev_data		*dev = fd->private_data;
	__poll_t		mask = 0;
1222
1223	if (dev->state <= STATE_DEV_OPENED)
1224		return DEFAULT_POLLMASK;
1225
1226	poll_wait(fd, &dev->wait, wait);
1227
1228	spin_lock_irq(&dev->lock);
1229
1230	/* report fd mode change before acting on it */
1231	if (dev->setup_abort) {
1232		dev->setup_abort = 0;
1233		mask = EPOLLHUP;
1234		goto out;
1235	}
1236
1237	if (dev->state == STATE_DEV_SETUP) {
1238		if (dev->setup_in || dev->setup_can_stall)
1239			mask = EPOLLOUT;
1240	} else {
1241		if (dev->ev_next != 0)
1242			mask = EPOLLIN;
1243	}
1244out:
1245	spin_unlock_irq(&dev->lock);
1246	return mask;
1247}
1248
1249static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1250{
1251	struct dev_data		*dev = fd->private_data;
1252	struct usb_gadget	*gadget = dev->gadget;
1253	long ret = -ENOTTY;
1254
1255	spin_lock_irq(&dev->lock);
1256	if (dev->state == STATE_DEV_OPENED ||
1257			dev->state == STATE_DEV_UNBOUND) {
1258		/* Not bound to a UDC */
1259	} else if (gadget->ops->ioctl) {
1260		++dev->udc_usage;
1261		spin_unlock_irq(&dev->lock);
1262
1263		ret = gadget->ops->ioctl (gadget, code, value);
1264
1265		spin_lock_irq(&dev->lock);
1266		--dev->udc_usage;
1267	}
1268	spin_unlock_irq(&dev->lock);
1269
1270	return ret;
1271}
1272
1273/*----------------------------------------------------------------------*/
1274
1275/* The in-kernel gadget driver handles most ep0 issues, in particular
1276 * enumerating the single configuration (as provided from user space).
1277 *
1278 * Unrecognized ep0 requests may be handled in user space.
1279 */
1280
1281static void make_qualifier (struct dev_data *dev)
1282{
1283	struct usb_qualifier_descriptor		qual;
1284	struct usb_device_descriptor		*desc;
1285
1286	qual.bLength = sizeof qual;
1287	qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1288	qual.bcdUSB = cpu_to_le16 (0x0200);
1289
1290	desc = dev->dev;
1291	qual.bDeviceClass = desc->bDeviceClass;
1292	qual.bDeviceSubClass = desc->bDeviceSubClass;
1293	qual.bDeviceProtocol = desc->bDeviceProtocol;
1294
1295	/* assumes ep0 uses the same value for both speeds ... */
1296	qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1297
1298	qual.bNumConfigurations = 1;
1299	qual.bRESERVED = 0;
1300
1301	memcpy (dev->rbuf, &qual, sizeof qual);
1302}
1303
1304static int
1305config_buf (struct dev_data *dev, u8 type, unsigned index)
1306{
1307	int		len;
1308	int		hs = 0;
1309
1310	/* only one configuration */
1311	if (index > 0)
1312		return -EINVAL;
1313
1314	if (gadget_is_dualspeed(dev->gadget)) {
1315		hs = (dev->gadget->speed == USB_SPEED_HIGH);
1316		if (type == USB_DT_OTHER_SPEED_CONFIG)
1317			hs = !hs;
1318	}
1319	if (hs) {
1320		dev->req->buf = dev->hs_config;
1321		len = le16_to_cpu(dev->hs_config->wTotalLength);
1322	} else {
1323		dev->req->buf = dev->config;
1324		len = le16_to_cpu(dev->config->wTotalLength);
1325	}
1326	((u8 *)dev->req->buf) [1] = type;
1327	return len;
1328}
1329
1330static int
1331gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1332{
1333	struct dev_data			*dev = get_gadget_data (gadget);
1334	struct usb_request		*req = dev->req;
1335	int				value = -EOPNOTSUPP;
1336	struct usb_gadgetfs_event	*event;
1337	u16				w_value = le16_to_cpu(ctrl->wValue);
1338	u16				w_length = le16_to_cpu(ctrl->wLength);
1339
1340	if (w_length > RBUF_SIZE) {
1341		if (ctrl->bRequestType & USB_DIR_IN) {
1342			/* Cast away the const, we are going to overwrite on purpose. */
1343			__le16 *temp = (__le16 *)&ctrl->wLength;
1344
1345			*temp = cpu_to_le16(RBUF_SIZE);
1346			w_length = RBUF_SIZE;
1347		} else {
1348			return value;
1349		}
1350	}
1351
1352	spin_lock (&dev->lock);
1353	dev->setup_abort = 0;
1354	if (dev->state == STATE_DEV_UNCONNECTED) {
1355		if (gadget_is_dualspeed(gadget)
1356				&& gadget->speed == USB_SPEED_HIGH
1357				&& dev->hs_config == NULL) {
1358			spin_unlock(&dev->lock);
1359			ERROR (dev, "no high speed config??\n");
1360			return -EINVAL;
1361		}
1362
1363		dev->state = STATE_DEV_CONNECTED;
1364
1365		INFO (dev, "connected\n");
1366		event = next_event (dev, GADGETFS_CONNECT);
1367		event->u.speed = gadget->speed;
1368		ep0_readable (dev);
1369
1370	/* host may have given up waiting for response.  we can miss control
1371	 * requests handled lower down (device/endpoint status and features);
1372	 * then ep0_{read,write} will report the wrong status. controller
1373	 * driver will have aborted pending i/o.
1374	 */
1375	} else if (dev->state == STATE_DEV_SETUP)
1376		dev->setup_abort = 1;
1377
1378	req->buf = dev->rbuf;
1379	req->context = NULL;
1380	switch (ctrl->bRequest) {
1381
1382	case USB_REQ_GET_DESCRIPTOR:
1383		if (ctrl->bRequestType != USB_DIR_IN)
1384			goto unrecognized;
1385		switch (w_value >> 8) {
1386
1387		case USB_DT_DEVICE:
1388			value = min (w_length, (u16) sizeof *dev->dev);
1389			dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1390			req->buf = dev->dev;
1391			break;
1392		case USB_DT_DEVICE_QUALIFIER:
1393			if (!dev->hs_config)
1394				break;
1395			value = min (w_length, (u16)
1396				sizeof (struct usb_qualifier_descriptor));
1397			make_qualifier (dev);
1398			break;
1399		case USB_DT_OTHER_SPEED_CONFIG:
1400		case USB_DT_CONFIG:
1401			value = config_buf (dev,
1402					w_value >> 8,
1403					w_value & 0xff);
1404			if (value >= 0)
1405				value = min (w_length, (u16) value);
1406			break;
1407		case USB_DT_STRING:
1408			goto unrecognized;
1409
1410		default:		// all others are errors
1411			break;
1412		}
1413		break;
1414
1415	/* currently one config, two speeds */
1416	case USB_REQ_SET_CONFIGURATION:
1417		if (ctrl->bRequestType != 0)
1418			goto unrecognized;
1419		if (0 == (u8) w_value) {
1420			value = 0;
1421			dev->current_config = 0;
1422			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1423			// user mode expected to disable endpoints
1424		} else {
1425			u8	config, power;
1426
1427			if (gadget_is_dualspeed(gadget)
1428					&& gadget->speed == USB_SPEED_HIGH) {
1429				config = dev->hs_config->bConfigurationValue;
1430				power = dev->hs_config->bMaxPower;
1431			} else {
1432				config = dev->config->bConfigurationValue;
1433				power = dev->config->bMaxPower;
1434			}
1435
1436			if (config == (u8) w_value) {
1437				value = 0;
1438				dev->current_config = config;
1439				usb_gadget_vbus_draw(gadget, 2 * power);
1440			}
1441		}
1442
1443		/* report SET_CONFIGURATION like any other control request,
1444		 * except that usermode may not stall this.  the next
		 * request mustn't be allowed to start until this finishes:
1446		 * endpoints and threads set up, etc.
1447		 *
1448		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
		 * has bad/racy automagic that prevents synchronizing here.
1450		 * even kernel mode drivers often miss them.
1451		 */
1452		if (value == 0) {
1453			INFO (dev, "configuration #%d\n", dev->current_config);
1454			usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
1455			if (dev->usermode_setup) {
1456				dev->setup_can_stall = 0;
1457				goto delegate;
1458			}
1459		}
1460		break;
1461
1462#ifndef	CONFIG_USB_PXA25X
1463	/* PXA automagically handles this request too */
1464	case USB_REQ_GET_CONFIGURATION:
1465		if (ctrl->bRequestType != 0x80)
1466			goto unrecognized;
1467		*(u8 *)req->buf = dev->current_config;
1468		value = min (w_length, (u16) 1);
1469		break;
1470#endif
1471
1472	default:
1473unrecognized:
1474		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1475			dev->usermode_setup ? "delegate" : "fail",
1476			ctrl->bRequestType, ctrl->bRequest,
1477			w_value, le16_to_cpu(ctrl->wIndex), w_length);
1478
1479		/* if there's an ep0 reader, don't stall */
1480		if (dev->usermode_setup) {
1481			dev->setup_can_stall = 1;
1482delegate:
1483			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1484						? 1 : 0;
1485			dev->setup_wLength = w_length;
1486			dev->setup_out_ready = 0;
1487			dev->setup_out_error = 0;
1488
1489			/* read DATA stage for OUT right away */
1490			if (unlikely (!dev->setup_in && w_length)) {
1491				value = setup_req (gadget->ep0, dev->req,
1492							w_length);
1493				if (value < 0)
1494					break;
1495
1496				++dev->udc_usage;
1497				spin_unlock (&dev->lock);
1498				value = usb_ep_queue (gadget->ep0, dev->req,
1499							GFP_KERNEL);
1500				spin_lock (&dev->lock);
1501				--dev->udc_usage;
1502				if (value < 0) {
1503					clean_req (gadget->ep0, dev->req);
1504					break;
1505				}
1506
1507				/* we can't currently stall these */
1508				dev->setup_can_stall = 0;
1509			}
1510
1511			/* state changes when reader collects event */
1512			event = next_event (dev, GADGETFS_SETUP);
1513			event->u.setup = *ctrl;
1514			ep0_readable (dev);
1515			spin_unlock (&dev->lock);
1516			return 0;
1517		}
1518	}
1519
1520	/* proceed with data transfer and status phases? */
1521	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1522		req->length = value;
1523		req->zero = value < w_length;
1524
1525		++dev->udc_usage;
1526		spin_unlock (&dev->lock);
1527		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1528		spin_lock(&dev->lock);
1529		--dev->udc_usage;
1530		spin_unlock(&dev->lock);
1531		if (value < 0) {
1532			DBG (dev, "ep_queue --> %d\n", value);
1533			req->status = 0;
1534		}
1535		return value;
1536	}
1537
1538	/* device stalls when value < 0 */
1539	spin_unlock (&dev->lock);
1540	return value;
1541}
1542
1543static void destroy_ep_files (struct dev_data *dev)
1544{
1545	DBG (dev, "%s %d\n", __func__, dev->state);
1546
1547	/* dev->state must prevent interference */
1548	spin_lock_irq (&dev->lock);
1549	while (!list_empty(&dev->epfiles)) {
1550		struct ep_data	*ep;
1551		struct inode	*parent;
1552		struct dentry	*dentry;
1553
1554		/* break link to FS */
1555		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1556		list_del_init (&ep->epfiles);
1557		spin_unlock_irq (&dev->lock);
1558
1559		dentry = ep->dentry;
1560		ep->dentry = NULL;
1561		parent = d_inode(dentry->d_parent);
1562
1563		/* break link to controller */
1564		mutex_lock(&ep->lock);
1565		if (ep->state == STATE_EP_ENABLED)
1566			(void) usb_ep_disable (ep->ep);
1567		ep->state = STATE_EP_UNBOUND;
1568		usb_ep_free_request (ep->ep, ep->req);
1569		ep->ep = NULL;
1570		mutex_unlock(&ep->lock);
1571
1572		wake_up (&ep->wait);
1573		put_ep (ep);
1574
1575		/* break link to dcache */
1576		inode_lock(parent);
1577		d_delete (dentry);
1578		dput (dentry);
1579		inode_unlock(parent);
1580
1581		spin_lock_irq (&dev->lock);
1582	}
1583	spin_unlock_irq (&dev->lock);
1584}
1585
1586
1587static struct dentry *
1588gadgetfs_create_file (struct super_block *sb, char const *name,
1589		void *data, const struct file_operations *fops);
1590
1591static int activate_ep_files (struct dev_data *dev)
1592{
1593	struct usb_ep	*ep;
1594	struct ep_data	*data;
1595
1596	gadget_for_each_ep (ep, dev->gadget) {
1597
1598		data = kzalloc(sizeof(*data), GFP_KERNEL);
1599		if (!data)
1600			goto enomem0;
1601		data->state = STATE_EP_DISABLED;
1602		mutex_init(&data->lock);
1603		init_waitqueue_head (&data->wait);
1604
1605		strncpy (data->name, ep->name, sizeof (data->name) - 1);
1606		refcount_set (&data->count, 1);
1607		data->dev = dev;
1608		get_dev (dev);
1609
1610		data->ep = ep;
1611		ep->driver_data = data;
1612
1613		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1614		if (!data->req)
1615			goto enomem1;
1616
1617		data->dentry = gadgetfs_create_file (dev->sb, data->name,
1618				data, &ep_io_operations);
1619		if (!data->dentry)
1620			goto enomem2;
1621		list_add_tail (&data->epfiles, &dev->epfiles);
1622	}
1623	return 0;
1624
1625enomem2:
1626	usb_ep_free_request (ep, data->req);
1627enomem1:
1628	put_dev (dev);
1629	kfree (data);
1630enomem0:
1631	DBG (dev, "%s enomem\n", __func__);
1632	destroy_ep_files (dev);
1633	return -ENOMEM;
1634}
1635
1636static void
1637gadgetfs_unbind (struct usb_gadget *gadget)
1638{
1639	struct dev_data		*dev = get_gadget_data (gadget);
1640
1641	DBG (dev, "%s\n", __func__);
1642
1643	spin_lock_irq (&dev->lock);
1644	dev->state = STATE_DEV_UNBOUND;
1645	while (dev->udc_usage > 0) {
1646		spin_unlock_irq(&dev->lock);
1647		usleep_range(1000, 2000);
1648		spin_lock_irq(&dev->lock);
1649	}
1650	spin_unlock_irq (&dev->lock);
1651
1652	destroy_ep_files (dev);
1653	gadget->ep0->driver_data = NULL;
1654	set_gadget_data (gadget, NULL);
1655
1656	/* we've already been disconnected ... no i/o is active */
1657	if (dev->req)
1658		usb_ep_free_request (gadget->ep0, dev->req);
1659	DBG (dev, "%s done\n", __func__);
1660	put_dev (dev);
1661}
1662
1663static struct dev_data		*the_device;
1664
1665static int gadgetfs_bind(struct usb_gadget *gadget,
1666		struct usb_gadget_driver *driver)
1667{
1668	struct dev_data		*dev = the_device;
1669
1670	if (!dev)
1671		return -ESRCH;
1672	if (0 != strcmp (CHIP, gadget->name)) {
1673		pr_err("%s expected %s controller not %s\n",
1674			shortname, CHIP, gadget->name);
1675		return -ENODEV;
1676	}
1677
1678	set_gadget_data (gadget, dev);
1679	dev->gadget = gadget;
1680	gadget->ep0->driver_data = dev;
1681
1682	/* preallocate control response and buffer */
1683	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1684	if (!dev->req)
1685		goto enomem;
1686	dev->req->context = NULL;
1687	dev->req->complete = epio_complete;
1688
1689	if (activate_ep_files (dev) < 0)
1690		goto enomem;
1691
1692	INFO (dev, "bound to %s driver\n", gadget->name);
1693	spin_lock_irq(&dev->lock);
1694	dev->state = STATE_DEV_UNCONNECTED;
1695	spin_unlock_irq(&dev->lock);
1696	get_dev (dev);
1697	return 0;
1698
1699enomem:
1700	gadgetfs_unbind (gadget);
1701	return -ENOMEM;
1702}
1703
1704static void
1705gadgetfs_disconnect (struct usb_gadget *gadget)
1706{
1707	struct dev_data		*dev = get_gadget_data (gadget);
1708	unsigned long		flags;
1709
1710	spin_lock_irqsave (&dev->lock, flags);
1711	if (dev->state == STATE_DEV_UNCONNECTED)
1712		goto exit;
1713	dev->state = STATE_DEV_UNCONNECTED;
1714
1715	INFO (dev, "disconnected\n");
1716	next_event (dev, GADGETFS_DISCONNECT);
1717	ep0_readable (dev);
1718exit:
1719	spin_unlock_irqrestore (&dev->lock, flags);
1720}
1721
1722static void
1723gadgetfs_suspend (struct usb_gadget *gadget)
1724{
1725	struct dev_data		*dev = get_gadget_data (gadget);
1726	unsigned long		flags;
1727
1728	INFO (dev, "suspended from state %d\n", dev->state);
1729	spin_lock_irqsave(&dev->lock, flags);
1730	switch (dev->state) {
1731	case STATE_DEV_SETUP:		// VERY odd... host died??
1732	case STATE_DEV_CONNECTED:
1733	case STATE_DEV_UNCONNECTED:
1734		next_event (dev, GADGETFS_SUSPEND);
1735		ep0_readable (dev);
1736		fallthrough;
1737	default:
1738		break;
1739	}
1740	spin_unlock_irqrestore(&dev->lock, flags);
1741}
1742
1743static struct usb_gadget_driver gadgetfs_driver = {
1744	.function	= (char *) driver_desc,
1745	.bind		= gadgetfs_bind,
1746	.unbind		= gadgetfs_unbind,
1747	.setup		= gadgetfs_setup,
1748	.reset		= gadgetfs_disconnect,
1749	.disconnect	= gadgetfs_disconnect,
1750	.suspend	= gadgetfs_suspend,
1751
1752	.driver	= {
1753		.name		= shortname,
1754	},
1755};
1756
1757/*----------------------------------------------------------------------*/
1758/* DEVICE INITIALIZATION
1759 *
1760 *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
1761 *     status = write (fd, descriptors, sizeof descriptors)
1762 *
1763 * That write establishes the device configuration, so the kernel can
1764 * bind to the controller ... guaranteeing it can handle enumeration
1765 * at all necessary speeds.  Descriptor order is:
1766 *
1767 * . message tag (u32, host order) ... for now, must be zero; it
1768 *	would change to support features like multi-config devices
1769 * . full/low speed config ... all wTotalLength bytes (with interface,
1770 *	class, altsetting, endpoint, and other descriptors)
1771 * . high speed config ... all descriptors, for high speed operation;
1772 *	this one's optional except for high-speed hardware
1773 * . device descriptor
1774 *
1775 * Endpoints are not yet enabled. Drivers must wait until device
1776 * configuration and interface altsetting changes create
1777 * the need to configure (or unconfigure) them.
1778 *
1779 * After initialization, the device stays active for as long as that
1780 * $CHIP file is open.  Events must then be read from that descriptor,
1781 * such as configuration notifications.
1782 */
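/* For example (sketch; "fs_config", "hs_config" and "device_desc" are
 * descriptors the user mode driver built itself; the high speed config
 * is optional):
 *
 *     __u32 tag = 0;
 *     memcpy (p, &tag, 4);                      p += 4;
 *     memcpy (p, fs_config, fs_total);          p += fs_total;   // wTotalLength
 *     memcpy (p, hs_config, hs_total);          p += hs_total;   // optional
 *     memcpy (p, &device_desc, USB_DT_DEVICE_SIZE);
 *     write (fd, buf, p + USB_DT_DEVICE_SIZE - buf);
 */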
1783
1784static int is_valid_config(struct usb_config_descriptor *config,
1785		unsigned int total)
1786{
1787	return config->bDescriptorType == USB_DT_CONFIG
1788		&& config->bLength == USB_DT_CONFIG_SIZE
1789		&& total >= USB_DT_CONFIG_SIZE
1790		&& config->bConfigurationValue != 0
1791		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1792		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1793	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1794	/* FIXME check lengths: walk to end */
1795}
1796
1797static ssize_t
1798dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1799{
1800	struct dev_data		*dev = fd->private_data;
1801	ssize_t			value, length = len;
1802	unsigned		total;
1803	u32			tag;
1804	char			*kbuf;
1805
1806	spin_lock_irq(&dev->lock);
1807	if (dev->state > STATE_DEV_OPENED) {
1808		value = ep0_write(fd, buf, len, ptr);
1809		spin_unlock_irq(&dev->lock);
1810		return value;
1811	}
1812	spin_unlock_irq(&dev->lock);
1813
1814	if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
1815	    (len > PAGE_SIZE * 4))
1816		return -EINVAL;
1817
1818	/* we might need to change message format someday */
1819	if (copy_from_user (&tag, buf, 4))
1820		return -EFAULT;
1821	if (tag != 0)
1822		return -EINVAL;
1823	buf += 4;
1824	length -= 4;
1825
1826	kbuf = memdup_user(buf, length);
1827	if (IS_ERR(kbuf))
1828		return PTR_ERR(kbuf);
1829
1830	spin_lock_irq (&dev->lock);
1831	value = -EINVAL;
1832	if (dev->buf) {
1833		spin_unlock_irq(&dev->lock);
1834		kfree(kbuf);
1835		return value;
1836	}
1837	dev->buf = kbuf;
1838
1839	/* full or low speed config */
1840	dev->config = (void *) kbuf;
1841	total = le16_to_cpu(dev->config->wTotalLength);
1842	if (!is_valid_config(dev->config, total) ||
1843			total > length - USB_DT_DEVICE_SIZE)
1844		goto fail;
1845	kbuf += total;
1846	length -= total;
1847
1848	/* optional high speed config */
1849	if (kbuf [1] == USB_DT_CONFIG) {
1850		dev->hs_config = (void *) kbuf;
1851		total = le16_to_cpu(dev->hs_config->wTotalLength);
1852		if (!is_valid_config(dev->hs_config, total) ||
1853				total > length - USB_DT_DEVICE_SIZE)
1854			goto fail;
1855		kbuf += total;
1856		length -= total;
1857	} else {
1858		dev->hs_config = NULL;
1859	}
1860
1861	/* could support multiple configs, using another encoding! */
1862
1863	/* device descriptor (tweaked for paranoia) */
1864	if (length != USB_DT_DEVICE_SIZE)
1865		goto fail;
1866	dev->dev = (void *)kbuf;
1867	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1868			|| dev->dev->bDescriptorType != USB_DT_DEVICE
1869			|| dev->dev->bNumConfigurations != 1)
1870		goto fail;
1871	dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1872
1873	/* triggers gadgetfs_bind(); then we can enumerate. */
1874	spin_unlock_irq (&dev->lock);
1875	if (dev->hs_config)
1876		gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1877	else
1878		gadgetfs_driver.max_speed = USB_SPEED_FULL;
1879
1880	value = usb_gadget_probe_driver(&gadgetfs_driver);
1881	if (value != 0) {
1882		spin_lock_irq(&dev->lock);
1883		goto fail;
1884	} else {
1885		/* at this point "good" hardware has for the first time
		 * let the host see us.  alternatively, if users
1887		 * unplug/replug that will clear all the error state.
1888		 *
1889		 * note:  everything running before here was guaranteed
1890		 * to choke driver model style diagnostics.  from here
1891		 * on, they can work ... except in cleanup paths that
1892		 * kick in after the ep0 descriptor is closed.
1893		 */
1894		value = len;
1895		dev->gadget_registered = true;
1896	}
1897	return value;
1898
1899fail:
1900	dev->config = NULL;
1901	dev->hs_config = NULL;
1902	dev->dev = NULL;
1903	spin_unlock_irq (&dev->lock);
1904	pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
1905	kfree (dev->buf);
1906	dev->buf = NULL;
1907	return value;
1908}
1909
1910static int
1911dev_open (struct inode *inode, struct file *fd)
1912{
1913	struct dev_data		*dev = inode->i_private;
1914	int			value = -EBUSY;
1915
1916	spin_lock_irq(&dev->lock);
1917	if (dev->state == STATE_DEV_DISABLED) {
1918		dev->ev_next = 0;
1919		dev->state = STATE_DEV_OPENED;
1920		fd->private_data = dev;
1921		get_dev (dev);
1922		value = 0;
1923	}
1924	spin_unlock_irq(&dev->lock);
1925	return value;
1926}
1927
1928static const struct file_operations ep0_operations = {
1929	.llseek =	no_llseek,
1930
1931	.open =		dev_open,
1932	.read =		ep0_read,
1933	.write =	dev_config,
1934	.fasync =	ep0_fasync,
1935	.poll =		ep0_poll,
1936	.unlocked_ioctl = dev_ioctl,
1937	.release =	dev_release,
1938};
1939
1940/*----------------------------------------------------------------------*/
1941
1942/* FILESYSTEM AND SUPERBLOCK OPERATIONS
1943 *
1944 * Mounting the filesystem creates a controller file, used first for
1945 * device configuration then later for event monitoring.
1946 */
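
/* A typical setup from user space looks like (sketch):
 *
 *     mkdir /dev/gadget
 *     mount -t gadgetfs gadgetfs /dev/gadget
 *
 * after which /dev/gadget/$CHIP can be opened and configured as described
 * in "DEVICE INITIALIZATION" above.
 */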
1947
1948
1949/* FIXME PAM etc could set this security policy without mount options
 * if epfiles inherited ownership and permissions from ep0 ...
1951 */
1952
1953static unsigned default_uid;
1954static unsigned default_gid;
1955static unsigned default_perm = S_IRUSR | S_IWUSR;
1956
1957module_param (default_uid, uint, 0644);
1958module_param (default_gid, uint, 0644);
1959module_param (default_perm, uint, 0644);
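
/* When built as a module, these can be set at load time, for example:
 *
 *     modprobe gadgetfs default_uid=1000 default_gid=1000 default_perm=0660
 *
 * They are applied to the files this filesystem creates.
 */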
1960
1961
1962static struct inode *
1963gadgetfs_make_inode (struct super_block *sb,
1964		void *data, const struct file_operations *fops,
1965		int mode)
1966{
1967	struct inode *inode = new_inode (sb);
1968
1969	if (inode) {
1970		inode->i_ino = get_next_ino();
1971		inode->i_mode = mode;
1972		inode->i_uid = make_kuid(&init_user_ns, default_uid);
1973		inode->i_gid = make_kgid(&init_user_ns, default_gid);
1974		inode->i_atime = inode->i_mtime = inode->i_ctime
1975				= current_time(inode);
1976		inode->i_private = data;
1977		inode->i_fop = fops;
1978	}
1979	return inode;
1980}
1981
/* creates in the fs root directory, so files are non-renamable and
 * non-linkable; the inode and dentry stay paired until device reconfig.
 */
1985static struct dentry *
1986gadgetfs_create_file (struct super_block *sb, char const *name,
1987		void *data, const struct file_operations *fops)
1988{
1989	struct dentry	*dentry;
1990	struct inode	*inode;
1991
1992	dentry = d_alloc_name(sb->s_root, name);
1993	if (!dentry)
1994		return NULL;
1995
1996	inode = gadgetfs_make_inode (sb, data, fops,
1997			S_IFREG | (default_perm & S_IRWXUGO));
1998	if (!inode) {
1999		dput(dentry);
2000		return NULL;
2001	}
2002	d_add (dentry, inode);
2003	return dentry;
2004}
2005
2006static const struct super_operations gadget_fs_operations = {
2007	.statfs =	simple_statfs,
2008	.drop_inode =	generic_delete_inode,
2009};
2010
2011static int
2012gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
2013{
2014	struct inode	*inode;
2015	struct dev_data	*dev;
2016	int		rc;
2017
2018	mutex_lock(&sb_mutex);
2019
2020	if (the_device) {
2021		rc = -ESRCH;
2022		goto Done;
2023	}
2024
2025	CHIP = usb_get_gadget_udc_name();
2026	if (!CHIP) {
2027		rc = -ENODEV;
2028		goto Done;
2029	}
2030
2031	/* superblock */
2032	sb->s_blocksize = PAGE_SIZE;
2033	sb->s_blocksize_bits = PAGE_SHIFT;
2034	sb->s_magic = GADGETFS_MAGIC;
2035	sb->s_op = &gadget_fs_operations;
2036	sb->s_time_gran = 1;
2037
2038	/* root inode */
2039	inode = gadgetfs_make_inode (sb,
2040			NULL, &simple_dir_operations,
2041			S_IFDIR | S_IRUGO | S_IXUGO);
2042	if (!inode)
2043		goto Enomem;
2044	inode->i_op = &simple_dir_inode_operations;
2045	if (!(sb->s_root = d_make_root (inode)))
2046		goto Enomem;
2047
2048	/* the ep0 file is named after the controller we expect;
2049	 * user mode code can use it for sanity checks, like we do.
2050	 */
2051	dev = dev_new ();
2052	if (!dev)
2053		goto Enomem;
2054
2055	dev->sb = sb;
2056	dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
2057	if (!dev->dentry) {
2058		put_dev(dev);
2059		goto Enomem;
2060	}
2061
2062	/* other endpoint files are available after hardware setup,
2063	 * from binding to a controller.
2064	 */
2065	the_device = dev;
2066	rc = 0;
2067	goto Done;
2068
2069 Enomem:
2070	kfree(CHIP);
2071	CHIP = NULL;
2072	rc = -ENOMEM;
2073
2074 Done:
2075	mutex_unlock(&sb_mutex);
2076	return rc;
2077}
2078
2079/* "mount -t gadgetfs path /dev/gadget" ends up here */
2080static int gadgetfs_get_tree(struct fs_context *fc)
2081{
2082	return get_tree_single(fc, gadgetfs_fill_super);
2083}
2084
2085static const struct fs_context_operations gadgetfs_context_ops = {
2086	.get_tree	= gadgetfs_get_tree,
2087};
2088
2089static int gadgetfs_init_fs_context(struct fs_context *fc)
2090{
2091	fc->ops = &gadgetfs_context_ops;
2092	return 0;
2093}
2094
2095static void
2096gadgetfs_kill_sb (struct super_block *sb)
2097{
2098	mutex_lock(&sb_mutex);
2099	kill_litter_super (sb);
2100	if (the_device) {
2101		put_dev (the_device);
2102		the_device = NULL;
2103	}
2104	kfree(CHIP);
2105	CHIP = NULL;
2106	mutex_unlock(&sb_mutex);
2107}
2108
2109/*----------------------------------------------------------------------*/
2110
2111static struct file_system_type gadgetfs_type = {
2112	.owner		= THIS_MODULE,
2113	.name		= shortname,
2114	.init_fs_context = gadgetfs_init_fs_context,
2115	.kill_sb	= gadgetfs_kill_sb,
2116};
2117MODULE_ALIAS_FS("gadgetfs");
2118
2119/*----------------------------------------------------------------------*/
2120
2121static int __init init (void)
2122{
2123	int status;
2124
2125	status = register_filesystem (&gadgetfs_type);
2126	if (status == 0)
2127		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2128			shortname, driver_desc);
2129	return status;
2130}
2131module_init (init);
2132
2133static void __exit cleanup (void)
2134{
2135	pr_debug ("unregister %s\n", shortname);
2136	unregister_filesystem (&gadgetfs_type);
2137}
2138module_exit (cleanup);
2139
2140