// SPDX-License-Identifier: GPL-2.0
/*
 * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
 *
 * Copyright (C) 2000-2002 Lineo
 *      by Stuart Lynne, Tom Rushworth, and Bruce Balden
 * Copyright (C) 2002 Toshiba Corporation
 * Copyright (C) 2003 MontaVista Software (source@mvista.com)
 */

/*
 * This device has ep0 and three semi-configurable bulk/interrupt endpoints.
 *
 *  - Endpoint numbering is fixed: ep{1,2,3}-bulk
 *  - Gadget drivers can choose ep maxpacket (8/16/32/64)
 *  - Gadget drivers can choose direction (IN, OUT)
 *  - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
 */
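
/* Illustrative sketch only (kept out of the build): an endpoint descriptor
 * matching the constraints above, e.g. bulk IN on ep2 with a 64-byte
 * maxpacket.  A descriptor like this would normally live in the gadget
 * driver, not here; the name below is hypothetical.
 */
#if 0
static struct usb_endpoint_descriptor example_ep2_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN | 2,	/* "ep2-bulk", IN */
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= cpu_to_le16(64),	/* 8/16/32/64 allowed */
};
#endif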

// #define	VERBOSE		/* extra debug messages (success too) */
// #define	USB_TRACE	/* packet-level success messages */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/prefetch.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>


#include "goku_udc.h"

#define	DRIVER_DESC		"TC86C001 USB Device Controller"
#define	DRIVER_VERSION		"30-Oct 2003"

static const char driver_name [] = "goku_udc";
static const char driver_desc [] = DRIVER_DESC;

MODULE_AUTHOR("source@mvista.com");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

/*
 * IN dma behaves well under testing, although the IN-dma abort paths
 * don't behave quite as expected; it is used by default.
 *
 * OUT dma has documented design problems handling the common "short
 * packet" transfer-termination policy, so it can't be enabled by
 * default even if the OUT-dma abort problems were resolved.
 */
static unsigned use_dma = 1;

#if 0
//#include <linux/moduleparam.h>
/* "modprobe goku_udc use_dma=1" etc
 *	0 to disable dma
 *	1 to use IN dma only (normal operation)
 *	2 to use IN and OUT dma
 */
module_param(use_dma, uint, S_IRUGO);
#endif

/*-------------------------------------------------------------------------*/

static void nuke(struct goku_ep *, int status);

static inline void
command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
{
	writel(COMMAND_EP(epnum) | command, &regs->Command);
	udelay(300);
}

static int
goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct goku_udc	*dev;
	struct goku_ep	*ep;
	u32		mode;
	u16		max;
	unsigned long	flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (ep == &dev->ep[0])
		return -EINVAL;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->num != usb_endpoint_num(desc))
		return -EINVAL;

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		break;
	default:
		return -EINVAL;
	}

	if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
			!= EPxSTATUS_EP_INVALID)
		return -EBUSY;

	/* enabling the no-toggle interrupt mode would need an api hook */
	mode = 0;
	max = get_unaligned_le16(&desc->wMaxPacketSize);
	switch (max) {
	case 64:
		mode++;
		fallthrough;
	case 32:
		mode++;
		fallthrough;
	case 16:
		mode++;
		fallthrough;
	case 8:
		mode <<= 3;
		break;
	default:
		return -EINVAL;
	}
	mode |= 2 << 1;		/* bulk, or intr-with-toggle */
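	/* At this point the mode word holds the maxpacket code in bits 4:3
	 * (0=8 ... 3=64) and the transfer type in bits 2:1; the IN direction
	 * bit (bit 0) is added just below.  Worked example: a 64-byte bulk
	 * IN endpoint programs its mode register with 0x1d.
	 */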

	/* the ep1/ep2 dma direction is chosen here, at enable time; the
	 * endpoint still works in the other direction, but only with pio.
	 * be cautious with out-dma.
	 */
	ep->is_in = usb_endpoint_dir_in(desc);
	if (ep->is_in) {
		mode |= 1;
		ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
	} else {
		ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
		if (ep->dma)
			DBG(dev, "%s out-dma hides short packets\n",
				ep->ep.name);
	}

	spin_lock_irqsave(&ep->dev->lock, flags);

	/* ep1 and ep2 can do double buffering and/or dma */
	if (ep->num < 3) {
		struct goku_udc_regs __iomem	*regs = ep->dev->regs;
		u32				tmp;

		/* double buffer except (for now) with pio in */
		tmp = ((ep->dma || !ep->is_in)
				? 0x10	/* double buffered */
				: 0x11	/* single buffer */
			) << ep->num;
		tmp |= readl(&regs->EPxSingle);
		writel(tmp, &regs->EPxSingle);

		tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
		tmp |= readl(&regs->EPxBCS);
		writel(tmp, &regs->EPxBCS);
	}
	writel(mode, ep->reg_mode);
	command(ep->dev->regs, COMMAND_RESET, ep->num);
	ep->ep.maxpacket = max;
	ep->stopped = 0;
	ep->ep.desc = desc;
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
		ep->is_in ? "IN" : "OUT",
		ep->dma ? "dma" : "pio",
		max);

	return 0;
}

static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
{
	struct goku_udc		*dev = ep->dev;

	if (regs) {
		command(regs, COMMAND_INVALID, ep->num);
		if (ep->num) {
			if (ep->num == UDC_MSTWR_ENDPOINT)
				dev->int_enable &= ~(INT_MSTWREND
							|INT_MSTWRTMOUT);
			else if (ep->num == UDC_MSTRD_ENDPOINT)
				dev->int_enable &= ~INT_MSTRDEND;
			dev->int_enable &= ~INT_EPxDATASET (ep->num);
		} else
			dev->int_enable &= ~INT_EP0;
		writel(dev->int_enable, &regs->int_enable);
		readl(&regs->int_enable);
		if (ep->num < 3) {
			struct goku_udc_regs __iomem	*r = ep->dev->regs;
			u32				tmp;

			tmp = readl(&r->EPxSingle);
			tmp &= ~(0x11 << ep->num);
			writel(tmp, &r->EPxSingle);

			tmp = readl(&r->EPxBCS);
			tmp &= ~(0x11 << ep->num);
			writel(tmp, &r->EPxBCS);
		}
		/* reset dma in case we're still using it */
		if (ep->dma) {
			u32	master;

			master = readl(&regs->dma_master) & MST_RW_BITS;
			if (ep->num == UDC_MSTWR_ENDPOINT) {
				master &= ~MST_W_BITS;
				master |= MST_WR_RESET;
			} else {
				master &= ~MST_R_BITS;
				master |= MST_RD_RESET;
			}
			writel(master, &regs->dma_master);
		}
	}

	usb_ep_set_maxpacket_limit(&ep->ep, MAX_FIFO_SIZE);
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->irqs = 0;
	ep->dma = 0;
}

static int goku_ep_disable(struct usb_ep *_ep)
{
	struct goku_ep	*ep;
	struct goku_udc	*dev;
	unsigned long	flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !ep->ep.desc)
		return -ENODEV;
	dev = ep->dev;
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	VDBG(dev, "disable %s\n", _ep->name);

	spin_lock_irqsave(&dev->lock, flags);
	nuke(ep, -ESHUTDOWN);
	ep_reset(dev->regs, ep);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static struct usb_request *
goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct goku_request	*req;

	if (!_ep)
		return NULL;
	req = kzalloc(sizeof *req, gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);
	return &req->req;
}

static void
goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct goku_request	*req;

	if (!_ep || !_req)
		return;

	req = container_of(_req, struct goku_request, req);
	WARN_ON(!list_empty(&req->queue));
	kfree(req);
}

/*-------------------------------------------------------------------------*/

static void
done(struct goku_ep *ep, struct goku_request *req, int status)
{
	struct goku_udc		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;

	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

#ifndef USB_TRACE
	if (status && status != -ESHUTDOWN)
#endif
		VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}

/*-------------------------------------------------------------------------*/

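/* PIO moves one byte per 32-bit FIFO register access: write_packet() below
 * writel()s each buffer byte into the endpoint FIFO, and read_fifo() pulls
 * bytes back out the same way with readl().
 */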
static inline int
write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
{
	unsigned	length, count;

	length = min(req->req.length - req->req.actual, max);
	req->req.actual += length;

	count = length;
	while (likely(count--))
		writel(*buf++, fifo);
	return length;
}

// return:  0 = still running, 1 = completed, negative = errno
static int write_fifo(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc	*dev = ep->dev;
	u32		tmp;
	u8		*buf;
	unsigned	count;
	int		is_last;

	tmp = readl(&dev->regs->DataSet);
	buf = req->req.buf + req->req.actual;
	prefetch(buf);

	dev = ep->dev;
	if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
		return -EL2HLT;

	/* NOTE:  just single-buffered PIO-IN for now.  */
	if (unlikely((tmp & DATASET_A(ep->num)) != 0))
		return 0;

	/* clear our "packet available" irq */
	if (ep->num != 0)
		writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);

	count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);

	/* last packet often short (sometimes a zlp, especially on ep0) */
	if (unlikely(count != ep->ep.maxpacket)) {
		writel(~(1<<ep->num), &dev->regs->EOP);
		if (ep->num == 0) {
			dev->ep[0].stopped = 1;
			dev->ep0state = EP0_STATUS;
		}
		is_last = 1;
	} else {
		if (likely(req->req.length != req->req.actual)
				|| req->req.zero)
			is_last = 0;
		else
			is_last = 1;
	}
#if 0		/* printk seemed to trash is_last...*/
//#ifdef USB_TRACE
	VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
		ep->ep.name, count, is_last ? "/last" : "",
		req->req.length - req->req.actual, req);
#endif

	/* requests complete when all IN data is in the FIFO,
	 * or sometimes later, if a zlp was needed.
	 */
	if (is_last) {
		done(ep, req, 0);
		return 1;
	}

	return 0;
}

static int read_fifo(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs;
	u32				size, set;
	u8				*buf;
	unsigned			bufferspace, is_short, dbuff;

	regs = ep->dev->regs;
top:
	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
		return -EL2HLT;

	dbuff = (ep->num == 1 || ep->num == 2);
	do {
		/* ack dataset irq matching the status we'll handle */
		if (ep->num != 0)
			writel(~INT_EPxDATASET(ep->num), &regs->int_status);

		set = readl(&regs->DataSet) & DATASET_AB(ep->num);
		size = readl(&regs->EPxSizeLA[ep->num]);
		bufferspace = req->req.length - req->req.actual;

		/* usually do nothing without an OUT packet */
		if (likely(ep->num != 0 || bufferspace != 0)) {
			if (unlikely(set == 0))
				break;
			/* use ep1/ep2 double-buffering for OUT */
			if (!(size & PACKET_ACTIVE))
				size = readl(&regs->EPxSizeLB[ep->num]);
			if (!(size & PACKET_ACTIVE))	/* "can't happen" */
				break;
			size &= DATASIZE;	/* EPxSizeH == 0 */

		/* ep0out no-out-data case for set_config, etc */
		} else
			size = 0;

		/* read all bytes from this packet */
		req->req.actual += size;
		is_short = (size < ep->ep.maxpacket);
#ifdef USB_TRACE
		VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
			ep->ep.name, size, is_short ? "/S" : "",
			req, req->req.actual, req->req.length);
#endif
		while (likely(size-- != 0)) {
			u8	byte = (u8) readl(ep->reg_fifo);

			if (unlikely(bufferspace == 0)) {
				/* this happens when the driver's buffer
				 * is smaller than what the host sent.
				 * discard the extra data in this packet.
				 */
				if (req->req.status != -EOVERFLOW)
					DBG(ep->dev, "%s overflow %u\n",
						ep->ep.name, size);
				req->req.status = -EOVERFLOW;
			} else {
				*buf++ = byte;
				bufferspace--;
			}
		}

		/* completion */
		if (unlikely(is_short || req->req.actual == req->req.length)) {
			if (unlikely(ep->num == 0)) {
				/* non-control endpoints now usable? */
				if (ep->dev->req_config)
					writel(ep->dev->configured
							? USBSTATE_CONFIGURED
							: 0,
						&regs->UsbState);
				/* ep0out status stage */
				writel(~(1<<0), &regs->EOP);
				ep->stopped = 1;
				ep->dev->ep0state = EP0_STATUS;
			}
			done(ep, req, 0);

			/* empty the second buffer asap */
			if (dbuff && !list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						struct goku_request, queue);
				goto top;
			}
			return 1;
		}
	} while (dbuff);
	return 0;
}

static inline void
pio_irq_enable(struct goku_udc *dev,
		struct goku_udc_regs __iomem *regs, int epnum)
{
	dev->int_enable |= INT_EPxDATASET (epnum);
	writel(dev->int_enable, &regs->int_enable);
	/* write may still be posted */
}

static inline void
pio_irq_disable(struct goku_udc *dev,
		struct goku_udc_regs __iomem *regs, int epnum)
{
	dev->int_enable &= ~INT_EPxDATASET (epnum);
	writel(dev->int_enable, &regs->int_enable);
	/* write may still be posted */
}

static inline void
pio_advance(struct goku_ep *ep)
{
	struct goku_request	*req;

	if (unlikely(list_empty (&ep->queue)))
		return;
	req = list_entry(ep->queue.next, struct goku_request, queue);
	(ep->is_in ? write_fifo : read_fifo)(ep, req);
}


/*-------------------------------------------------------------------------*/

// return:  0 = q running, 1 = q stopped, negative = errno
static int start_dma(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	u32				master;
	u32				start = req->req.dma;
	u32				end = start + req->req.length - 1;

	master = readl(&regs->dma_master) & MST_RW_BITS;

	/* re-init the bits affecting IN dma; careful with zlps */
	if (likely(ep->is_in)) {
		if (unlikely(master & MST_RD_ENA)) {
			DBG (ep->dev, "start, IN active dma %03x!!\n",
				master);
//			return -EL2HLT;
		}
		writel(end, &regs->in_dma_end);
		writel(start, &regs->in_dma_start);

		master &= ~MST_R_BITS;
		if (unlikely(req->req.length == 0))
			master = MST_RD_ENA | MST_RD_EOPB;
		else if ((req->req.length % ep->ep.maxpacket) != 0
					|| req->req.zero)
			master = MST_RD_ENA | MST_EOPB_ENA;
		else
			master = MST_RD_ENA | MST_EOPB_DIS;

		ep->dev->int_enable |= INT_MSTRDEND;

	/* Goku DMA-OUT merges short packets, which plays poorly with
	 * protocols where short packets mark the transfer boundaries.
	 * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
	 * ending transfers after 3 SOFs; we don't turn it on.
	 */
	} else {
		if (unlikely(master & MST_WR_ENA)) {
			DBG (ep->dev, "start, OUT active dma %03x!!\n",
				master);
//			return -EL2HLT;
		}
		writel(end, &regs->out_dma_end);
		writel(start, &regs->out_dma_start);

		master &= ~MST_W_BITS;
		master |= MST_WR_ENA | MST_TIMEOUT_DIS;

		ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
	}

	writel(master, &regs->dma_master);
	writel(ep->dev->int_enable, &regs->int_enable);
	return 0;
}

static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
{
	struct goku_request		*req;
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	u32				master;

	master = readl(&regs->dma_master);

	if (unlikely(list_empty(&ep->queue))) {
stop:
		if (ep->is_in)
			dev->int_enable &= ~INT_MSTRDEND;
		else
			dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
		writel(dev->int_enable, &regs->int_enable);
		return;
	}
	req = list_entry(ep->queue.next, struct goku_request, queue);

	/* normal hw dma completion (not abort) */
	if (likely(ep->is_in)) {
		if (unlikely(master & MST_RD_ENA))
			return;
		req->req.actual = readl(&regs->in_dma_current);
	} else {
		if (unlikely(master & MST_WR_ENA))
			return;

		/* hardware merges short packets, and also hides packet
		 * overruns.  a partial packet MAY be in the fifo here.
		 */
		req->req.actual = readl(&regs->out_dma_current);
	}
	req->req.actual -= req->req.dma;
	req->req.actual++;

#ifdef USB_TRACE
	VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
		ep->ep.name, ep->is_in ? "IN" : "OUT",
		req->req.actual, req->req.length, req);
#endif
	done(ep, req, 0);
	if (list_empty(&ep->queue))
		goto stop;
	req = list_entry(ep->queue.next, struct goku_request, queue);
	(void) start_dma(ep, req);
}

static void abort_dma(struct goku_ep *ep, int status)
{
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	struct goku_request		*req;
	u32				curr, master;

	/* NAK future host requests, hoping the implicit delay lets the
	 * dma engine finish reading (or writing) its latest packet and
	 * empty the dma buffer (up to 16 bytes).
	 *
	 * This avoids needing to clean up a partial packet in the fifo;
	 * we can't do that for IN without side effects to HALT and TOGGLE.
	 */
	command(regs, COMMAND_FIFO_DISABLE, ep->num);
	req = list_entry(ep->queue.next, struct goku_request, queue);
	master = readl(&regs->dma_master) & MST_RW_BITS;

	/* FIXME using these resets isn't usably documented. this may
	 * not work unless it's followed by disabling the endpoint.
	 *
	 * FIXME the OUT reset path doesn't even behave consistently.
	 */
	if (ep->is_in) {
		if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
			goto finished;
		curr = readl(&regs->in_dma_current);

		writel(curr, &regs->in_dma_end);
		writel(curr, &regs->in_dma_start);

		master &= ~MST_R_BITS;
		master |= MST_RD_RESET;
		writel(master, &regs->dma_master);

		if (readl(&regs->dma_master) & MST_RD_ENA)
			DBG(ep->dev, "IN dma active after reset!\n");

	} else {
		if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
			goto finished;
		curr = readl(&regs->out_dma_current);

		writel(curr, &regs->out_dma_end);
		writel(curr, &regs->out_dma_start);

		master &= ~MST_W_BITS;
		master |= MST_WR_RESET;
		writel(master, &regs->dma_master);

		if (readl(&regs->dma_master) & MST_WR_ENA)
			DBG(ep->dev, "OUT dma active after reset!\n");
	}
	req->req.actual = (curr - req->req.dma) + 1;
	req->req.status = status;

	VDBG(ep->dev, "%s %s %s %d/%d\n", __func__, ep->ep.name,
		ep->is_in ? "IN" : "OUT",
		req->req.actual, req->req.length);

	command(regs, COMMAND_FIFO_ENABLE, ep->num);

	return;

finished:
	/* dma already completed; no abort needed */
	command(regs, COMMAND_FIFO_ENABLE, ep->num);
	req->req.actual = req->req.length;
	req->req.status = 0;
}

/*-------------------------------------------------------------------------*/

static int
goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct goku_request	*req;
	struct goku_ep		*ep;
	struct goku_udc		*dev;
	unsigned long		flags;
	int			status;

	/* always require a cpu-view buffer so pio works */
	req = container_of(_req, struct goku_request, req);
	if (unlikely(!_req || !_req->complete
			|| !_req->buf || !list_empty(&req->queue)))
		return -EINVAL;
	ep = container_of(_ep, struct goku_ep, ep);
	if (unlikely(!_ep || (!ep->ep.desc && ep->num != 0)))
		return -EINVAL;
	dev = ep->dev;
	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;

	/* can't touch registers when suspended */
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma) {
		status = usb_gadget_map_request(&dev->gadget, &req->req,
				ep->is_in);
		if (status)
			return status;
	}

#ifdef USB_TRACE
	VDBG(dev, "%s queue req %p, len %u buf %p\n",
			_ep->name, _req, _req->length, _req->buf);
#endif

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* for ep0 IN without premature status, zlp is required and
	 * writing EOP starts the status stage (OUT).
	 */
	if (unlikely(ep->num == 0 && ep->is_in))
		_req->zero = 1;

	/* kickstart this i/o queue? */
	status = 0;
	if (list_empty(&ep->queue) && likely(!ep->stopped)) {
		/* dma:  done after dma completion IRQ (or error)
		 * pio:  done after last fifo operation
		 */
		if (ep->dma)
			status = start_dma(ep, req);
		else
			status = (ep->is_in ? write_fifo : read_fifo)(ep, req);

		if (unlikely(status != 0)) {
			if (status > 0)
				status = 0;
			req = NULL;
		}

	} /* else pio or dma irq handler advances the queue. */

	if (likely(req != NULL))
		list_add_tail(&req->queue, &ep->queue);

	if (likely(!list_empty(&ep->queue))
			&& likely(ep->num != 0)
			&& !ep->dma
			&& !(dev->int_enable & INT_EPxDATASET (ep->num)))
		pio_irq_enable(dev, dev->regs, ep->num);

	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return status;
}

/* dequeue ALL requests */
static void nuke(struct goku_ep *ep, int status)
{
	struct goku_request	*req;

	ep->stopped = 1;
	if (list_empty(&ep->queue))
		return;
	if (ep->dma)
		abort_dma(ep, status);
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct goku_request, queue);
		done(ep, req, status);
	}
}

/* dequeue JUST ONE request */
static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct goku_request	*req;
	struct goku_ep		*ep;
	struct goku_udc		*dev;
	unsigned long		flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* we can't touch (dma) registers when suspended */
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	VDBG(dev, "%s %s %s %s %p\n", __func__, _ep->name,
		ep->is_in ? "IN" : "OUT",
		ep->dma ? "dma" : "pio",
		_req);

	spin_lock_irqsave(&dev->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore (&dev->lock, flags);
		return -EINVAL;
	}

	if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
		abort_dma(ep, -ECONNRESET);
		done(ep, req, -ECONNRESET);
		dma_advance(dev, ep);
	} else if (!list_empty(&req->queue))
		done(ep, req, -ECONNRESET);
	else
		req = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	return req ? 0 : -EOPNOTSUPP;
}

/*-------------------------------------------------------------------------*/

static void goku_clear_halt(struct goku_ep *ep)
{
	// assert (ep->num !=0)
	VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
	command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
	command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
	if (ep->stopped) {
		ep->stopped = 0;
		if (ep->dma) {
			struct goku_request	*req;

			if (list_empty(&ep->queue))
				return;
			req = list_entry(ep->queue.next, struct goku_request,
						queue);
			(void) start_dma(ep, req);
		} else
			pio_advance(ep);
	}
}

static int goku_set_halt(struct usb_ep *_ep, int value)
{
	struct goku_ep	*ep;
	unsigned long	flags;
	int		retval = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of (_ep, struct goku_ep, ep);

	if (ep->num == 0) {
		if (value) {
			ep->dev->ep0state = EP0_STALL;
			ep->dev->ep[0].stopped = 1;
		} else
			return -EINVAL;

	/* don't change EPxSTATUS_EP_INVALID to READY */
	} else if (!ep->ep.desc) {
		DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
		return -EINVAL;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value
			/* data in (either) packet buffer? */
			&& (readl(&ep->dev->regs->DataSet)
					& DATASET_AB(ep->num)))
		retval = -EAGAIN;
	else if (!value)
		goku_clear_halt(ep);
	else {
		ep->stopped = 1;
		VDBG(ep->dev, "%s set halt\n", ep->ep.name);
		command(ep->dev->regs, COMMAND_STALL, ep->num);
		readl(ep->reg_status);
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return retval;
}

static int goku_fifo_status(struct usb_ep *_ep)
{
	struct goku_ep			*ep;
	struct goku_udc_regs __iomem	*regs;
	u32				size;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct goku_ep, ep);

	/* size is only reported sanely for OUT */
	if (ep->is_in)
		return -EOPNOTSUPP;

	/* ignores 16-byte dma buffer; SizeH == 0 */
	regs = ep->dev->regs;
	size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
	size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
	VDBG(ep->dev, "%s %s %u\n", __func__, ep->ep.name, size);
	return size;
}

static void goku_fifo_flush(struct usb_ep *_ep)
{
	struct goku_ep			*ep;
	struct goku_udc_regs __iomem	*regs;
	u32				size;

	if (!_ep)
		return;
	ep = container_of(_ep, struct goku_ep, ep);
	VDBG(ep->dev, "%s %s\n", __func__, ep->ep.name);

	/* don't change EPxSTATUS_EP_INVALID to READY */
	if (!ep->ep.desc && ep->num != 0) {
		DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
		return;
	}

	regs = ep->dev->regs;
	size = readl(&regs->EPxSizeLA[ep->num]);
	size &= DATASIZE;

	/* Undesirable behavior:  FIFO_CLEAR also clears the
	 * endpoint halt feature.  For OUT we _could_ just read
	 * the bytes out (PIO, if !ep->dma); for IN there is no choice.
	 */
	if (size)
		command(regs, COMMAND_FIFO_CLEAR, ep->num);
}

static const struct usb_ep_ops goku_ep_ops = {
	.enable		= goku_ep_enable,
	.disable	= goku_ep_disable,

	.alloc_request	= goku_alloc_request,
	.free_request	= goku_free_request,

	.queue		= goku_queue,
	.dequeue	= goku_dequeue,

	.set_halt	= goku_set_halt,
	.fifo_status	= goku_fifo_status,
	.fifo_flush	= goku_fifo_flush,
};
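
/* Illustrative sketch only (kept out of the build): how a gadget driver
 * typically drives one of these endpoints through the standard gadget API
 * once usb_ep_enable() has succeeded.  The names "my_ep" and "my_complete"
 * are hypothetical.
 */
#if 0
static int example_submit(struct usb_ep *my_ep, void *buf, unsigned length,
		void (*my_complete)(struct usb_ep *, struct usb_request *))
{
	struct usb_request	*req;
	int			status;

	/* ends up in goku_alloc_request() via the ep ops above */
	req = usb_ep_alloc_request(my_ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;
	req->buf = buf;
	req->length = length;
	req->complete = my_complete;

	/* ends up in goku_queue(); my_complete() runs when it finishes */
	status = usb_ep_queue(my_ep, req, GFP_ATOMIC);
	if (status)
		usb_ep_free_request(my_ep, req);
	return status;
}
#endif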

/*-------------------------------------------------------------------------*/

static int goku_get_frame(struct usb_gadget *_gadget)
{
	return -EOPNOTSUPP;
}

static struct usb_ep *goku_match_ep(struct usb_gadget *g,
		struct usb_endpoint_descriptor *desc,
		struct usb_ss_ep_comp_descriptor *ep_comp)
{
	struct goku_udc	*dev = to_goku_udc(g);
	struct usb_ep *ep;

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_INT:
		/* single buffering is enough */
		ep = &dev->ep[3].ep;
		if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
			return ep;
		break;
	case USB_ENDPOINT_XFER_BULK:
		if (usb_endpoint_dir_in(desc)) {
			/* DMA may be available */
			ep = &dev->ep[2].ep;
			if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
				return ep;
		}
		break;
	default:
		/* nothing */ ;
	}

	return NULL;
}

static int goku_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int goku_udc_stop(struct usb_gadget *g);

static const struct usb_gadget_ops goku_ops = {
	.get_frame	= goku_get_frame,
	.udc_start	= goku_udc_start,
	.udc_stop	= goku_udc_stop,
	.match_ep	= goku_match_ep,
	// no remote wakeup
	// not selfpowered
};

/*-------------------------------------------------------------------------*/

static inline const char *dmastr(void)
{
	if (use_dma == 0)
		return "(dma disabled)";
	else if (use_dma == 2)
		return "(dma IN and OUT)";
	else
		return "(dma IN)";
}

#ifdef CONFIG_USB_GADGET_DEBUG_FILES

static const char proc_node_name [] = "driver/udc";

#define FOURBITS "%s%s%s%s"
#define EIGHTBITS FOURBITS FOURBITS

static void dump_intmask(struct seq_file *m, const char *label, u32 mask)
{
	/* int_status is the same format ... */
	seq_printf(m, "%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
		   label, mask,
		   (mask & INT_PWRDETECT) ? " power" : "",
		   (mask & INT_SYSERROR) ? " sys" : "",
		   (mask & INT_MSTRDEND) ? " in-dma" : "",
		   (mask & INT_MSTWRTMOUT) ? " wrtmo" : "",

		   (mask & INT_MSTWREND) ? " out-dma" : "",
		   (mask & INT_MSTWRSET) ? " wrset" : "",
		   (mask & INT_ERR) ? " err" : "",
		   (mask & INT_SOF) ? " sof" : "",

		   (mask & INT_EP3NAK) ? " ep3nak" : "",
		   (mask & INT_EP2NAK) ? " ep2nak" : "",
		   (mask & INT_EP1NAK) ? " ep1nak" : "",
		   (mask & INT_EP3DATASET) ? " ep3" : "",

		   (mask & INT_EP2DATASET) ? " ep2" : "",
		   (mask & INT_EP1DATASET) ? " ep1" : "",
		   (mask & INT_STATUSNAK) ? " ep0snak" : "",
		   (mask & INT_STATUS) ? " ep0status" : "",

		   (mask & INT_SETUP) ? " setup" : "",
		   (mask & INT_ENDPOINT0) ? " ep0" : "",
		   (mask & INT_USBRESET) ? " reset" : "",
		   (mask & INT_SUSPEND) ? " suspend" : "");
}

static const char *udc_ep_state(enum ep0state state)
{
	switch (state) {
	case EP0_DISCONNECT:
		return "ep0_disconnect";
	case EP0_IDLE:
		return "ep0_idle";
	case EP0_IN:
		return "ep0_in";
	case EP0_OUT:
		return "ep0_out";
	case EP0_STATUS:
		return "ep0_status";
	case EP0_STALL:
		return "ep0_stall";
	case EP0_SUSPEND:
		return "ep0_suspend";
	}

	return "ep0_?";
}

static const char *udc_ep_status(u32 status)
{
	switch (status & EPxSTATUS_EP_MASK) {
	case EPxSTATUS_EP_READY:
		return "ready";
	case EPxSTATUS_EP_DATAIN:
		return "packet";
	case EPxSTATUS_EP_FULL:
		return "full";
	case EPxSTATUS_EP_TX_ERR:	/* host will retry */
		return "tx_err";
	case EPxSTATUS_EP_RX_ERR:
		return "rx_err";
	case EPxSTATUS_EP_BUSY:		/* ep0 only */
		return "busy";
	case EPxSTATUS_EP_STALL:
		return "stall";
	case EPxSTATUS_EP_INVALID:	/* these "can't happen" */
		return "invalid";
	}

	return "?";
}

static int udc_proc_read(struct seq_file *m, void *v)
{
	struct goku_udc			*dev = m->private;
	struct goku_udc_regs __iomem	*regs = dev->regs;
	unsigned long			flags;
	int				i, is_usb_connected;
	u32				tmp;

	local_irq_save(flags);

	/* basic device status */
	tmp = readl(&regs->power_detect);
	is_usb_connected = tmp & PW_DETECT;
	seq_printf(m,
		   "%s - %s\n"
		   "%s version: %s %s\n"
		   "Gadget driver: %s\n"
		   "Host %s, %s\n"
		   "\n",
		   pci_name(dev->pdev), driver_desc,
		   driver_name, DRIVER_VERSION, dmastr(),
		   dev->driver ? dev->driver->driver.name : "(none)",
		   is_usb_connected
			   ? ((tmp & PW_PULLUP) ? "full speed" : "powered")
			   : "disconnected",
		   udc_ep_state(dev->ep0state));

	dump_intmask(m, "int_status", readl(&regs->int_status));
	dump_intmask(m, "int_enable", readl(&regs->int_enable));

	if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
		goto done;

	/* registers for (active) device and ep0 */
	seq_printf(m, "\nirqs %lu\ndataset %02x single.bcs %02x.%02x state %x addr %u\n",
		   dev->irqs, readl(&regs->DataSet),
		   readl(&regs->EPxSingle), readl(&regs->EPxBCS),
		   readl(&regs->UsbState),
		   readl(&regs->address));
	if (seq_has_overflowed(m))
		goto done;

	tmp = readl(&regs->dma_master);
	seq_printf(m, "dma %03X =" EIGHTBITS "%s %s\n",
		   tmp,
		   (tmp & MST_EOPB_DIS) ? " eopb-" : "",
		   (tmp & MST_EOPB_ENA) ? " eopb+" : "",
		   (tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
		   (tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",

		   (tmp & MST_RD_EOPB) ? " eopb" : "",
		   (tmp & MST_RD_RESET) ? " in_reset" : "",
		   (tmp & MST_WR_RESET) ? " out_reset" : "",
		   (tmp & MST_RD_ENA) ? " IN" : "",

		   (tmp & MST_WR_ENA) ? " OUT" : "",
		   (tmp & MST_CONNECTION) ? "ep1in/ep2out" : "ep1out/ep2in");
	if (seq_has_overflowed(m))
		goto done;

	/* dump endpoint queues */
	for (i = 0; i < 4; i++) {
		struct goku_ep		*ep = &dev->ep [i];
		struct goku_request	*req;

		if (i && !ep->ep.desc)
			continue;

		tmp = readl(ep->reg_status);
		seq_printf(m, "%s %s max %u %s, irqs %lu, status %02x (%s) " FOURBITS "\n",
			   ep->ep.name,
			   ep->is_in ? "in" : "out",
			   ep->ep.maxpacket,
			   ep->dma ? "dma" : "pio",
			   ep->irqs,
			   tmp, udc_ep_status(tmp),
			   (tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
			   (tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
			   (tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
			   (tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : "");
		if (seq_has_overflowed(m))
			goto done;

		if (list_empty(&ep->queue)) {
			seq_puts(m, "\t(nothing queued)\n");
			if (seq_has_overflowed(m))
				goto done;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			if (ep->dma && req->queue.prev == &ep->queue) {
				if (i == UDC_MSTRD_ENDPOINT)
					tmp = readl(&regs->in_dma_current);
				else
					tmp = readl(&regs->out_dma_current);
				tmp -= req->req.dma;
				tmp++;
			} else
				tmp = req->req.actual;

			seq_printf(m, "\treq %p len %u/%u buf %p\n",
				   &req->req, tmp, req->req.length,
				   req->req.buf);
			if (seq_has_overflowed(m))
				goto done;
		}
	}

done:
	local_irq_restore(flags);
	return 0;
}
#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */

/*-------------------------------------------------------------------------*/

static void udc_reinit (struct goku_udc *dev)
{
	static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };

	unsigned i;

	INIT_LIST_HEAD (&dev->gadget.ep_list);
	dev->gadget.ep0 = &dev->ep [0].ep;
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->ep0state = EP0_DISCONNECT;
	dev->irqs = 0;

	for (i = 0; i < 4; i++) {
		struct goku_ep	*ep = &dev->ep[i];

		ep->num = i;
		ep->ep.name = names[i];
		ep->reg_fifo = &dev->regs->ep_fifo [i];
		ep->reg_status = &dev->regs->ep_status [i];
		ep->reg_mode = &dev->regs->ep_mode[i];

		ep->ep.ops = &goku_ep_ops;
		list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
		ep->dev = dev;
		INIT_LIST_HEAD (&ep->queue);

		ep_reset(NULL, ep);

		if (i == 0)
			ep->ep.caps.type_control = true;
		else
			ep->ep.caps.type_bulk = true;

		ep->ep.caps.dir_in = true;
		ep->ep.caps.dir_out = true;
	}

	dev->ep[0].reg_mode = NULL;
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, MAX_EP0_SIZE);
	list_del_init (&dev->ep[0].ep.ep_list);
}

static void udc_reset(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem	*regs = dev->regs;

	writel(0, &regs->power_detect);
	writel(0, &regs->int_enable);
	readl(&regs->int_enable);
	dev->int_enable = 0;

	/* deassert reset, leave USB D+ at hi-Z (no pullup)
	 * don't let INT_PWRDETECT sequence begin
	 */
	udelay(250);
	writel(PW_RESETB, &regs->power_detect);
	readl(&regs->int_enable);
}

static void ep0_start(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem	*regs = dev->regs;
	unsigned			i;

	VDBG(dev, "%s\n", __func__);

	udc_reset(dev);
	udc_reinit (dev);
	//writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);

	/* hw handles set_address, set_feature, get_status; maybe more */
	writel(   G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
		| G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
		| G_REQMODE_GET_DESC
		| G_REQMODE_CLEAR_FEAT
		, &regs->reqmode);

	for (i = 0; i < 4; i++)
		dev->ep[i].irqs = 0;

	/* can't modify descriptors after writing UsbReady */
	for (i = 0; i < DESC_LEN; i++)
		writel(0, &regs->descriptors[i]);
	writel(0, &regs->UsbReady);

	/* expect ep0 requests when the host drops reset */
	writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
	dev->int_enable = INT_DEVWIDE | INT_EP0;
	writel(dev->int_enable, &dev->regs->int_enable);
	readl(&regs->int_enable);
	dev->gadget.speed = USB_SPEED_FULL;
	dev->ep0state = EP0_IDLE;
}

static void udc_enable(struct goku_udc *dev)
{
	/* start enumeration now, or after power detect irq */
	if (readl(&dev->regs->power_detect) & PW_DETECT)
		ep0_start(dev);
	else {
		DBG(dev, "%s\n", __func__);
		dev->int_enable = INT_PWRDETECT;
		writel(dev->int_enable, &dev->regs->int_enable);
	}
}

/*-------------------------------------------------------------------------*/

/* keeping it simple:
 * - one bus driver, initted first;
 * - one function driver, initted second
 */

/* when a driver is successfully registered, it will receive
 * control requests including set_configuration(), which enables
 * non-control requests.  then usb traffic follows until a
 * disconnect is reported.  then a host may connect again, or
 * the driver might get unbound.
 */
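
/* Note on the flow here: goku_probe() registers the gadget with
 * usb_add_gadget_udc_release(), and the UDC core then calls
 * goku_udc_start() when a function driver binds and goku_udc_stop()
 * when it unbinds -- the standard udc_start/udc_stop contract, nothing
 * controller-specific.
 */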
static int goku_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct goku_udc	*dev = to_goku_udc(g);

	/* hook up the driver */
	driver->driver.bus = NULL;
	dev->driver = driver;

	/*
	 * then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	udc_enable(dev);

	return 0;
}

static void stop_activity(struct goku_udc *dev)
{
	unsigned	i;

	DBG (dev, "%s\n", __func__);

	/* disconnect gadget driver after quiescing hw and the driver */
	udc_reset (dev);
	for (i = 0; i < 4; i++)
		nuke(&dev->ep [i], -ESHUTDOWN);

	if (dev->driver)
		udc_enable(dev);
}

static int goku_udc_stop(struct usb_gadget *g)
{
	struct goku_udc	*dev = to_goku_udc(g);
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->driver = NULL;
	stop_activity(dev);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static void ep0_setup(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem	*regs = dev->regs;
	struct usb_ctrlrequest		ctrl;
	int				tmp;

	/* read SETUP packet and enter DATA stage */
	ctrl.bRequestType = readl(&regs->bRequestType);
	ctrl.bRequest = readl(&regs->bRequest);
	ctrl.wValue  = cpu_to_le16((readl(&regs->wValueH)  << 8)
					| readl(&regs->wValueL));
	ctrl.wIndex  = cpu_to_le16((readl(&regs->wIndexH)  << 8)
					| readl(&regs->wIndexL));
	ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8)
					| readl(&regs->wLengthL));
	writel(0, &regs->SetupRecv);

	nuke(&dev->ep[0], 0);
	dev->ep[0].stopped = 0;
	if (likely(ctrl.bRequestType & USB_DIR_IN)) {
		dev->ep[0].is_in = 1;
		dev->ep0state = EP0_IN;
		/* detect early status stages */
		writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
	} else {
		dev->ep[0].is_in = 0;
		dev->ep0state = EP0_OUT;

		/* NOTE:  CLEAR_FEATURE is done in software so that we can
		 * synchronize transfer restarts after bulk IN stalls.  data
		 * won't even enter the fifo until the halt is cleared.
		 */
		switch (ctrl.bRequest) {
		case USB_REQ_CLEAR_FEATURE:
			switch (ctrl.bRequestType) {
			case USB_RECIP_ENDPOINT:
				tmp = le16_to_cpu(ctrl.wIndex) & 0x0f;
				/* active endpoint */
				if (tmp > 3 ||
				    (!dev->ep[tmp].ep.desc && tmp != 0))
					goto stall;
				if (ctrl.wIndex & cpu_to_le16(
						USB_DIR_IN)) {
					if (!dev->ep[tmp].is_in)
						goto stall;
				} else {
					if (dev->ep[tmp].is_in)
						goto stall;
				}
				if (ctrl.wValue != cpu_to_le16(
						USB_ENDPOINT_HALT))
					goto stall;
				if (tmp)
					goku_clear_halt(&dev->ep[tmp]);
succeed:
				/* start ep0out status stage */
				writel(~(1<<0), &regs->EOP);
				dev->ep[0].stopped = 1;
				dev->ep0state = EP0_STATUS;
				return;
			case USB_RECIP_DEVICE:
				/* device remote wakeup: always clear */
				if (ctrl.wValue != cpu_to_le16(1))
					goto stall;
				VDBG(dev, "clear dev remote wakeup\n");
				goto succeed;
			case USB_RECIP_INTERFACE:
				goto stall;
			default:		/* pass to gadget driver */
				break;
			}
			break;
		default:
			break;
		}
	}

#ifdef USB_TRACE
	VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
		ctrl.bRequestType, ctrl.bRequest,
		le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex),
		le16_to_cpu(ctrl.wLength));
#endif

	/* hw wants to know when we're configured (or not) */
	dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
				&& ctrl.bRequestType == USB_RECIP_DEVICE);
	if (unlikely(dev->req_config))
		dev->configured = (ctrl.wValue != cpu_to_le16(0));

	/* delegate everything to the gadget driver.
	 * it may respond after this irq handler returns.
	 */
	spin_unlock (&dev->lock);
	tmp = dev->driver->setup(&dev->gadget, &ctrl);
	spin_lock (&dev->lock);
	if (unlikely(tmp < 0)) {
stall:
#ifdef USB_TRACE
		VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
				ctrl.bRequestType, ctrl.bRequest, tmp);
#endif
		command(regs, COMMAND_STALL, 0);
		dev->ep[0].stopped = 1;
		dev->ep0state = EP0_STALL;
	}

	/* expect at least one data or status stage irq */
}

#define ACK(irqbit) { \
		stat &= ~irqbit; \
		writel(~irqbit, &regs->int_status); \
		handled = 1; \
		}

static irqreturn_t goku_irq(int irq, void *_dev)
{
	struct goku_udc			*dev = _dev;
	struct goku_udc_regs __iomem	*regs = dev->regs;
	struct goku_ep			*ep;
	u32				stat, handled = 0;
	unsigned			i, rescans = 5;

	spin_lock(&dev->lock);

rescan:
	stat = readl(&regs->int_status) & dev->int_enable;
	if (!stat)
		goto done;
	dev->irqs++;

	/* device-wide irqs */
	if (unlikely(stat & INT_DEVWIDE)) {
		if (stat & INT_SYSERROR) {
			ERROR(dev, "system error\n");
			stop_activity(dev);
			stat = 0;
			handled = 1;
			// FIXME have a neater way to prevent re-enumeration
			dev->driver = NULL;
			goto done;
		}
		if (stat & INT_PWRDETECT) {
			writel(~stat, &regs->int_status);
			if (readl(&dev->regs->power_detect) & PW_DETECT) {
				VDBG(dev, "connect\n");
				ep0_start(dev);
			} else {
				DBG(dev, "disconnect\n");
				if (dev->gadget.speed == USB_SPEED_FULL)
					stop_activity(dev);
				dev->ep0state = EP0_DISCONNECT;
				dev->int_enable = INT_DEVWIDE;
				writel(dev->int_enable, &dev->regs->int_enable);
			}
			stat = 0;
			handled = 1;
			goto done;
		}
		if (stat & INT_SUSPEND) {
			ACK(INT_SUSPEND);
			if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
				switch (dev->ep0state) {
				case EP0_DISCONNECT:
				case EP0_SUSPEND:
					goto pm_next;
				default:
					break;
				}
				DBG(dev, "USB suspend\n");
				dev->ep0state = EP0_SUSPEND;
				if (dev->gadget.speed != USB_SPEED_UNKNOWN
						&& dev->driver
						&& dev->driver->suspend) {
					spin_unlock(&dev->lock);
					dev->driver->suspend(&dev->gadget);
					spin_lock(&dev->lock);
				}
			} else {
				if (dev->ep0state != EP0_SUSPEND) {
					DBG(dev, "bogus USB resume %d\n",
						dev->ep0state);
					goto pm_next;
				}
				DBG(dev, "USB resume\n");
				dev->ep0state = EP0_IDLE;
				if (dev->gadget.speed != USB_SPEED_UNKNOWN
						&& dev->driver
						&& dev->driver->resume) {
					spin_unlock(&dev->lock);
					dev->driver->resume(&dev->gadget);
					spin_lock(&dev->lock);
				}
			}
		}
pm_next:
		if (stat & INT_USBRESET) {		/* hub reset done */
			ACK(INT_USBRESET);
			INFO(dev, "USB reset done, gadget %s\n",
				dev->driver->driver.name);
		}
		// and INT_ERR on some endpoint's crc/bitstuff/... problem
	}

	/* progress ep0 setup, data, or status stages.
	 * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
	 */
	if (stat & INT_SETUP) {
		ACK(INT_SETUP);
		dev->ep[0].irqs++;
		ep0_setup(dev);
	}
	if (stat & INT_STATUSNAK) {
		ACK(INT_STATUSNAK|INT_ENDPOINT0);
		if (dev->ep0state == EP0_IN) {
			ep = &dev->ep[0];
			ep->irqs++;
			nuke(ep, 0);
			writel(~(1<<0), &regs->EOP);
			dev->ep0state = EP0_STATUS;
		}
	}
	if (stat & INT_ENDPOINT0) {
		ACK(INT_ENDPOINT0);
		ep = &dev->ep[0];
		ep->irqs++;
		pio_advance(ep);
	}

	/* dma completion */
	if (stat & INT_MSTRDEND) {	/* IN */
		ACK(INT_MSTRDEND);
		ep = &dev->ep[UDC_MSTRD_ENDPOINT];
		ep->irqs++;
		dma_advance(dev, ep);
	}
	if (stat & INT_MSTWREND) {	/* OUT */
		ACK(INT_MSTWREND);
		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
		ep->irqs++;
		dma_advance(dev, ep);
	}
	if (stat & INT_MSTWRTMOUT) {	/* OUT */
		ACK(INT_MSTWRTMOUT);
		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
		ep->irqs++;
		ERROR(dev, "%s write timeout ?\n", ep->ep.name);
		// reset dma? then dma_advance()
	}

	/* pio */
	for (i = 1; i < 4; i++) {
		u32		tmp = INT_EPxDATASET(i);

		if (!(stat & tmp))
			continue;
		ep = &dev->ep[i];
		pio_advance(ep);
		if (list_empty (&ep->queue))
			pio_irq_disable(dev, regs, i);
		stat &= ~tmp;
		handled = 1;
		ep->irqs++;
	}

	if (rescans--)
		goto rescan;

done:
	(void)readl(&regs->int_enable);
	spin_unlock(&dev->lock);
	if (stat)
		DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
				readl(&regs->int_status), dev->int_enable);
	return IRQ_RETVAL(handled);
}

#undef ACK

/*-------------------------------------------------------------------------*/

static void gadget_release(struct device *_dev)
{
	struct goku_udc	*dev = dev_get_drvdata(_dev);

	kfree(dev);
}

/* tear down the binding between this driver and the pci device */

static void goku_remove(struct pci_dev *pdev)
{
	struct goku_udc		*dev = pci_get_drvdata(pdev);

	DBG(dev, "%s\n", __func__);

	usb_del_gadget_udc(&dev->gadget);

	BUG_ON(dev->driver);

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	remove_proc_entry(proc_node_name, NULL);
#endif
	if (dev->regs)
		udc_reset(dev);
	if (dev->got_irq)
		free_irq(pdev->irq, dev);
	if (dev->regs)
		iounmap(dev->regs);
	if (dev->got_region)
		release_mem_region(pci_resource_start (pdev, 0),
				pci_resource_len (pdev, 0));
	if (dev->enabled)
		pci_disable_device(pdev);

	dev->regs = NULL;

	INFO(dev, "unbind\n");
}

/* wrap this driver around the specified pci device, but
 * don't respond over USB until a gadget driver binds to us.
 */

static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct goku_udc		*dev = NULL;
	unsigned long		resource, len;
	void __iomem		*base = NULL;
	int			retval;

	if (!pdev->irq) {
		printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
		retval = -ENODEV;
		goto err;
	}

	/* alloc, and start init */
	dev = kzalloc (sizeof *dev, GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto err;
	}

	pci_set_drvdata(pdev, dev);
	spin_lock_init(&dev->lock);
	dev->pdev = pdev;
	dev->gadget.ops = &goku_ops;
	dev->gadget.max_speed = USB_SPEED_FULL;

	/* the "gadget" abstracts/virtualizes the controller */
	dev->gadget.name = driver_name;

	/* now all the pci goodies ... */
	retval = pci_enable_device(pdev);
	if (retval < 0) {
		DBG(dev, "can't enable, %d\n", retval);
		goto err;
	}
	dev->enabled = 1;

	resource = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	if (!request_mem_region(resource, len, driver_name)) {
		DBG(dev, "controller already in use\n");
		retval = -EBUSY;
		goto err;
	}
	dev->got_region = 1;

	base = ioremap(resource, len);
	if (base == NULL) {
		DBG(dev, "can't map memory\n");
		retval = -EFAULT;
		goto err;
	}
	dev->regs = (struct goku_udc_regs __iomem *) base;

	INFO(dev, "%s\n", driver_desc);
	INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
	INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);

	/* init to known state, then setup irqs */
	udc_reset(dev);
	udc_reinit (dev);
	if (request_irq(pdev->irq, goku_irq, IRQF_SHARED,
			driver_name, dev) != 0) {
		DBG(dev, "request interrupt %d failed\n", pdev->irq);
		retval = -EBUSY;
		goto err;
	}
	dev->got_irq = 1;
	if (use_dma)
		pci_set_master(pdev);


#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	proc_create_single_data(proc_node_name, 0, NULL, udc_proc_read, dev);
#endif

	retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
			gadget_release);
	if (retval)
		goto err;

	return 0;

err:
	if (dev)
		goku_remove (pdev);
	/* gadget_release is not registered yet, kfree explicitly */
	kfree(dev);
	return retval;
}


/*-------------------------------------------------------------------------*/

static const struct pci_device_id pci_ids[] = { {
	.class =	PCI_CLASS_SERIAL_USB_DEVICE,
	.class_mask =	~0,
	.vendor =	0x102f,		/* Toshiba */
	.device =	0x0107,		/* this UDC */
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,

}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);

static struct pci_driver goku_pci_driver = {
	.name =		driver_name,
	.id_table =	pci_ids,

	.probe =	goku_probe,
	.remove =	goku_remove,

	/* FIXME add power management support */
};

module_pci_driver(goku_pci_driver);