// SPDX-License-Identifier: GPL-2.0+
/*
 * drivers/usb/gadget/fsl_qe_udc.c
 *
 * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
 *
 * 	Xie Xiaobo <X.Xie@freescale.com>
 * 	Li Yang <leoli@freescale.com>
 * 	Based on bare-board code from Shlomi Gridish.
 *
 * Description:
 * Freescale QE/CPM USB Peripheral Controller Driver
 * The controller can be found on the MPC8360, MPC8272, etc.
 * MPC8360 Rev 1.1 may need a QE microcode update.
 */
16
17#undef USB_TRACE
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/ioport.h>
22#include <linux/types.h>
23#include <linux/errno.h>
24#include <linux/err.h>
25#include <linux/slab.h>
26#include <linux/list.h>
27#include <linux/interrupt.h>
28#include <linux/io.h>
29#include <linux/moduleparam.h>
30#include <linux/of_address.h>
31#include <linux/of_irq.h>
32#include <linux/of_platform.h>
33#include <linux/dma-mapping.h>
34#include <linux/usb/ch9.h>
35#include <linux/usb/gadget.h>
36#include <linux/usb/otg.h>
37#include <soc/fsl/qe/qe.h>
38#include <asm/cpm.h>
39#include <asm/dma.h>
40#include <asm/reg.h>
41#include "fsl_qe_udc.h"
42
43#define DRIVER_DESC     "Freescale QE/CPM USB Device Controller driver"
44#define DRIVER_AUTHOR   "Xie XiaoBo"
45#define DRIVER_VERSION  "1.0"
46
47#define DMA_ADDR_INVALID        (~(dma_addr_t)0)
48
49static const char driver_name[] = "fsl_qe_udc";
50static const char driver_desc[] = DRIVER_DESC;
51
/* ep names matter to the gadget layer; they must follow the naming
 * convention expected by ep_match() */
53static const char *const ep_name[] = {
54	"ep0-control", /* everyone has ep0 */
55	/* 3 configurable endpoints */
56	"ep1",
57	"ep2",
58	"ep3",
59};
60
61static const struct usb_endpoint_descriptor qe_ep0_desc = {
62	.bLength =		USB_DT_ENDPOINT_SIZE,
63	.bDescriptorType =	USB_DT_ENDPOINT,
64
65	.bEndpointAddress =	0,
66	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
67	.wMaxPacketSize =	USB_MAX_CTRL_PAYLOAD,
68};
69
/********************************************************************
 *      Internal Functions Start
********************************************************************/
/*-----------------------------------------------------------------
 * done() - retire a request; caller has blocked irqs
 *--------------------------------------------------------------*/
76static void done(struct qe_ep *ep, struct qe_req *req, int status)
77{
78	struct qe_udc *udc = ep->udc;
79	unsigned char stopped = ep->stopped;
80
	/* the req->queue list head is used by ep_queue(), which adds the
	 * request to the tail of ep->queue; here the request is dropped
	 * from that list
	 */
85	list_del_init(&req->queue);
86
87	/* req.status should be set as -EINPROGRESS in ep_queue() */
88	if (req->req.status == -EINPROGRESS)
89		req->req.status = status;
90	else
91		status = req->req.status;
92
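	/* A buffer we DMA-mapped ourselves in __qe_ep_queue() is unmapped
	 * here; a buffer that was already mapped by the caller is only
	 * synced back for CPU access.
	 */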
93	if (req->mapped) {
94		dma_unmap_single(udc->gadget.dev.parent,
95			req->req.dma, req->req.length,
96			ep_is_in(ep)
97				? DMA_TO_DEVICE
98				: DMA_FROM_DEVICE);
99		req->req.dma = DMA_ADDR_INVALID;
100		req->mapped = 0;
101	} else
102		dma_sync_single_for_cpu(udc->gadget.dev.parent,
103			req->req.dma, req->req.length,
104			ep_is_in(ep)
105				? DMA_TO_DEVICE
106				: DMA_FROM_DEVICE);
107
108	if (status && (status != -ESHUTDOWN))
109		dev_vdbg(udc->dev, "complete %s req %p stat %d len %u/%u\n",
110			ep->ep.name, &req->req, status,
111			req->req.actual, req->req.length);
112
113	/* don't modify queue heads during completion callback */
114	ep->stopped = 1;
115	spin_unlock(&udc->lock);
116
117	usb_gadget_giveback_request(&ep->ep, &req->req);
118
119	spin_lock(&udc->lock);
120
121	ep->stopped = stopped;
122}
123
124/*-----------------------------------------------------------------
125 * nuke(): delete all requests related to this ep
126 *--------------------------------------------------------------*/
127static void nuke(struct qe_ep *ep, int status)
128{
	/* Retire every request still linked on this ep's queue */
130	while (!list_empty(&ep->queue)) {
131		struct qe_req *req = NULL;
132		req = list_entry(ep->queue.next, struct qe_req, queue);
133
134		done(ep, req, status);
135	}
136}
137
138/*---------------------------------------------------------------------------*
139 * USB and Endpoint manipulate process, include parameter and register       *
140 *---------------------------------------------------------------------------*/
/* @value: 1--set stall 0--clear stall */
142static int qe_eprx_stall_change(struct qe_ep *ep, int value)
143{
144	u16 tem_usep;
145	u8 epnum = ep->epnum;
146	struct qe_udc *udc = ep->udc;
147
148	tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
149	tem_usep = tem_usep & ~USB_RHS_MASK;
150	if (value == 1)
151		tem_usep |= USB_RHS_STALL;
152	else if (ep->dir == USB_DIR_IN)
153		tem_usep |= USB_RHS_IGNORE_OUT;
154
155	out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
156	return 0;
157}
158
159static int qe_eptx_stall_change(struct qe_ep *ep, int value)
160{
161	u16 tem_usep;
162	u8 epnum = ep->epnum;
163	struct qe_udc *udc = ep->udc;
164
165	tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
166	tem_usep = tem_usep & ~USB_THS_MASK;
167	if (value == 1)
168		tem_usep |= USB_THS_STALL;
169	else if (ep->dir == USB_DIR_OUT)
170		tem_usep |= USB_THS_IGNORE_IN;
171
172	out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
173
174	return 0;
175}
176
177static int qe_ep0_stall(struct qe_udc *udc)
178{
179	qe_eptx_stall_change(&udc->eps[0], 1);
180	qe_eprx_stall_change(&udc->eps[0], 1);
181	udc->ep0_state = WAIT_FOR_SETUP;
182	udc->ep0_dir = 0;
183	return 0;
184}
185
186static int qe_eprx_nack(struct qe_ep *ep)
187{
188	u8 epnum = ep->epnum;
189	struct qe_udc *udc = ep->udc;
190
191	if (ep->state == EP_STATE_IDLE) {
192		/* Set the ep's nack */
193		clrsetbits_be16(&udc->usb_regs->usb_usep[epnum],
194				USB_RHS_MASK, USB_RHS_NACK);
195
196		/* Mask Rx and Busy interrupts */
197		clrbits16(&udc->usb_regs->usb_usbmr,
198				(USB_E_RXB_MASK | USB_E_BSY_MASK));
199
200		ep->state = EP_STATE_NACK;
201	}
202	return 0;
203}
204
205static int qe_eprx_normal(struct qe_ep *ep)
206{
207	struct qe_udc *udc = ep->udc;
208
209	if (ep->state == EP_STATE_NACK) {
210		clrsetbits_be16(&udc->usb_regs->usb_usep[ep->epnum],
211				USB_RTHS_MASK, USB_THS_IGNORE_IN);
212
		/* Clear pending RX/busy events and unmask their interrupts */
214		out_be16(&udc->usb_regs->usb_usber,
215				USB_E_BSY_MASK | USB_E_RXB_MASK);
216		setbits16(&udc->usb_regs->usb_usbmr,
217				(USB_E_RXB_MASK | USB_E_BSY_MASK));
218
219		ep->state = EP_STATE_IDLE;
220		ep->has_data = 0;
221	}
222
223	return 0;
224}
225
226static int qe_ep_cmd_stoptx(struct qe_ep *ep)
227{
228	if (ep->udc->soc_type == PORT_CPM)
229		cpm_command(CPM_USB_STOP_TX | (ep->epnum << CPM_USB_EP_SHIFT),
230				CPM_USB_STOP_TX_OPCODE);
231	else
232		qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB,
233				ep->epnum, 0);
234
235	return 0;
236}
237
238static int qe_ep_cmd_restarttx(struct qe_ep *ep)
239{
240	if (ep->udc->soc_type == PORT_CPM)
241		cpm_command(CPM_USB_RESTART_TX | (ep->epnum <<
242				CPM_USB_EP_SHIFT), CPM_USB_RESTART_TX_OPCODE);
243	else
244		qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB,
245				ep->epnum, 0);
246
247	return 0;
248}
249
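/*
 * Flush the endpoint's transmit FIFO: stop transmission, issue the
 * FIFO-flush command, rewind the TX BD ring pointers in the parameter
 * RAM and in software, then restart transmission.
 */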
250static int qe_ep_flushtxfifo(struct qe_ep *ep)
251{
252	struct qe_udc *udc = ep->udc;
253	int i;
254
255	i = (int)ep->epnum;
256
257	qe_ep_cmd_stoptx(ep);
258	out_8(&udc->usb_regs->usb_uscom,
259		USB_CMD_FLUSH_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
260	out_be16(&udc->ep_param[i]->tbptr, in_be16(&udc->ep_param[i]->tbase));
261	out_be32(&udc->ep_param[i]->tstate, 0);
262	out_be16(&udc->ep_param[i]->tbcnt, 0);
263
264	ep->c_txbd = ep->txbase;
265	ep->n_txbd = ep->txbase;
266	qe_ep_cmd_restarttx(ep);
267	return 0;
268}
269
270static int qe_ep_filltxfifo(struct qe_ep *ep)
271{
272	struct qe_udc *udc = ep->udc;
273
274	out_8(&udc->usb_regs->usb_uscom,
275			USB_CMD_STR_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
276	return 0;
277}
278
279static int qe_epbds_reset(struct qe_udc *udc, int pipe_num)
280{
281	struct qe_ep *ep;
282	u32 bdring_len;
283	struct qe_bd __iomem *bd;
284	int i;
285
286	ep = &udc->eps[pipe_num];
287
288	if (ep->dir == USB_DIR_OUT)
289		bdring_len = USB_BDRING_LEN_RX;
290	else
291		bdring_len = USB_BDRING_LEN;
292
293	bd = ep->rxbase;
294	for (i = 0; i < (bdring_len - 1); i++) {
295		out_be32((u32 __iomem *)bd, R_E | R_I);
296		bd++;
297	}
298	out_be32((u32 __iomem *)bd, R_E | R_I | R_W);
299
300	bd = ep->txbase;
301	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
302		out_be32(&bd->buf, 0);
303		out_be32((u32 __iomem *)bd, 0);
304		bd++;
305	}
306	out_be32((u32 __iomem *)bd, T_W);
307
308	return 0;
309}
310
311static int qe_ep_reset(struct qe_udc *udc, int pipe_num)
312{
313	struct qe_ep *ep;
314	u16 tmpusep;
315
316	ep = &udc->eps[pipe_num];
317	tmpusep = in_be16(&udc->usb_regs->usb_usep[pipe_num]);
318	tmpusep &= ~USB_RTHS_MASK;
319
320	switch (ep->dir) {
321	case USB_DIR_BOTH:
322		qe_ep_flushtxfifo(ep);
323		break;
324	case USB_DIR_OUT:
325		tmpusep |= USB_THS_IGNORE_IN;
326		break;
327	case USB_DIR_IN:
328		qe_ep_flushtxfifo(ep);
329		tmpusep |= USB_RHS_IGNORE_OUT;
330		break;
331	default:
332		break;
333	}
334	out_be16(&udc->usb_regs->usb_usep[pipe_num], tmpusep);
335
336	qe_epbds_reset(udc, pipe_num);
337
338	return 0;
339}
340
341static int qe_ep_toggledata01(struct qe_ep *ep)
342{
343	ep->data01 ^= 0x1;
344	return 0;
345}
346
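/*
 * Allocate one MURAM region holding both the RX and TX BD rings of this
 * endpoint, record the ring bases in the parameter RAM and clear the
 * rings (the last BD of each ring gets the wrap bit).
 */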
347static int qe_ep_bd_init(struct qe_udc *udc, unsigned char pipe_num)
348{
349	struct qe_ep *ep = &udc->eps[pipe_num];
350	unsigned long tmp_addr = 0;
351	struct usb_ep_para __iomem *epparam;
352	int i;
353	struct qe_bd __iomem *bd;
354	int bdring_len;
355
356	if (ep->dir == USB_DIR_OUT)
357		bdring_len = USB_BDRING_LEN_RX;
358	else
359		bdring_len = USB_BDRING_LEN;
360
361	epparam = udc->ep_param[pipe_num];
	/* allocate MURAM for the BD rings and set the ep parameters */
363	tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len +
364				USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD);
365	if (IS_ERR_VALUE(tmp_addr))
366		return -ENOMEM;
367
368	out_be16(&epparam->rbase, (u16)tmp_addr);
369	out_be16(&epparam->tbase, (u16)(tmp_addr +
370				(sizeof(struct qe_bd) * bdring_len)));
371
372	out_be16(&epparam->rbptr, in_be16(&epparam->rbase));
373	out_be16(&epparam->tbptr, in_be16(&epparam->tbase));
374
375	ep->rxbase = cpm_muram_addr(tmp_addr);
376	ep->txbase = cpm_muram_addr(tmp_addr + (sizeof(struct qe_bd)
377				* bdring_len));
378	ep->n_rxbd = ep->rxbase;
379	ep->e_rxbd = ep->rxbase;
380	ep->n_txbd = ep->txbase;
381	ep->c_txbd = ep->txbase;
382	ep->data01 = 0; /* data0 */
383
384	/* Init TX and RX bds */
385	bd = ep->rxbase;
386	for (i = 0; i < bdring_len - 1; i++) {
387		out_be32(&bd->buf, 0);
388		out_be32((u32 __iomem *)bd, 0);
389		bd++;
390	}
391	out_be32(&bd->buf, 0);
392	out_be32((u32 __iomem *)bd, R_W);
393
394	bd = ep->txbase;
395	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
396		out_be32(&bd->buf, 0);
397		out_be32((u32 __iomem *)bd, 0);
398		bd++;
399	}
400	out_be32(&bd->buf, 0);
401	out_be32((u32 __iomem *)bd, T_W);
402
403	return 0;
404}
405
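/*
 * Allocate the software rx frame descriptor and one contiguous receive
 * buffer, make it DMA-ready, and point each RX BD at its own
 * maxpacket-sized slice of that buffer.
 */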
406static int qe_ep_rxbd_update(struct qe_ep *ep)
407{
408	unsigned int size;
409	int i;
410	unsigned int tmp;
411	struct qe_bd __iomem *bd;
412	unsigned int bdring_len;
413
414	if (ep->rxbase == NULL)
415		return -EINVAL;
416
417	bd = ep->rxbase;
418
419	ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC);
420	if (!ep->rxframe)
421		return -ENOMEM;
422
423	qe_frame_init(ep->rxframe);
424
425	if (ep->dir == USB_DIR_OUT)
426		bdring_len = USB_BDRING_LEN_RX;
427	else
428		bdring_len = USB_BDRING_LEN;
429
430	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1);
431	ep->rxbuffer = kzalloc(size, GFP_ATOMIC);
432	if (!ep->rxbuffer) {
433		kfree(ep->rxframe);
434		return -ENOMEM;
435	}
436
437	ep->rxbuf_d = virt_to_phys((void *)ep->rxbuffer);
438	if (ep->rxbuf_d == DMA_ADDR_INVALID) {
439		ep->rxbuf_d = dma_map_single(ep->udc->gadget.dev.parent,
440					ep->rxbuffer,
441					size,
442					DMA_FROM_DEVICE);
443		ep->rxbufmap = 1;
444	} else {
445		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
446					ep->rxbuf_d, size,
447					DMA_FROM_DEVICE);
448		ep->rxbufmap = 0;
449	}
450
451	size = ep->ep.maxpacket + USB_CRC_SIZE + 2;
452	tmp = ep->rxbuf_d;
453	tmp = (u32)(((tmp >> 2) << 2) + 4);
454
455	for (i = 0; i < bdring_len - 1; i++) {
456		out_be32(&bd->buf, tmp);
457		out_be32((u32 __iomem *)bd, (R_E | R_I));
458		tmp = tmp + size;
459		bd++;
460	}
461	out_be32(&bd->buf, tmp);
462	out_be32((u32 __iomem *)bd, (R_E | R_I | R_W));
463
464	return 0;
465}
466
467static int qe_ep_register_init(struct qe_udc *udc, unsigned char pipe_num)
468{
469	struct qe_ep *ep = &udc->eps[pipe_num];
470	struct usb_ep_para __iomem *epparam;
471	u16 usep, logepnum;
472	u16 tmp;
473	u8 rtfcr = 0;
474
475	epparam = udc->ep_param[pipe_num];
476
477	usep = 0;
478	logepnum = (ep->ep.desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
479	usep |= (logepnum << USB_EPNUM_SHIFT);
480
481	switch (ep->ep.desc->bmAttributes & 0x03) {
482	case USB_ENDPOINT_XFER_BULK:
483		usep |= USB_TRANS_BULK;
484		break;
485	case USB_ENDPOINT_XFER_ISOC:
486		usep |=  USB_TRANS_ISO;
487		break;
488	case USB_ENDPOINT_XFER_INT:
489		usep |= USB_TRANS_INT;
490		break;
491	default:
492		usep |= USB_TRANS_CTR;
493		break;
494	}
495
496	switch (ep->dir) {
497	case USB_DIR_OUT:
498		usep |= USB_THS_IGNORE_IN;
499		break;
500	case USB_DIR_IN:
501		usep |= USB_RHS_IGNORE_OUT;
502		break;
503	default:
504		break;
505	}
506	out_be16(&udc->usb_regs->usb_usep[pipe_num], usep);
507
508	rtfcr = 0x30;
509	out_8(&epparam->rbmr, rtfcr);
510	out_8(&epparam->tbmr, rtfcr);
511
512	tmp = (u16)(ep->ep.maxpacket + USB_CRC_SIZE);
	/* MRBLR must be divisible by 4 */
514	tmp = (u16)(((tmp >> 2) << 2) + 4);
515	out_be16(&epparam->mrblr, tmp);
516
517	return 0;
518}
519
520static int qe_ep_init(struct qe_udc *udc,
521		      unsigned char pipe_num,
522		      const struct usb_endpoint_descriptor *desc)
523{
524	struct qe_ep *ep = &udc->eps[pipe_num];
525	unsigned long flags;
526	int reval = 0;
527	u16 max = 0;
528
529	max = usb_endpoint_maxp(desc);
530
	/* Check that the max packet size is valid for this endpoint;
	 * refer to USB 2.0 spec, table 9-13.
	 */
534	if (pipe_num != 0) {
535		switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
536		case USB_ENDPOINT_XFER_BULK:
537			if (strstr(ep->ep.name, "-iso")
538					|| strstr(ep->ep.name, "-int"))
539				goto en_done;
			switch (udc->gadget.speed) {
			case USB_SPEED_HIGH:
				if ((max == 128) || (max == 256) || (max == 512))
					break;
				fallthrough;
			default:
				switch (max) {
				case 4:
				case 8:
				case 16:
				case 32:
				case 64:
					break;
				default:
					goto en_done;
				}
			}
557			break;
558		case USB_ENDPOINT_XFER_INT:
559			if (strstr(ep->ep.name, "-iso"))	/* bulk is ok */
560				goto en_done;
561			switch (udc->gadget.speed) {
562			case USB_SPEED_HIGH:
563				if (max <= 1024)
564					break;
565			case USB_SPEED_FULL:
566				if (max <= 64)
567					break;
568			default:
569				if (max <= 8)
570					break;
571				goto en_done;
572			}
573			break;
574		case USB_ENDPOINT_XFER_ISOC:
575			if (strstr(ep->ep.name, "-bulk")
576				|| strstr(ep->ep.name, "-int"))
577				goto en_done;
578			switch (udc->gadget.speed) {
579			case USB_SPEED_HIGH:
580				if (max <= 1024)
581					break;
582			case USB_SPEED_FULL:
583				if (max <= 1023)
584					break;
585			default:
586				goto en_done;
587			}
588			break;
589		case USB_ENDPOINT_XFER_CONTROL:
590			if (strstr(ep->ep.name, "-iso")
591				|| strstr(ep->ep.name, "-int"))
592				goto en_done;
593			switch (udc->gadget.speed) {
594			case USB_SPEED_HIGH:
595			case USB_SPEED_FULL:
596				switch (max) {
597				case 1:
598				case 2:
599				case 4:
600				case 8:
601				case 16:
602				case 32:
603				case 64:
604					break;
605				default:
606					goto en_done;
607				}
608			case USB_SPEED_LOW:
609				switch (max) {
610				case 1:
611				case 2:
612				case 4:
613				case 8:
614					break;
615				default:
616					goto en_done;
617				}
618			default:
619				goto en_done;
620			}
621			break;
622
623		default:
624			goto en_done;
625		}
	} /* if pipe_num != 0 */
627
628	spin_lock_irqsave(&udc->lock, flags);
629
630	/* initialize ep structure */
631	ep->ep.maxpacket = max;
632	ep->tm = (u8)(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
633	ep->ep.desc = desc;
634	ep->stopped = 0;
635	ep->init = 1;
636
637	if (pipe_num == 0) {
638		ep->dir = USB_DIR_BOTH;
639		udc->ep0_dir = USB_DIR_OUT;
640		udc->ep0_state = WAIT_FOR_SETUP;
641	} else	{
642		switch (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) {
643		case USB_DIR_OUT:
644			ep->dir = USB_DIR_OUT;
645			break;
646		case USB_DIR_IN:
647			ep->dir = USB_DIR_IN;
648		default:
649			break;
650		}
651	}
652
653	/* hardware special operation */
654	qe_ep_bd_init(udc, pipe_num);
655	if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_OUT)) {
656		reval = qe_ep_rxbd_update(ep);
657		if (reval)
658			goto en_done1;
659	}
660
661	if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) {
662		ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC);
663		if (!ep->txframe)
664			goto en_done2;
665		qe_frame_init(ep->txframe);
666	}
667
668	qe_ep_register_init(udc, pipe_num);
669
670	/* Now HW will be NAKing transfers to that EP,
671	 * until a buffer is queued to it. */
672	spin_unlock_irqrestore(&udc->lock, flags);
673
674	return 0;
675en_done2:
676	kfree(ep->rxbuffer);
677	kfree(ep->rxframe);
678en_done1:
679	spin_unlock_irqrestore(&udc->lock, flags);
680en_done:
681	dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name);
682	return -ENODEV;
683}
684
685static inline void qe_usb_enable(struct qe_udc *udc)
686{
687	setbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
688}
689
690static inline void qe_usb_disable(struct qe_udc *udc)
691{
692	clrbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
693}
694
695/*----------------------------------------------------------------------------*
696 *		USB and EP basic manipulate function end		      *
697 *----------------------------------------------------------------------------*/
698
699
700/******************************************************************************
701		UDC transmit and receive process
702 ******************************************************************************/
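/* Hand one RX BD back to the controller (empty + interrupt, wrap bit
 * preserved) and advance the software recycle pointer. */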
703static void recycle_one_rxbd(struct qe_ep *ep)
704{
705	u32 bdstatus;
706
707	bdstatus = in_be32((u32 __iomem *)ep->e_rxbd);
708	bdstatus = R_I | R_E | (bdstatus & R_W);
709	out_be32((u32 __iomem *)ep->e_rxbd, bdstatus);
710
711	if (bdstatus & R_W)
712		ep->e_rxbd = ep->rxbase;
713	else
714		ep->e_rxbd++;
715}
716
717static void recycle_rxbds(struct qe_ep *ep, unsigned char stopatnext)
718{
719	u32 bdstatus;
720	struct qe_bd __iomem *bd, *nextbd;
721	unsigned char stop = 0;
722
723	nextbd = ep->n_rxbd;
724	bd = ep->e_rxbd;
725	bdstatus = in_be32((u32 __iomem *)bd);
726
727	while (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK) && !stop) {
728		bdstatus = R_E | R_I | (bdstatus & R_W);
729		out_be32((u32 __iomem *)bd, bdstatus);
730
731		if (bdstatus & R_W)
732			bd = ep->rxbase;
733		else
734			bd++;
735
736		bdstatus = in_be32((u32 __iomem *)bd);
737		if (stopatnext && (bd == nextbd))
738			stop = 1;
739	}
740
741	ep->e_rxbd = bd;
742}
743
744static void ep_recycle_rxbds(struct qe_ep *ep)
745{
746	struct qe_bd __iomem *bd = ep->n_rxbd;
747	u32 bdstatus;
748	u8 epnum = ep->epnum;
749	struct qe_udc *udc = ep->udc;
750
751	bdstatus = in_be32((u32 __iomem *)bd);
752	if (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK)) {
753		bd = ep->rxbase +
754				((in_be16(&udc->ep_param[epnum]->rbptr) -
755				  in_be16(&udc->ep_param[epnum]->rbase))
756				 >> 3);
757		bdstatus = in_be32((u32 __iomem *)bd);
758
759		if (bdstatus & R_W)
760			bd = ep->rxbase;
761		else
762			bd++;
763
764		ep->e_rxbd = bd;
765		recycle_rxbds(ep, 0);
766		ep->e_rxbd = ep->n_rxbd;
767	} else
768		recycle_rxbds(ep, 1);
769
770	if (in_be16(&udc->usb_regs->usb_usber) & USB_E_BSY_MASK)
771		out_be16(&udc->usb_regs->usb_usber, USB_E_BSY_MASK);
772
773	if (ep->has_data <= 0 && (!list_empty(&ep->queue)))
774		qe_eprx_normal(ep);
775
776	ep->localnack = 0;
777}
778
779static void setup_received_handle(struct qe_udc *udc,
780					struct usb_ctrlrequest *setup);
781static int qe_ep_rxframe_handle(struct qe_ep *ep);
782static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
/* when the BD PID is SETUP, handle the packet */
784static int ep0_setup_handle(struct qe_udc *udc)
785{
786	struct qe_ep *ep = &udc->eps[0];
787	struct qe_frame *pframe;
788	unsigned int fsize;
789	u8 *cp;
790
791	pframe = ep->rxframe;
792	if ((frame_get_info(pframe) & PID_SETUP)
793			&& (udc->ep0_state == WAIT_FOR_SETUP)) {
794		fsize = frame_get_length(pframe);
795		if (unlikely(fsize != 8))
796			return -EINVAL;
797		cp = (u8 *)&udc->local_setup_buff;
798		memcpy(cp, pframe->data, fsize);
799		ep->data01 = 1;
800
		/* handle the usb command based on the usb_ctrlrequest */
802		setup_received_handle(udc, &udc->local_setup_buff);
803		return 0;
804	}
805	return -EINVAL;
806}
807
808static int qe_ep0_rx(struct qe_udc *udc)
809{
810	struct qe_ep *ep = &udc->eps[0];
811	struct qe_frame *pframe;
812	struct qe_bd __iomem *bd;
813	u32 bdstatus, length;
814	u32 vaddr;
815
816	pframe = ep->rxframe;
817
818	if (ep->dir == USB_DIR_IN) {
819		dev_err(udc->dev, "ep0 not a control endpoint\n");
820		return -EINVAL;
821	}
822
823	bd = ep->n_rxbd;
824	bdstatus = in_be32((u32 __iomem *)bd);
825	length = bdstatus & BD_LENGTH_MASK;
826
827	while (!(bdstatus & R_E) && length) {
828		if ((bdstatus & R_F) && (bdstatus & R_L)
829			&& !(bdstatus & R_ERROR)) {
830			if (length == USB_CRC_SIZE) {
831				udc->ep0_state = WAIT_FOR_SETUP;
832				dev_vdbg(udc->dev,
833					"receive a ZLP in status phase\n");
834			} else {
835				qe_frame_clean(pframe);
836				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
837				frame_set_data(pframe, (u8 *)vaddr);
838				frame_set_length(pframe,
839						(length - USB_CRC_SIZE));
840				frame_set_status(pframe, FRAME_OK);
841				switch (bdstatus & R_PID) {
842				case R_PID_SETUP:
843					frame_set_info(pframe, PID_SETUP);
844					break;
845				case R_PID_DATA1:
846					frame_set_info(pframe, PID_DATA1);
847					break;
848				default:
849					frame_set_info(pframe, PID_DATA0);
850					break;
851				}
852
853				if ((bdstatus & R_PID) == R_PID_SETUP)
854					ep0_setup_handle(udc);
855				else
856					qe_ep_rxframe_handle(ep);
857			}
858		} else {
			dev_err(udc->dev, "error in received frame\n");
860		}
861
862		/* note: don't clear the rxbd's buffer address */
863		recycle_one_rxbd(ep);
864
865		/* Get next BD */
866		if (bdstatus & R_W)
867			bd = ep->rxbase;
868		else
869			bd++;
870
871		bdstatus = in_be32((u32 __iomem *)bd);
872		length = bdstatus & BD_LENGTH_MASK;
873
874	}
875
876	ep->n_rxbd = bd;
877
878	return 0;
879}
880
881static int qe_ep_rxframe_handle(struct qe_ep *ep)
882{
883	struct qe_frame *pframe;
884	u8 framepid = 0;
885	unsigned int fsize;
886	u8 *cp;
887	struct qe_req *req;
888
889	pframe = ep->rxframe;
890
891	if (frame_get_info(pframe) & PID_DATA1)
892		framepid = 0x1;
893
894	if (framepid != ep->data01) {
		dev_err(ep->udc->dev, "data01 toggle mismatch!\n");
896		return -EIO;
897	}
898
899	fsize = frame_get_length(pframe);
900	if (list_empty(&ep->queue)) {
		dev_err(ep->udc->dev, "%s has no request queued!\n", ep->name);
902	} else {
903		req = list_entry(ep->queue.next, struct qe_req, queue);
904
905		cp = (u8 *)(req->req.buf) + req->req.actual;
906		if (cp) {
907			memcpy(cp, pframe->data, fsize);
908			req->req.actual += fsize;
909			if ((fsize < ep->ep.maxpacket) ||
910					(req->req.actual >= req->req.length)) {
911				if (ep->epnum == 0)
912					ep0_req_complete(ep->udc, req);
913				else
914					done(ep, req, 0);
915				if (list_empty(&ep->queue) && ep->epnum != 0)
916					qe_eprx_nack(ep);
917			}
918		}
919	}
920
921	qe_ep_toggledata01(ep);
922
923	return 0;
924}
925
926static void ep_rx_tasklet(struct tasklet_struct *t)
927{
928	struct qe_udc *udc = from_tasklet(udc, t, rx_tasklet);
929	struct qe_ep *ep;
930	struct qe_frame *pframe;
931	struct qe_bd __iomem *bd;
932	unsigned long flags;
933	u32 bdstatus, length;
934	u32 vaddr, i;
935
936	spin_lock_irqsave(&udc->lock, flags);
937
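	/* Drain the RX BD rings of all non-control endpoints that were
	 * flagged for deferred processing by qe_ep_rx().
	 */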
938	for (i = 1; i < USB_MAX_ENDPOINTS; i++) {
939		ep = &udc->eps[i];
940
941		if (ep->dir == USB_DIR_IN || ep->enable_tasklet == 0) {
			dev_dbg(udc->dev,
				"transmit ep or tasklet not enabled, skipping\n");
944			continue;
945		}
946
947		pframe = ep->rxframe;
948		bd = ep->n_rxbd;
949		bdstatus = in_be32((u32 __iomem *)bd);
950		length = bdstatus & BD_LENGTH_MASK;
951
952		while (!(bdstatus & R_E) && length) {
953			if (list_empty(&ep->queue)) {
954				qe_eprx_nack(ep);
				dev_dbg(udc->dev,
					"rx ep has no request, has_data %d\n",
					ep->has_data);
958				break;
959			}
960
961			if ((bdstatus & R_F) && (bdstatus & R_L)
962				&& !(bdstatus & R_ERROR)) {
963				qe_frame_clean(pframe);
964				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
965				frame_set_data(pframe, (u8 *)vaddr);
966				frame_set_length(pframe,
967						(length - USB_CRC_SIZE));
968				frame_set_status(pframe, FRAME_OK);
969				switch (bdstatus & R_PID) {
970				case R_PID_DATA1:
971					frame_set_info(pframe, PID_DATA1);
972					break;
973				case R_PID_SETUP:
974					frame_set_info(pframe, PID_SETUP);
975					break;
976				default:
977					frame_set_info(pframe, PID_DATA0);
978					break;
979				}
980				/* handle the rx frame */
981				qe_ep_rxframe_handle(ep);
982			} else {
983				dev_err(udc->dev,
984					"error in received frame\n");
985			}
986			/* note: don't clear the rxbd's buffer address */
			/* clear the length */
988			out_be32((u32 __iomem *)bd, bdstatus & BD_STATUS_MASK);
989			ep->has_data--;
990			if (!(ep->localnack))
991				recycle_one_rxbd(ep);
992
993			/* Get next BD */
994			if (bdstatus & R_W)
995				bd = ep->rxbase;
996			else
997				bd++;
998
999			bdstatus = in_be32((u32 __iomem *)bd);
1000			length = bdstatus & BD_LENGTH_MASK;
1001		}
1002
1003		ep->n_rxbd = bd;
1004
1005		if (ep->localnack)
1006			ep_recycle_rxbds(ep);
1007
1008		ep->enable_tasklet = 0;
1009	} /* for i=1 */
1010
1011	spin_unlock_irqrestore(&udc->lock, flags);
1012}
1013
1014static int qe_ep_rx(struct qe_ep *ep)
1015{
1016	struct qe_udc *udc;
1017	struct qe_frame *pframe;
1018	struct qe_bd __iomem *bd;
1019	u16 swoffs, ucoffs, emptybds;
1020
1021	udc = ep->udc;
1022	pframe = ep->rxframe;
1023
1024	if (ep->dir == USB_DIR_IN) {
1025		dev_err(udc->dev, "transmit ep in rx function\n");
1026		return -EINVAL;
1027	}
1028
1029	bd = ep->n_rxbd;
1030
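	/* Compare the driver's BD pointer with the controller's rbptr to
	 * count how many BDs are still empty; NAK the endpoint when the
	 * ring is nearly full and defer the data handling to the rx tasklet.
	 */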
1031	swoffs = (u16)(bd - ep->rxbase);
1032	ucoffs = (u16)((in_be16(&udc->ep_param[ep->epnum]->rbptr) -
1033			in_be16(&udc->ep_param[ep->epnum]->rbase)) >> 3);
1034	if (swoffs < ucoffs)
1035		emptybds = USB_BDRING_LEN_RX - ucoffs + swoffs;
1036	else
1037		emptybds = swoffs - ucoffs;
1038
1039	if (emptybds < MIN_EMPTY_BDS) {
1040		qe_eprx_nack(ep);
1041		ep->localnack = 1;
1042		dev_vdbg(udc->dev, "%d empty bds, send NACK\n", emptybds);
1043	}
1044	ep->has_data = USB_BDRING_LEN_RX - emptybds;
1045
1046	if (list_empty(&ep->queue)) {
1047		qe_eprx_nack(ep);
		dev_vdbg(udc->dev, "rx ep has no request queued, %d BDs with data\n",
				ep->has_data);
1050		return 0;
1051	}
1052
1053	tasklet_schedule(&udc->rx_tasklet);
1054	ep->enable_tasklet = 1;
1055
1056	return 0;
1057}
1058
/* send the data described by a frame, independently of any tx_req */
1060static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
1061{
1062	struct qe_udc *udc = ep->udc;
1063	struct qe_bd __iomem *bd;
1064	u16 saveusbmr;
1065	u32 bdstatus, pidmask;
1066	u32 paddr;
1067
1068	if (ep->dir == USB_DIR_OUT) {
1069		dev_err(udc->dev, "receive ep passed to tx function\n");
1070		return -EINVAL;
1071	}
1072
1073	/* Disable the Tx interrupt */
1074	saveusbmr = in_be16(&udc->usb_regs->usb_usbmr);
1075	out_be16(&udc->usb_regs->usb_usbmr,
1076			saveusbmr & ~(USB_E_TXB_MASK | USB_E_TXE_MASK));
1077
1078	bd = ep->n_txbd;
1079	bdstatus = in_be32((u32 __iomem *)bd);
1080
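	/* Only load the BD if the controller is no longer using it (T_R
	 * clear) and its length field has been cleared, i.e. the previous
	 * transmission has been confirmed.
	 */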
1081	if (!(bdstatus & (T_R | BD_LENGTH_MASK))) {
1082		if (frame_get_length(frame) == 0) {
1083			frame_set_data(frame, udc->nullbuf);
1084			frame_set_length(frame, 2);
1085			frame->info |= (ZLP | NO_CRC);
1086			dev_vdbg(udc->dev, "the frame size = 0\n");
1087		}
1088		paddr = virt_to_phys((void *)frame->data);
1089		out_be32(&bd->buf, paddr);
1090		bdstatus = (bdstatus&T_W);
1091		if (!(frame_get_info(frame) & NO_CRC))
1092			bdstatus |= T_R | T_I | T_L | T_TC
1093					| frame_get_length(frame);
1094		else
1095			bdstatus |= T_R | T_I | T_L | frame_get_length(frame);
1096
1097		/* if the packet is a ZLP in status phase */
1098		if ((ep->epnum == 0) && (udc->ep0_state == DATA_STATE_NEED_ZLP))
1099			ep->data01 = 0x1;
1100
1101		if (ep->data01) {
1102			pidmask = T_PID_DATA1;
1103			frame->info |= PID_DATA1;
1104		} else {
1105			pidmask = T_PID_DATA0;
1106			frame->info |= PID_DATA0;
1107		}
1108		bdstatus |= T_CNF;
1109		bdstatus |= pidmask;
1110		out_be32((u32 __iomem *)bd, bdstatus);
1111		qe_ep_filltxfifo(ep);
1112
1113		/* enable the TX interrupt */
1114		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1115
1116		qe_ep_toggledata01(ep);
1117		if (bdstatus & T_W)
1118			ep->n_txbd = ep->txbase;
1119		else
1120			ep->n_txbd++;
1121
1122		return 0;
1123	} else {
1124		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1125		dev_vdbg(udc->dev, "The tx bd is not ready!\n");
1126		return -EBUSY;
1127	}
1128}
1129
/* called when a BD has been transmitted; handles the tx_req
 * bookkeeping for all endpoints except ep0 */
1132static int txcomplete(struct qe_ep *ep, unsigned char restart)
1133{
1134	if (ep->tx_req != NULL) {
1135		struct qe_req *req = ep->tx_req;
1136		unsigned zlp = 0, last_len = 0;
1137
1138		last_len = min_t(unsigned, req->req.length - ep->sent,
1139				ep->ep.maxpacket);
1140
1141		if (!restart) {
1142			int asent = ep->last;
1143			ep->sent += asent;
1144			ep->last -= asent;
1145		} else {
1146			ep->last = 0;
1147		}
1148
		/* zlp needed when req->req.zero is set */
1150		if (req->req.zero) {
1151			if (last_len == 0 ||
1152				(req->req.length % ep->ep.maxpacket) != 0)
1153				zlp = 0;
1154			else
1155				zlp = 1;
1156		} else
1157			zlp = 0;
1158
		/* the request has been transmitted completely */
1160		if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
1161			done(ep, ep->tx_req, 0);
1162			ep->tx_req = NULL;
1163			ep->last = 0;
1164			ep->sent = 0;
1165		}
1166	}
1167
	/* try to pick up a new tx_req for this endpoint */
1169	if (ep->tx_req == NULL) {
1170		if (!list_empty(&ep->queue)) {
1171			ep->tx_req = list_entry(ep->queue.next,	struct qe_req,
1172							queue);
1173			ep->last = 0;
1174			ep->sent = 0;
1175		}
1176	}
1177
1178	return 0;
1179}
1180
/* given a frame and a tx_req, send some data */
1182static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
1183{
1184	unsigned int size;
1185	u8 *buf;
1186
1187	qe_frame_clean(frame);
1188	size = min_t(u32, (ep->tx_req->req.length - ep->sent),
1189				ep->ep.maxpacket);
1190	buf = (u8 *)ep->tx_req->req.buf + ep->sent;
1191	if (buf && size) {
1192		ep->last = size;
1193		ep->tx_req->req.actual += size;
1194		frame_set_data(frame, buf);
1195		frame_set_length(frame, size);
1196		frame_set_status(frame, FRAME_OK);
1197		frame_set_info(frame, 0);
1198		return qe_ep_tx(ep, frame);
1199	}
1200	return -EIO;
1201}
1202
/* given a frame struct, send a ZLP */
1204static int sendnulldata(struct qe_ep *ep, struct qe_frame *frame, uint infor)
1205{
1206	struct qe_udc *udc = ep->udc;
1207
1208	if (frame == NULL)
1209		return -ENODEV;
1210
1211	qe_frame_clean(frame);
1212	frame_set_data(frame, (u8 *)udc->nullbuf);
1213	frame_set_length(frame, 2);
1214	frame_set_status(frame, FRAME_OK);
1215	frame_set_info(frame, (ZLP | NO_CRC | infor));
1216
1217	return qe_ep_tx(ep, frame);
1218}
1219
1220static int frame_create_tx(struct qe_ep *ep, struct qe_frame *frame)
1221{
1222	struct qe_req *req = ep->tx_req;
1223	int reval;
1224
1225	if (req == NULL)
1226		return -ENODEV;
1227
1228	if ((req->req.length - ep->sent) > 0)
1229		reval = qe_usb_senddata(ep, frame);
1230	else
1231		reval = sendnulldata(ep, frame, 0);
1232
1233	return reval;
1234}
1235
/* if direction is DIR_IN, the status transaction is Device->Host;
 * if direction is DIR_OUT, the status transaction is Device<-Host.
 * In the status phase the udc creates a request and collects the status. */
1239static int ep0_prime_status(struct qe_udc *udc, int direction)
1240{
1241
1242	struct qe_ep *ep = &udc->eps[0];
1243
1244	if (direction == USB_DIR_IN) {
1245		udc->ep0_state = DATA_STATE_NEED_ZLP;
1246		udc->ep0_dir = USB_DIR_IN;
1247		sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1248	} else {
1249		udc->ep0_dir = USB_DIR_OUT;
1250		udc->ep0_state = WAIT_FOR_OUT_STATUS;
1251	}
1252
1253	return 0;
1254}
1255
/* a request completed on ep0, whether a gadget request or a udc-internal one */
1257static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req)
1258{
1259	struct qe_ep *ep = &udc->eps[0];
	/* because the usb and ep status have already been set in
	 * ch9setaddress() */
1261
1262	switch (udc->ep0_state) {
1263	case DATA_STATE_XMIT:
1264		done(ep, req, 0);
1265		/* receive status phase */
1266		if (ep0_prime_status(udc, USB_DIR_OUT))
1267			qe_ep0_stall(udc);
1268		break;
1269
1270	case DATA_STATE_NEED_ZLP:
1271		done(ep, req, 0);
1272		udc->ep0_state = WAIT_FOR_SETUP;
1273		break;
1274
1275	case DATA_STATE_RECV:
1276		done(ep, req, 0);
1277		/* send status phase */
1278		if (ep0_prime_status(udc, USB_DIR_IN))
1279			qe_ep0_stall(udc);
1280		break;
1281
1282	case WAIT_FOR_OUT_STATUS:
1283		done(ep, req, 0);
1284		udc->ep0_state = WAIT_FOR_SETUP;
1285		break;
1286
1287	case WAIT_FOR_SETUP:
1288		dev_vdbg(udc->dev, "Unexpected interrupt\n");
1289		break;
1290
1291	default:
1292		qe_ep0_stall(udc);
1293		break;
1294	}
1295}
1296
1297static int ep0_txcomplete(struct qe_ep *ep, unsigned char restart)
1298{
1299	struct qe_req *tx_req = NULL;
1300	struct qe_frame *frame = ep->txframe;
1301
1302	if ((frame_get_info(frame) & (ZLP | NO_REQ)) == (ZLP | NO_REQ)) {
1303		if (!restart)
1304			ep->udc->ep0_state = WAIT_FOR_SETUP;
1305		else
1306			sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1307		return 0;
1308	}
1309
1310	tx_req = ep->tx_req;
1311	if (tx_req != NULL) {
1312		if (!restart) {
1313			int asent = ep->last;
1314			ep->sent += asent;
1315			ep->last -= asent;
1316		} else {
1317			ep->last = 0;
1318		}
1319
		/* the request has been transmitted completely */
1321		if ((ep->tx_req->req.length - ep->sent) <= 0) {
1322			ep->tx_req->req.actual = (unsigned int)ep->sent;
1323			ep0_req_complete(ep->udc, ep->tx_req);
1324			ep->tx_req = NULL;
1325			ep->last = 0;
1326			ep->sent = 0;
1327		}
1328	} else {
		dev_vdbg(ep->udc->dev, "ep0 has no request to complete\n");
1330	}
1331
1332	return 0;
1333}
1334
1335static int ep0_txframe_handle(struct qe_ep *ep)
1336{
	/* on transmit error, send the frame again */
1338	if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1339		qe_ep_flushtxfifo(ep);
		dev_vdbg(ep->udc->dev, "EP0 transmit error, resending\n");
1341		if (frame_get_info(ep->txframe) & PID_DATA0)
1342			ep->data01 = 0;
1343		else
1344			ep->data01 = 1;
1345
1346		ep0_txcomplete(ep, 1);
1347	} else
1348		ep0_txcomplete(ep, 0);
1349
1350	frame_create_tx(ep, ep->txframe);
1351	return 0;
1352}
1353
1354static int qe_ep0_txconf(struct qe_ep *ep)
1355{
1356	struct qe_bd __iomem *bd;
1357	struct qe_frame *pframe;
1358	u32 bdstatus;
1359
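	/* Walk the confirmed TX BDs: a BD is finished once T_R is clear
	 * and status bits other than the wrap bit are still set.
	 */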
1360	bd = ep->c_txbd;
1361	bdstatus = in_be32((u32 __iomem *)bd);
1362	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1363		pframe = ep->txframe;
1364
1365		/* clear and recycle the BD */
1366		out_be32((u32 __iomem *)bd, bdstatus & T_W);
1367		out_be32(&bd->buf, 0);
1368		if (bdstatus & T_W)
1369			ep->c_txbd = ep->txbase;
1370		else
1371			ep->c_txbd++;
1372
1373		if (ep->c_txbd == ep->n_txbd) {
1374			if (bdstatus & DEVICE_T_ERROR) {
1375				frame_set_status(pframe, FRAME_ERROR);
1376				if (bdstatus & T_TO)
1377					pframe->status |= TX_ER_TIMEOUT;
1378				if (bdstatus & T_UN)
1379					pframe->status |= TX_ER_UNDERUN;
1380			}
1381			ep0_txframe_handle(ep);
1382		}
1383
1384		bd = ep->c_txbd;
1385		bdstatus = in_be32((u32 __iomem *)bd);
1386	}
1387
1388	return 0;
1389}
1390
1391static int ep_txframe_handle(struct qe_ep *ep)
1392{
1393	if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1394		qe_ep_flushtxfifo(ep);
		dev_vdbg(ep->udc->dev, "%s transmit error, resending\n",
			ep->ep.name);
1396		if (frame_get_info(ep->txframe) & PID_DATA0)
1397			ep->data01 = 0;
1398		else
1399			ep->data01 = 1;
1400
1401		txcomplete(ep, 1);
1402	} else
1403		txcomplete(ep, 0);
1404
1405	frame_create_tx(ep, ep->txframe); /* send the data */
1406	return 0;
1407}
1408
/* confirm the BDs that have already been transmitted */
1410static int qe_ep_txconf(struct qe_ep *ep)
1411{
1412	struct qe_bd __iomem *bd;
1413	struct qe_frame *pframe = NULL;
1414	u32 bdstatus;
1415	unsigned char breakonrxinterrupt = 0;
1416
1417	bd = ep->c_txbd;
1418	bdstatus = in_be32((u32 __iomem *)bd);
1419	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1420		pframe = ep->txframe;
1421		if (bdstatus & DEVICE_T_ERROR) {
1422			frame_set_status(pframe, FRAME_ERROR);
1423			if (bdstatus & T_TO)
1424				pframe->status |= TX_ER_TIMEOUT;
1425			if (bdstatus & T_UN)
1426				pframe->status |= TX_ER_UNDERUN;
1427		}
1428
1429		/* clear and recycle the BD */
1430		out_be32((u32 __iomem *)bd, bdstatus & T_W);
1431		out_be32(&bd->buf, 0);
1432		if (bdstatus & T_W)
1433			ep->c_txbd = ep->txbase;
1434		else
1435			ep->c_txbd++;
1436
1437		/* handle the tx frame */
1438		ep_txframe_handle(ep);
1439		bd = ep->c_txbd;
1440		bdstatus = in_be32((u32 __iomem *)bd);
1441	}
1442	if (breakonrxinterrupt)
1443		return -EIO;
1444	else
1445		return 0;
1446}
1447
1448/* Add a request in queue, and try to transmit a packet */
1449static int ep_req_send(struct qe_ep *ep, struct qe_req *req)
1450{
1451	int reval = 0;
1452
1453	if (ep->tx_req == NULL) {
1454		ep->sent = 0;
1455		ep->last = 0;
1456		txcomplete(ep, 0); /* can gain a new tx_req */
1457		reval = frame_create_tx(ep, ep->txframe);
1458	}
1459	return reval;
1460}
1461
/* copy data for this request straight out of BDs that already hold packets */
1463static int ep_req_rx(struct qe_ep *ep, struct qe_req *req)
1464{
1465	struct qe_udc *udc = ep->udc;
1466	struct qe_frame *pframe = NULL;
1467	struct qe_bd __iomem *bd;
1468	u32 bdstatus, length;
1469	u32 vaddr, fsize;
1470	u8 *cp;
1471	u8 finish_req = 0;
1472	u8 framepid;
1473
1474	if (list_empty(&ep->queue)) {
		dev_vdbg(udc->dev, "no request pending on this ep\n");
1476		return 0;
1477	}
1478	pframe = ep->rxframe;
1479
1480	bd = ep->n_rxbd;
1481	bdstatus = in_be32((u32 __iomem *)bd);
1482	length = bdstatus & BD_LENGTH_MASK;
1483
1484	while (!(bdstatus & R_E) && length) {
1485		if (finish_req)
1486			break;
1487		if ((bdstatus & R_F) && (bdstatus & R_L)
1488					&& !(bdstatus & R_ERROR)) {
1489			qe_frame_clean(pframe);
1490			vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
1491			frame_set_data(pframe, (u8 *)vaddr);
1492			frame_set_length(pframe, (length - USB_CRC_SIZE));
1493			frame_set_status(pframe, FRAME_OK);
1494			switch (bdstatus & R_PID) {
1495			case R_PID_DATA1:
1496				frame_set_info(pframe, PID_DATA1); break;
1497			default:
1498				frame_set_info(pframe, PID_DATA0); break;
1499			}
1500			/* handle the rx frame */
1501
1502			if (frame_get_info(pframe) & PID_DATA1)
1503				framepid = 0x1;
1504			else
1505				framepid = 0;
1506
1507			if (framepid != ep->data01) {
				dev_vdbg(udc->dev, "data01 toggle mismatch!\n");
1509			} else {
1510				fsize = frame_get_length(pframe);
1511
1512				cp = (u8 *)(req->req.buf) + req->req.actual;
1513				if (cp) {
1514					memcpy(cp, pframe->data, fsize);
1515					req->req.actual += fsize;
1516					if ((fsize < ep->ep.maxpacket)
1517						|| (req->req.actual >=
1518							req->req.length)) {
1519						finish_req = 1;
1520						done(ep, req, 0);
1521						if (list_empty(&ep->queue))
1522							qe_eprx_nack(ep);
1523					}
1524				}
1525				qe_ep_toggledata01(ep);
1526			}
1527		} else {
			dev_err(udc->dev, "error in received frame\n");
1529		}
1530
		/* note: don't clear the rxbd's buffer address,
		 * only clear the length */
1533		out_be32((u32 __iomem *)bd, (bdstatus & BD_STATUS_MASK));
1534		ep->has_data--;
1535
1536		/* Get next BD */
1537		if (bdstatus & R_W)
1538			bd = ep->rxbase;
1539		else
1540			bd++;
1541
1542		bdstatus = in_be32((u32 __iomem *)bd);
1543		length = bdstatus & BD_LENGTH_MASK;
1544	}
1545
1546	ep->n_rxbd = bd;
1547	ep_recycle_rxbds(ep);
1548
1549	return 0;
1550}
1551
/* the request is already on the queue; just kick reception if needed */
1553static int ep_req_receive(struct qe_ep *ep, struct qe_req *req)
1554{
1555	if (ep->state == EP_STATE_NACK) {
1556		if (ep->has_data <= 0) {
1557			/* Enable rx and unmask rx interrupt */
1558			qe_eprx_normal(ep);
1559		} else {
			/* Copy the data already held in the BDs */
1561			ep_req_rx(ep, req);
1562		}
1563	}
1564
1565	return 0;
1566}
1567
/********************************************************************
	Internal Functions End
********************************************************************/
1571
1572/*-----------------------------------------------------------------------
1573	Endpoint Management Functions For Gadget
1574 -----------------------------------------------------------------------*/
1575static int qe_ep_enable(struct usb_ep *_ep,
1576			 const struct usb_endpoint_descriptor *desc)
1577{
1578	struct qe_udc *udc;
1579	struct qe_ep *ep;
1580	int retval = 0;
1581	unsigned char epnum;
1582
1583	ep = container_of(_ep, struct qe_ep, ep);
1584
1585	/* catch various bogus parameters */
1586	if (!_ep || !desc || _ep->name == ep_name[0] ||
1587			(desc->bDescriptorType != USB_DT_ENDPOINT))
1588		return -EINVAL;
1589
1590	udc = ep->udc;
1591	if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
1592		return -ESHUTDOWN;
1593
1594	epnum = (u8)desc->bEndpointAddress & 0xF;
1595
1596	retval = qe_ep_init(udc, epnum, desc);
1597	if (retval != 0) {
1598		cpm_muram_free(cpm_muram_offset(ep->rxbase));
1599		dev_dbg(udc->dev, "enable ep%d failed\n", ep->epnum);
1600		return -EINVAL;
1601	}
1602	dev_dbg(udc->dev, "enable ep%d successful\n", ep->epnum);
1603	return 0;
1604}
1605
1606static int qe_ep_disable(struct usb_ep *_ep)
1607{
1608	struct qe_udc *udc;
1609	struct qe_ep *ep;
1610	unsigned long flags;
1611	unsigned int size;
1612
1613	ep = container_of(_ep, struct qe_ep, ep);
1614	udc = ep->udc;
1615
1616	if (!_ep || !ep->ep.desc) {
1617		dev_dbg(udc->dev, "%s not enabled\n", _ep ? ep->ep.name : NULL);
1618		return -EINVAL;
1619	}
1620
1621	spin_lock_irqsave(&udc->lock, flags);
1622	/* Nuke all pending requests (does flush) */
1623	nuke(ep, -ESHUTDOWN);
1624	ep->ep.desc = NULL;
1625	ep->stopped = 1;
1626	ep->tx_req = NULL;
1627	qe_ep_reset(udc, ep->epnum);
1628	spin_unlock_irqrestore(&udc->lock, flags);
1629
1630	cpm_muram_free(cpm_muram_offset(ep->rxbase));
1631
1632	if (ep->dir == USB_DIR_OUT)
1633		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1634				(USB_BDRING_LEN_RX + 1);
1635	else
1636		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1637				(USB_BDRING_LEN + 1);
1638
1639	if (ep->dir != USB_DIR_IN) {
1640		kfree(ep->rxframe);
1641		if (ep->rxbufmap) {
1642			dma_unmap_single(udc->gadget.dev.parent,
1643					ep->rxbuf_d, size,
1644					DMA_FROM_DEVICE);
1645			ep->rxbuf_d = DMA_ADDR_INVALID;
1646		} else {
1647			dma_sync_single_for_cpu(
1648					udc->gadget.dev.parent,
1649					ep->rxbuf_d, size,
1650					DMA_FROM_DEVICE);
1651		}
1652		kfree(ep->rxbuffer);
1653	}
1654
1655	if (ep->dir != USB_DIR_OUT)
1656		kfree(ep->txframe);
1657
1658	dev_dbg(udc->dev, "disabled %s OK\n", _ep->name);
1659	return 0;
1660}
1661
1662static struct usb_request *qe_alloc_request(struct usb_ep *_ep,	gfp_t gfp_flags)
1663{
1664	struct qe_req *req;
1665
1666	req = kzalloc(sizeof(*req), gfp_flags);
1667	if (!req)
1668		return NULL;
1669
1670	req->req.dma = DMA_ADDR_INVALID;
1671
1672	INIT_LIST_HEAD(&req->queue);
1673
1674	return &req->req;
1675}
1676
1677static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
1678{
1679	struct qe_req *req;
1680
1681	req = container_of(_req, struct qe_req, req);
1682
1683	if (_req)
1684		kfree(req);
1685}
1686
1687static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
1688{
1689	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1690	struct qe_req *req = container_of(_req, struct qe_req, req);
1691	struct qe_udc *udc;
1692	int reval;
1693
1694	udc = ep->udc;
1695	/* catch various bogus parameters */
1696	if (!_req || !req->req.complete || !req->req.buf
1697			|| !list_empty(&req->queue)) {
1698		dev_dbg(udc->dev, "bad params\n");
1699		return -EINVAL;
1700	}
1701	if (!_ep || (!ep->ep.desc && ep_index(ep))) {
1702		dev_dbg(udc->dev, "bad ep\n");
1703		return -EINVAL;
1704	}
1705
1706	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
1707		return -ESHUTDOWN;
1708
1709	req->ep = ep;
1710
1711	/* map virtual address to hardware */
1712	if (req->req.dma == DMA_ADDR_INVALID) {
1713		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1714					req->req.buf,
1715					req->req.length,
1716					ep_is_in(ep)
1717					? DMA_TO_DEVICE :
1718					DMA_FROM_DEVICE);
1719		req->mapped = 1;
1720	} else {
1721		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
1722					req->req.dma, req->req.length,
1723					ep_is_in(ep)
1724					? DMA_TO_DEVICE :
1725					DMA_FROM_DEVICE);
1726		req->mapped = 0;
1727	}
1728
1729	req->req.status = -EINPROGRESS;
1730	req->req.actual = 0;
1731
1732	list_add_tail(&req->queue, &ep->queue);
	dev_vdbg(udc->dev, "gadget queued a request on %s, length %d\n",
			ep->name, req->req.length);
1735
1736	/* push the request to device */
1737	if (ep_is_in(ep))
1738		reval = ep_req_send(ep, req);
1739
1740	/* EP0 */
1741	if (ep_index(ep) == 0 && req->req.length > 0) {
1742		if (ep_is_in(ep))
1743			udc->ep0_state = DATA_STATE_XMIT;
1744		else
1745			udc->ep0_state = DATA_STATE_RECV;
1746	}
1747
1748	if (ep->dir == USB_DIR_OUT)
1749		reval = ep_req_receive(ep, req);
1750
1751	return 0;
1752}
1753
1754/* queues (submits) an I/O request to an endpoint */
1755static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1756		       gfp_t gfp_flags)
1757{
1758	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1759	struct qe_udc *udc = ep->udc;
1760	unsigned long flags;
1761	int ret;
1762
1763	spin_lock_irqsave(&udc->lock, flags);
1764	ret = __qe_ep_queue(_ep, _req);
1765	spin_unlock_irqrestore(&udc->lock, flags);
1766	return ret;
1767}
1768
1769/* dequeues (cancels, unlinks) an I/O request from an endpoint */
1770static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1771{
1772	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1773	struct qe_req *req;
1774	unsigned long flags;
1775
1776	if (!_ep || !_req)
1777		return -EINVAL;
1778
1779	spin_lock_irqsave(&ep->udc->lock, flags);
1780
1781	/* make sure it's actually queued on this endpoint */
1782	list_for_each_entry(req, &ep->queue, queue) {
1783		if (&req->req == _req)
1784			break;
1785	}
1786
1787	if (&req->req != _req) {
1788		spin_unlock_irqrestore(&ep->udc->lock, flags);
1789		return -EINVAL;
1790	}
1791
1792	done(ep, req, -ECONNRESET);
1793
1794	spin_unlock_irqrestore(&ep->udc->lock, flags);
1795	return 0;
1796}
1797
1798/*-----------------------------------------------------------------
1799 * modify the endpoint halt feature
1800 * @ep: the non-isochronous endpoint being stalled
1801 * @value: 1--set halt  0--clear halt
1802 * Returns zero, or a negative error code.
1803*----------------------------------------------------------------*/
1804static int qe_ep_set_halt(struct usb_ep *_ep, int value)
1805{
1806	struct qe_ep *ep;
1807	unsigned long flags;
1808	int status = -EOPNOTSUPP;
1809	struct qe_udc *udc;
1810
1811	ep = container_of(_ep, struct qe_ep, ep);
1812	if (!_ep || !ep->ep.desc) {
1813		status = -EINVAL;
1814		goto out;
1815	}
1816
1817	udc = ep->udc;
	/* An attempt to halt an IN ep will fail if any transfer
	 * requests are still queued */
1820	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
1821		status = -EAGAIN;
1822		goto out;
1823	}
1824
1825	status = 0;
1826	spin_lock_irqsave(&ep->udc->lock, flags);
1827	qe_eptx_stall_change(ep, value);
1828	qe_eprx_stall_change(ep, value);
1829	spin_unlock_irqrestore(&ep->udc->lock, flags);
1830
1831	if (ep->epnum == 0) {
1832		udc->ep0_state = WAIT_FOR_SETUP;
1833		udc->ep0_dir = 0;
1834	}
1835
1836	/* set data toggle to DATA0 on clear halt */
1837	if (value == 0)
1838		ep->data01 = 0;
1839out:
1840	dev_vdbg(udc->dev, "%s %s halt stat %d\n", ep->ep.name,
1841			value ?  "set" : "clear", status);
1842
1843	return status;
1844}
1845
1846static const struct usb_ep_ops qe_ep_ops = {
1847	.enable = qe_ep_enable,
1848	.disable = qe_ep_disable,
1849
1850	.alloc_request = qe_alloc_request,
1851	.free_request = qe_free_request,
1852
1853	.queue = qe_ep_queue,
1854	.dequeue = qe_ep_dequeue,
1855
1856	.set_halt = qe_ep_set_halt,
1857};
1858
1859/*------------------------------------------------------------------------
1860	Gadget Driver Layer Operations
1861 ------------------------------------------------------------------------*/
1862
1863/* Get the current frame number */
1864static int qe_get_frame(struct usb_gadget *gadget)
1865{
1866	struct qe_udc *udc = container_of(gadget, struct qe_udc, gadget);
1867	u16 tmp;
1868
1869	tmp = in_be16(&udc->usb_param->frame_n);
1870	if (tmp & 0x8000)
1871		return tmp & 0x07ff;
1872	return -EINVAL;
1873}
1874
1875static int fsl_qe_start(struct usb_gadget *gadget,
1876		struct usb_gadget_driver *driver);
1877static int fsl_qe_stop(struct usb_gadget *gadget);
1878
1879/* defined in usb_gadget.h */
1880static const struct usb_gadget_ops qe_gadget_ops = {
1881	.get_frame = qe_get_frame,
1882	.udc_start = fsl_qe_start,
1883	.udc_stop = fsl_qe_stop,
1884};
1885
1886/*-------------------------------------------------------------------------
1887	USB ep0 Setup process in BUS Enumeration
1888 -------------------------------------------------------------------------*/
1889static int udc_reset_ep_queue(struct qe_udc *udc, u8 pipe)
1890{
1891	struct qe_ep *ep = &udc->eps[pipe];
1892
1893	nuke(ep, -ECONNRESET);
1894	ep->tx_req = NULL;
1895	return 0;
1896}
1897
1898static int reset_queues(struct qe_udc *udc)
1899{
1900	u8 pipe;
1901
1902	for (pipe = 0; pipe < USB_MAX_ENDPOINTS; pipe++)
1903		udc_reset_ep_queue(udc, pipe);
1904
1905	/* report disconnect; the driver is already quiesced */
1906	spin_unlock(&udc->lock);
1907	usb_gadget_udc_reset(&udc->gadget, udc->driver);
1908	spin_lock(&udc->lock);
1909
1910	return 0;
1911}
1912
1913static void ch9setaddress(struct qe_udc *udc, u16 value, u16 index,
1914			u16 length)
1915{
1916	/* Save the new address to device struct */
1917	udc->device_address = (u8) value;
1918	/* Update usb state */
1919	udc->usb_state = USB_STATE_ADDRESS;
1920
	/* Status phase, send a ZLP */
1922	if (ep0_prime_status(udc, USB_DIR_IN))
1923		qe_ep0_stall(udc);
1924}
1925
1926static void ownercomplete(struct usb_ep *_ep, struct usb_request *_req)
1927{
1928	struct qe_req *req = container_of(_req, struct qe_req, req);
1929
1930	req->req.buf = NULL;
1931	kfree(req);
1932}
1933
1934static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
1935			u16 index, u16 length)
1936{
1937	u16 usb_status = 0;
1938	struct qe_req *req;
1939	struct qe_ep *ep;
1940	int status = 0;
1941
1942	ep = &udc->eps[0];
1943	if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1944		/* Get device status */
1945		usb_status = 1 << USB_DEVICE_SELF_POWERED;
1946	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
1947		/* Get interface status */
1948		/* We don't have interface information in udc driver */
1949		usb_status = 0;
1950	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
1951		/* Get endpoint status */
1952		int pipe = index & USB_ENDPOINT_NUMBER_MASK;
1953		struct qe_ep *target_ep;
1954		u16 usep;
1955
1956		if (pipe >= USB_MAX_ENDPOINTS)
1957			goto stall;
1958		target_ep = &udc->eps[pipe];
1959
1960		/* stall if endpoint doesn't exist */
1961		if (!target_ep->ep.desc)
1962			goto stall;
1963
1964		usep = in_be16(&udc->usb_regs->usb_usep[pipe]);
1965		if (index & USB_DIR_IN) {
1966			if (target_ep->dir != USB_DIR_IN)
1967				goto stall;
1968			if ((usep & USB_THS_MASK) == USB_THS_STALL)
1969				usb_status = 1 << USB_ENDPOINT_HALT;
1970		} else {
1971			if (target_ep->dir != USB_DIR_OUT)
1972				goto stall;
1973			if ((usep & USB_RHS_MASK) == USB_RHS_STALL)
1974				usb_status = 1 << USB_ENDPOINT_HALT;
1975		}
1976	}
1977
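	/*
	 * Build a driver-internal request for the two status bytes of the
	 * data phase; it is freed in ownercomplete() once it has been sent.
	 */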
	req = container_of(qe_alloc_request(&ep->ep, GFP_ATOMIC),
					struct qe_req, req);
1980	req->req.length = 2;
1981	req->req.buf = udc->statusbuf;
1982	*(u16 *)req->req.buf = cpu_to_le16(usb_status);
1983	req->req.status = -EINPROGRESS;
1984	req->req.actual = 0;
1985	req->req.complete = ownercomplete;
1986
1987	udc->ep0_dir = USB_DIR_IN;
1988
1989	/* data phase */
1990	status = __qe_ep_queue(&ep->ep, &req->req);
1991
1992	if (status == 0)
1993		return;
1994stall:
	dev_err(udc->dev, "Can't respond to getstatus request\n");
1996	qe_ep0_stall(udc);
1997}
1998
/* only handles setup requests; assumes the device is in a normal state */
2000static void setup_received_handle(struct qe_udc *udc,
2001				struct usb_ctrlrequest *setup)
2002{
	/* Fix endianness (udc->local_setup_buff is in CPU endianness now) */
2004	u16 wValue = le16_to_cpu(setup->wValue);
2005	u16 wIndex = le16_to_cpu(setup->wIndex);
2006	u16 wLength = le16_to_cpu(setup->wLength);
2007
2008	/* clear the previous request in the ep0 */
2009	udc_reset_ep_queue(udc, 0);
2010
2011	if (setup->bRequestType & USB_DIR_IN)
2012		udc->ep0_dir = USB_DIR_IN;
2013	else
2014		udc->ep0_dir = USB_DIR_OUT;
2015
2016	switch (setup->bRequest) {
2017	case USB_REQ_GET_STATUS:
		/* Data+Status phase from udc */
2019		if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
2020					!= (USB_DIR_IN | USB_TYPE_STANDARD))
2021			break;
2022		ch9getstatus(udc, setup->bRequestType, wValue, wIndex,
2023					wLength);
2024		return;
2025
2026	case USB_REQ_SET_ADDRESS:
2027		/* Status phase from udc */
2028		if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
2029						USB_RECIP_DEVICE))
2030			break;
2031		ch9setaddress(udc, wValue, wIndex, wLength);
2032		return;
2033
2034	case USB_REQ_CLEAR_FEATURE:
2035	case USB_REQ_SET_FEATURE:
2036		/* Requests with no data phase, status phase from udc */
2037		if ((setup->bRequestType & USB_TYPE_MASK)
2038					!= USB_TYPE_STANDARD)
2039			break;
2040
2041		if ((setup->bRequestType & USB_RECIP_MASK)
2042				== USB_RECIP_ENDPOINT) {
2043			int pipe = wIndex & USB_ENDPOINT_NUMBER_MASK;
2044			struct qe_ep *ep;
2045
2046			if (wValue != 0 || wLength != 0
2047				|| pipe >= USB_MAX_ENDPOINTS)
2048				break;
2049			ep = &udc->eps[pipe];
2050
2051			spin_unlock(&udc->lock);
2052			qe_ep_set_halt(&ep->ep,
2053					(setup->bRequest == USB_REQ_SET_FEATURE)
2054						? 1 : 0);
2055			spin_lock(&udc->lock);
2056		}
2057
2058		ep0_prime_status(udc, USB_DIR_IN);
2059
2060		return;
2061
2062	default:
2063		break;
2064	}
2065
2066	if (wLength) {
2067		/* Data phase from gadget, status phase from udc */
2068		if (setup->bRequestType & USB_DIR_IN) {
2069			udc->ep0_state = DATA_STATE_XMIT;
2070			udc->ep0_dir = USB_DIR_IN;
2071		} else {
2072			udc->ep0_state = DATA_STATE_RECV;
2073			udc->ep0_dir = USB_DIR_OUT;
2074		}
2075		spin_unlock(&udc->lock);
2076		if (udc->driver->setup(&udc->gadget,
2077					&udc->local_setup_buff) < 0)
2078			qe_ep0_stall(udc);
2079		spin_lock(&udc->lock);
2080	} else {
2081		/* No data phase, IN status from gadget */
2082		udc->ep0_dir = USB_DIR_IN;
2083		spin_unlock(&udc->lock);
2084		if (udc->driver->setup(&udc->gadget,
2085					&udc->local_setup_buff) < 0)
2086			qe_ep0_stall(udc);
2087		spin_lock(&udc->lock);
2088		udc->ep0_state = DATA_STATE_NEED_ZLP;
2089	}
2090}
2091
2092/*-------------------------------------------------------------------------
2093	USB Interrupt handlers
2094 -------------------------------------------------------------------------*/
2095static void suspend_irq(struct qe_udc *udc)
2096{
2097	udc->resume_state = udc->usb_state;
2098	udc->usb_state = USB_STATE_SUSPENDED;
2099
	/* report suspend to the driver; serial.c does not support this */
2101	if (udc->driver->suspend)
2102		udc->driver->suspend(&udc->gadget);
2103}
2104
2105static void resume_irq(struct qe_udc *udc)
2106{
2107	udc->usb_state = udc->resume_state;
2108	udc->resume_state = 0;
2109
2110	/* report resume to the driver; serial.c does not support this */
2111	if (udc->driver->resume)
2112		udc->driver->resume(&udc->gadget);
2113}
2114
2115static void idle_irq(struct qe_udc *udc)
2116{
2117	u8 usbs;
2118
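	/*
	 * idle_irq() is called for the IDLE event and uses the line state
	 * in USBS to decide whether to report a suspend or a resume.
	 */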
2119	usbs = in_8(&udc->usb_regs->usb_usbs);
2120	if (usbs & USB_IDLE_STATUS_MASK) {
2121		if ((udc->usb_state) != USB_STATE_SUSPENDED)
2122			suspend_irq(udc);
2123	} else {
2124		if (udc->usb_state == USB_STATE_SUSPENDED)
2125			resume_irq(udc);
2126	}
2127}
2128
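/*
 * Bus reset: disable the controller, clear the device address, reset
 * every initialized endpoint and all request queues, then re-enable
 * the controller in the default state.
 */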
2129static int reset_irq(struct qe_udc *udc)
2130{
2131	unsigned char i;
2132
2133	if (udc->usb_state == USB_STATE_DEFAULT)
2134		return 0;
2135
2136	qe_usb_disable(udc);
2137	out_8(&udc->usb_regs->usb_usadr, 0);
2138
2139	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2140		if (udc->eps[i].init)
2141			qe_ep_reset(udc, i);
2142	}
2143
2144	reset_queues(udc);
2145	udc->usb_state = USB_STATE_DEFAULT;
2146	udc->ep0_state = WAIT_FOR_SETUP;
2147	udc->ep0_dir = USB_DIR_OUT;
2148	qe_usb_enable(udc);
2149	return 0;
2150}
2151
2152static int bsy_irq(struct qe_udc *udc)
2153{
2154	return 0;
2155}
2156
2157static int txe_irq(struct qe_udc *udc)
2158{
2159	return 0;
2160}
2161
2162/* ep0 tx interrupts are also handled here */
2163static int tx_irq(struct qe_udc *udc)
2164{
2165	struct qe_ep *ep;
2166	struct qe_bd __iomem *bd;
2167	int i, res = 0;
2168
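	/*
	 * A new device address from SET_ADDRESS only takes effect once the
	 * status stage has gone out, so program usb_usadr here after the
	 * status-stage transmit completes.
	 */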
2169	if ((udc->usb_state == USB_STATE_ADDRESS)
2170		&& (in_8(&udc->usb_regs->usb_usadr) == 0))
2171		out_8(&udc->usb_regs->usb_usadr, udc->device_address);
2172
2173	for (i = (USB_MAX_ENDPOINTS-1); ((i >= 0) && (res == 0)); i--) {
2174		ep = &udc->eps[i];
2175		if (ep && ep->init && (ep->dir != USB_DIR_OUT)) {
2176			bd = ep->c_txbd;
2177			if (!(in_be32((u32 __iomem *)bd) & T_R)
2178						&& (in_be32(&bd->buf))) {
2179				/* confirm the transmitted bd */
2180				if (ep->epnum == 0)
2181					res = qe_ep0_txconf(ep);
2182				else
2183					res = qe_ep_txconf(ep);
2184			}
2185		}
2186	}
2187	return res;
2188}
2189
2191/* setup packet rx is also handled in this function */
2192static void rx_irq(struct qe_udc *udc)
2193{
2194	struct qe_ep *ep;
2195	struct qe_bd __iomem *bd;
2196	int i;
2197
2198	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2199		ep = &udc->eps[i];
2200		if (ep && ep->init && (ep->dir != USB_DIR_IN)) {
2201			bd = ep->n_rxbd;
2202			if (!(in_be32((u32 __iomem *)bd) & R_E)
2203						&& (in_be32(&bd->buf))) {
2204				if (ep->epnum == 0) {
2205					qe_ep0_rx(udc);
2206				} else {
2207					/* non-setup packet receive */
2208					qe_ep_rx(ep);
2209				}
2210			}
2211		}
2212	}
2213}
2214
2215static irqreturn_t qe_udc_irq(int irq, void *_udc)
2216{
2217	struct qe_udc *udc = (struct qe_udc *)_udc;
2218	u16 irq_src;
2219	irqreturn_t status = IRQ_NONE;
2220	unsigned long flags;
2221
2222	spin_lock_irqsave(&udc->lock, flags);
2223
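	/*
	 * Only service events that are both pending in USBER and enabled
	 * in USBMR; writing the handled bits back to USBER acknowledges
	 * them.
	 */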
2224	irq_src = in_be16(&udc->usb_regs->usb_usber) &
2225		in_be16(&udc->usb_regs->usb_usbmr);
2226	/* Clear notification bits */
2227	out_be16(&udc->usb_regs->usb_usber, irq_src);
2228	/* USB Interrupt */
2229	if (irq_src & USB_E_IDLE_MASK) {
2230		idle_irq(udc);
2231		irq_src &= ~USB_E_IDLE_MASK;
2232		status = IRQ_HANDLED;
2233	}
2234
2235	if (irq_src & USB_E_TXB_MASK) {
2236		tx_irq(udc);
2237		irq_src &= ~USB_E_TXB_MASK;
2238		status = IRQ_HANDLED;
2239	}
2240
2241	if (irq_src & USB_E_RXB_MASK) {
2242		rx_irq(udc);
2243		irq_src &= ~USB_E_RXB_MASK;
2244		status = IRQ_HANDLED;
2245	}
2246
2247	if (irq_src & USB_E_RESET_MASK) {
2248		reset_irq(udc);
2249		irq_src &= ~USB_E_RESET_MASK;
2250		status = IRQ_HANDLED;
2251	}
2252
2253	if (irq_src & USB_E_BSY_MASK) {
2254		bsy_irq(udc);
2255		irq_src &= ~USB_E_BSY_MASK;
2256		status = IRQ_HANDLED;
2257	}
2258
2259	if (irq_src & USB_E_TXE_MASK) {
2260		txe_irq(udc);
2261		irq_src &= ~USB_E_TXE_MASK;
2262		status = IRQ_HANDLED;
2263	}
2264
2265	spin_unlock_irqrestore(&udc->lock, flags);
2266
2267	return status;
2268}
2269
2270/*-------------------------------------------------------------------------
2271	Gadget driver probe and unregister.
2272 --------------------------------------------------------------------------*/
2273static int fsl_qe_start(struct usb_gadget *gadget,
2274		struct usb_gadget_driver *driver)
2275{
2276	struct qe_udc *udc;
2277	unsigned long flags;
2278
2279	udc = container_of(gadget, struct qe_udc, gadget);
2280	/* a lock is needed, though it is unclear whether this lock or another should be used */
2281	spin_lock_irqsave(&udc->lock, flags);
2282
2283	driver->driver.bus = NULL;
2284	/* hook up the driver */
2285	udc->driver = driver;
2286	udc->gadget.speed = driver->max_speed;
2287
2288	/* Enable IRQ reg and Set usbcmd reg EN bit */
2289	qe_usb_enable(udc);
2290
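	/* clear any stale events, then unmask the default device interrupts */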
2291	out_be16(&udc->usb_regs->usb_usber, 0xffff);
2292	out_be16(&udc->usb_regs->usb_usbmr, USB_E_DEFAULT_DEVICE);
2293	udc->usb_state = USB_STATE_ATTACHED;
2294	udc->ep0_state = WAIT_FOR_SETUP;
2295	udc->ep0_dir = USB_DIR_OUT;
2296	spin_unlock_irqrestore(&udc->lock, flags);
2297
2298	return 0;
2299}
2300
2301static int fsl_qe_stop(struct usb_gadget *gadget)
2302{
2303	struct qe_udc *udc;
2304	struct qe_ep *loop_ep;
2305	unsigned long flags;
2306
2307	udc = container_of(gadget, struct qe_udc, gadget);
2308	/* stop usb controller, disable intr */
2309	qe_usb_disable(udc);
2310
2311	/* in fact, not needed */
2312	udc->usb_state = USB_STATE_ATTACHED;
2313	udc->ep0_state = WAIT_FOR_SETUP;
2314	udc->ep0_dir = 0;
2315
2316	/* standard operation */
2317	spin_lock_irqsave(&udc->lock, flags);
2318	udc->gadget.speed = USB_SPEED_UNKNOWN;
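	/* retire any outstanding requests on ep0 and on every other endpoint */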
2319	nuke(&udc->eps[0], -ESHUTDOWN);
2320	list_for_each_entry(loop_ep, &udc->gadget.ep_list, ep.ep_list)
2321		nuke(loop_ep, -ESHUTDOWN);
2322	spin_unlock_irqrestore(&udc->lock, flags);
2323
2324	udc->driver = NULL;
2325
2326	return 0;
2327}
2328
2329/* udc structure's alloc and setup, include ep-param alloc */
2330static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
2331{
2332	struct qe_udc *udc;
2333	struct device_node *np = ofdev->dev.of_node;
2334	unsigned long tmp_addr = 0;
2335	struct usb_device_para __iomem *usbpram;
2336	unsigned int i;
2337	u64 size;
2338	u32 offset;
2339
2340	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
2341	if (!udc)
2342		goto cleanup;
2343
2344	udc->dev = &ofdev->dev;
2345
2346	/* get the address of the USB parameter RAM in MURAM from the device tree */
2347	offset = *of_get_address(np, 1, &size, NULL);
2348	udc->usb_param = cpm_muram_addr(offset);
2349	memset_io(udc->usb_param, 0, size);
2350
2351	usbpram = udc->usb_param;
2352	out_be16(&usbpram->frame_n, 0);
2353	out_be32(&usbpram->rstate, 0);
2354
2355	tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS *
2356					sizeof(struct usb_ep_para)),
2357					   USB_EP_PARA_ALIGNMENT);
2358	if (IS_ERR_VALUE(tmp_addr))
2359		goto cleanup;
2360
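	/*
	 * Carve the block into one 32-byte parameter area per endpoint and
	 * record each area's MURAM offset in the endpoint pointer table of
	 * the USB parameter RAM.
	 */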
2361	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2362		out_be16(&usbpram->epptr[i], (u16)tmp_addr);
2363		udc->ep_param[i] = cpm_muram_addr(tmp_addr);
2364		tmp_addr += 32;
2365	}
2366
2367	memset_io(udc->ep_param[0], 0,
2368			USB_MAX_ENDPOINTS * sizeof(struct usb_ep_para));
2369
2370	udc->resume_state = USB_STATE_NOTATTACHED;
2371	udc->usb_state = USB_STATE_POWERED;
2372	udc->ep0_dir = 0;
2373
2374	spin_lock_init(&udc->lock);
2375	return udc;
2376
2377cleanup:
2378	kfree(udc);
2379	return NULL;
2380}
2381
2382/* USB Controller register init */
2383static int qe_udc_reg_init(struct qe_udc *udc)
2384{
2385	struct usb_ctlr __iomem *qe_usbregs;
2386	qe_usbregs = udc->usb_regs;
2387
2388	/* Spec says that we must enable the USB controller to change mode. */
2389	out_8(&qe_usbregs->usb_usmod, 0x01);
2390	/* Mode changed, now disable it, since muram isn't initialized yet. */
2391	out_8(&qe_usbregs->usb_usmod, 0x00);
2392
2393	/* Initialize the rest. */
2394	out_be16(&qe_usbregs->usb_usbmr, 0);
2395	out_8(&qe_usbregs->usb_uscom, 0);
2396	out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
2397
2398	return 0;
2399}
2400
2401static int qe_ep_config(struct qe_udc *udc, unsigned char pipe_num)
2402{
2403	struct qe_ep *ep = &udc->eps[pipe_num];
2404
2405	ep->udc = udc;
2406	strcpy(ep->name, ep_name[pipe_num]);
2407	ep->ep.name = ep_name[pipe_num];
2408
2409	if (pipe_num == 0) {
2410		ep->ep.caps.type_control = true;
2411	} else {
2412		ep->ep.caps.type_iso = true;
2413		ep->ep.caps.type_bulk = true;
2414		ep->ep.caps.type_int = true;
2415	}
2416
2417	ep->ep.caps.dir_in = true;
2418	ep->ep.caps.dir_out = true;
2419
2420	ep->ep.ops = &qe_ep_ops;
2421	ep->stopped = 1;
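	/* no real limit yet; the actual maxpacket is established when the
	 * endpoint is enabled with a descriptor */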
2422	usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
2423	ep->ep.desc = NULL;
2424	ep->dir = 0xff;
2425	ep->epnum = (u8)pipe_num;
2426	ep->sent = 0;
2427	ep->last = 0;
2428	ep->init = 0;
2429	ep->rxframe = NULL;
2430	ep->txframe = NULL;
2431	ep->tx_req = NULL;
2432	ep->state = EP_STATE_IDLE;
2433	ep->has_data = 0;
2434
2435	/* the queue lists any req for this ep */
2436	INIT_LIST_HEAD(&ep->queue);
2437
2438	/* gadget.ep_list is used for ep_autoconfig, so ep0 is not added */
2439	if (pipe_num != 0)
2440		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
2441
2442	ep->gadget = &udc->gadget;
2443
2444	return 0;
2445}
2446
2447/*-----------------------------------------------------------------------
2448 *	UDC device driver operation functions
2449 *----------------------------------------------------------------------*/
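/*
 * Release callback for gadget.dev: wake up qe_udc_remove() through
 * udc->done, free the endpoint parameter RAM in MURAM and finally the
 * udc structure itself.
 */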
2450static void qe_udc_release(struct device *dev)
2451{
2452	struct qe_udc *udc = container_of(dev, struct qe_udc, gadget.dev);
2453	int i;
2454
2455	complete(udc->done);
2456	cpm_muram_free(cpm_muram_offset(udc->ep_param[0]));
2457	for (i = 0; i < USB_MAX_ENDPOINTS; i++)
2458		udc->ep_param[i] = NULL;
2459
2460	kfree(udc);
2461}
2462
2463/* Driver probe functions */
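/*
 * The probe binds against a device tree node roughly like the sketch
 * below (illustrative only - the unit address, reg offsets/sizes and
 * interrupt specifier are board and SoC specific):
 *
 *	usb@6c0 {
 *		compatible = "fsl,mpc8323-qe-usb";
 *		reg = <0x6c0 0x40 0x8b00 0x100>;
 *		interrupts = <11>;
 *		interrupt-parent = <&qeic>;
 *		mode = "peripheral";
 *	};
 *
 * reg index 0 is the controller register block and index 1 the USB
 * parameter RAM area in MURAM; the "mode" property must be
 * "peripheral" for this driver to bind.
 */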
2464static const struct of_device_id qe_udc_match[];
2465static int qe_udc_probe(struct platform_device *ofdev)
2466{
2467	struct qe_udc *udc;
2468	const struct of_device_id *match;
2469	struct device_node *np = ofdev->dev.of_node;
2470	struct qe_ep *ep;
2471	int ret = 0;
2472	unsigned int i;
2473	const void *prop;
2474
2475	match = of_match_device(qe_udc_match, &ofdev->dev);
2476	if (!match)
2477		return -EINVAL;
2478
2479	prop = of_get_property(np, "mode", NULL);
2480	if (!prop || strcmp(prop, "peripheral"))
2481		return -ENODEV;
2482
2483	/* Initialize the udc structure including QH member and other member */
2484	udc = qe_udc_config(ofdev);
2485	if (!udc) {
2486		dev_err(&ofdev->dev, "failed to initialize\n");
2487		return -ENOMEM;
2488	}
2489
2490	udc->soc_type = (unsigned long)match->data;
2491	udc->usb_regs = of_iomap(np, 0);
2492	if (!udc->usb_regs) {
2493		ret = -ENOMEM;
2494		goto err1;
2495	}
2496
2497	/* initialize the usb hw regs except for the per-EP regs,
2498	 * leave the usbintr reg untouched */
2499	qe_udc_reg_init(udc);
2500
2501	/* here come the standard probe operations:
2502	 * set up the qe_udc->gadget.xxx fields */
2503	udc->gadget.ops = &qe_gadget_ops;
2504
2505	/* gadget.ep0 is a pointer */
2506	udc->gadget.ep0 = &udc->eps[0].ep;
2507
2508	INIT_LIST_HEAD(&udc->gadget.ep_list);
2509
2510	/* modified when the gadget driver is registered */
2511	udc->gadget.speed = USB_SPEED_UNKNOWN;
2512
2513	/* name: Identifies the controller hardware type. */
2514	udc->gadget.name = driver_name;
2515	udc->gadget.dev.parent = &ofdev->dev;
2516
2517	/* initialize qe_ep struct */
2518	for (i = 0; i < USB_MAX_ENDPOINTS ; i++) {
2519		/* because the ep type isn't decided here,
2520		 * qe_ep_init() should be called in ep_enable() */
2521
2522		/* setup the qe_ep struct and link ep.ep.list
2523		 * into gadget.ep_list */
2524		qe_ep_config(udc, (unsigned char)i);
2525	}
2526
2527	/* ep0 is initialized here */
2528	ret = qe_ep_init(udc, 0, &qe_ep0_desc);
2529	if (ret)
2530		goto err2;
2531
2532	/* create a buffer for ZLP sends; it must remain zeroed */
2533	udc->nullbuf = devm_kzalloc(&ofdev->dev, 256, GFP_KERNEL);
2534	if (udc->nullbuf == NULL) {
2535		ret = -ENOMEM;
2536		goto err3;
2537	}
2538
2539	/* buffer for data of get_status request */
2540	udc->statusbuf = devm_kzalloc(&ofdev->dev, 2, GFP_KERNEL);
2541	if (udc->statusbuf == NULL) {
2542		ret = -ENOMEM;
2543		goto err3;
2544	}
2545
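	/*
	 * Obtain a DMA address for the ZLP buffer: use the physical
	 * address directly if it is usable, otherwise fall back to a
	 * streaming DMA mapping and remember to unmap it on teardown.
	 */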
2546	udc->nullp = virt_to_phys((void *)udc->nullbuf);
2547	if (udc->nullp == DMA_ADDR_INVALID) {
2548		udc->nullp = dma_map_single(
2549					udc->gadget.dev.parent,
2550					udc->nullbuf,
2551					256,
2552					DMA_TO_DEVICE);
2553		udc->nullmap = 1;
2554	} else {
2555		dma_sync_single_for_device(udc->gadget.dev.parent,
2556					udc->nullp, 256,
2557					DMA_TO_DEVICE);
2558	}
2559
2560	tasklet_setup(&udc->rx_tasklet, ep_rx_tasklet);
2561	/* request irq and disable DR */
2562	udc->usb_irq = irq_of_parse_and_map(np, 0);
2563	if (!udc->usb_irq) {
2564		ret = -EINVAL;
2565		goto err_noirq;
2566	}
2567
2568	ret = request_irq(udc->usb_irq, qe_udc_irq, 0,
2569				driver_name, udc);
2570	if (ret) {
2571		dev_err(udc->dev, "cannot request irq %d err %d\n",
2572				udc->usb_irq, ret);
2573		goto err4;
2574	}
2575
2576	ret = usb_add_gadget_udc_release(&ofdev->dev, &udc->gadget,
2577			qe_udc_release);
2578	if (ret)
2579		goto err5;
2580
2581	platform_set_drvdata(ofdev, udc);
2582	dev_info(udc->dev,
2583			"%s USB controller initialized as device\n",
2584			(udc->soc_type == PORT_QE) ? "QE" : "CPM");
2585	return 0;
2586
2587err5:
2588	free_irq(udc->usb_irq, udc);
2589err4:
2590	irq_dispose_mapping(udc->usb_irq);
2591err_noirq:
2592	if (udc->nullmap) {
2593		dma_unmap_single(udc->gadget.dev.parent,
2594				udc->nullp, 256,
2595				DMA_TO_DEVICE);
2596		udc->nullp = DMA_ADDR_INVALID;
2597	} else {
2598		dma_sync_single_for_cpu(udc->gadget.dev.parent,
2599				udc->nullp, 256,
2600				DMA_TO_DEVICE);
2601	}
2602err3:
2603	ep = &udc->eps[0];
2604	cpm_muram_free(cpm_muram_offset(ep->rxbase));
2605	kfree(ep->rxframe);
2606	kfree(ep->rxbuffer);
2607	kfree(ep->txframe);
2608err2:
2609	iounmap(udc->usb_regs);
2610err1:
2611	kfree(udc);
2612	return ret;
2613}
2614
2615#ifdef CONFIG_PM
2616static int qe_udc_suspend(struct platform_device *dev, pm_message_t state)
2617{
2618	return -ENOTSUPP;
2619}
2620
2621static int qe_udc_resume(struct platform_device *dev)
2622{
2623	return -ENOTSUPP;
2624}
2625#endif
2626
2627static int qe_udc_remove(struct platform_device *ofdev)
2628{
2629	struct qe_udc *udc = platform_get_drvdata(ofdev);
2630	struct qe_ep *ep;
2631	unsigned int size;
2632	DECLARE_COMPLETION_ONSTACK(done);
2633
2634	usb_del_gadget_udc(&udc->gadget);
2635
2636	udc->done = &done;
2637	tasklet_disable(&udc->rx_tasklet);
2638
2639	if (udc->nullmap) {
2640		dma_unmap_single(udc->gadget.dev.parent,
2641				udc->nullp, 256,
2642				DMA_TO_DEVICE);
2643		udc->nullp = DMA_ADDR_INVALID;
2644	} else {
2645		dma_sync_single_for_cpu(udc->gadget.dev.parent,
2646				udc->nullp, 256,
2647				DMA_TO_DEVICE);
2648	}
2649
2650	ep = &udc->eps[0];
2651	cpm_muram_free(cpm_muram_offset(ep->rxbase));
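	/* size of the ep0 receive buffer that was set up for DMA */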
2652	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1);
2653
2654	kfree(ep->rxframe);
2655	if (ep->rxbufmap) {
2656		dma_unmap_single(udc->gadget.dev.parent,
2657				ep->rxbuf_d, size,
2658				DMA_FROM_DEVICE);
2659		ep->rxbuf_d = DMA_ADDR_INVALID;
2660	} else {
2661		dma_sync_single_for_cpu(udc->gadget.dev.parent,
2662				ep->rxbuf_d, size,
2663				DMA_FROM_DEVICE);
2664	}
2665
2666	kfree(ep->rxbuffer);
2667	kfree(ep->txframe);
2668
2669	free_irq(udc->usb_irq, udc);
2670	irq_dispose_mapping(udc->usb_irq);
2671
2672	tasklet_kill(&udc->rx_tasklet);
2673
2674	iounmap(udc->usb_regs);
2675
2676	/* wait for release() of gadget.dev to free udc */
2677	wait_for_completion(&done);
2678
2679	return 0;
2680}
2681
2682/*-------------------------------------------------------------------------*/
2683static const struct of_device_id qe_udc_match[] = {
2684	{
2685		.compatible = "fsl,mpc8323-qe-usb",
2686		.data = (void *)PORT_QE,
2687	},
2688	{
2689		.compatible = "fsl,mpc8360-qe-usb",
2690		.data = (void *)PORT_QE,
2691	},
2692	{
2693		.compatible = "fsl,mpc8272-cpm-usb",
2694		.data = (void *)PORT_CPM,
2695	},
2696	{},
2697};
2698
2699MODULE_DEVICE_TABLE(of, qe_udc_match);
2700
2701static struct platform_driver udc_driver = {
2702	.driver = {
2703		.name = driver_name,
2704		.of_match_table = qe_udc_match,
2705	},
2706	.probe          = qe_udc_probe,
2707	.remove         = qe_udc_remove,
2708#ifdef CONFIG_PM
2709	.suspend        = qe_udc_suspend,
2710	.resume         = qe_udc_resume,
2711#endif
2712};
2713
2714module_platform_driver(udc_driver);
2715
2716MODULE_DESCRIPTION(DRIVER_DESC);
2717MODULE_AUTHOR(DRIVER_AUTHOR);
2718MODULE_LICENSE("GPL");
2719