1// SPDX-License-Identifier: GPL-2.0
2/*
3 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
4 *
5 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
6 *
7 * Authors: Felipe Balbi <balbi@ti.com>,
8 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
9 */
10
11#include <linux/kernel.h>
12#include <linux/delay.h>
13#include <linux/slab.h>
14#include <linux/spinlock.h>
15#include <linux/platform_device.h>
16#include <linux/pm_runtime.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/list.h>
20#include <linux/dma-mapping.h>
21
22#include <linux/usb/ch9.h>
23#include <linux/usb/gadget.h>
24
25#include "debug.h"
26#include "core.h"
27#include "gadget.h"
28#include "io.h"
29
/*
 * DWC3_ALIGN_FRAME - round (current frame number + @n intervals) down to an
 * interval boundary.
 *
 * The mask trick only works when (d)->interval is a power of two.
 * NOTE(review): for full-speed interrupt endpoints dep->interval is taken
 * straight from bInterval (see dwc3_gadget_set_ep_config) and may not be a
 * power of two -- confirm this macro is only used for isochronous endpoints.
 */
#define DWC3_ALIGN_FRAME(d, n)	(((d)->frame_number + ((d)->interval * (n))) \
					& ~((d)->interval - 1))
32
33/**
34 * dwc3_gadget_set_test_mode - enables usb2 test modes
35 * @dwc: pointer to our context structure
36 * @mode: the mode to set (J, K SE0 NAK, Force Enable)
37 *
38 * Caller should take care of locking. This function will return 0 on
39 * success or -EINVAL if wrong Test Selector is passed.
40 */
41int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
42{
43	u32		reg;
44
45	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
46	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
47
48	switch (mode) {
49	case USB_TEST_J:
50	case USB_TEST_K:
51	case USB_TEST_SE0_NAK:
52	case USB_TEST_PACKET:
53	case USB_TEST_FORCE_ENABLE:
54		reg |= mode << 1;
55		break;
56	default:
57		return -EINVAL;
58	}
59
60	dwc3_gadget_dctl_write_safe(dwc, reg);
61
62	return 0;
63}
64
65/**
66 * dwc3_gadget_get_link_state - gets current state of usb link
67 * @dwc: pointer to our context structure
68 *
69 * Caller should take care of locking. This function will
70 * return the link state on success (>= 0) or -ETIMEDOUT.
71 */
72int dwc3_gadget_get_link_state(struct dwc3 *dwc)
73{
74	u32		reg;
75
76	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
77
78	return DWC3_DSTS_USBLNKST(reg);
79}
80
/**
 * dwc3_gadget_set_link_state - sets usb link to a particular state
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int		retries = 10000;
	u32		reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (!DWC3_VER_IS_PRIOR(DWC3, 194A)) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		/* retries reaches 0 only if DCNRD never cleared */
		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set no action before sending new link state change */
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
		return 0;

	/* wait for a change in DSTS (pre-1.94a cores only) */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	return -ETIMEDOUT;
}
141
142/**
143 * dwc3_ep_inc_trb - increment a trb index.
144 * @index: Pointer to the TRB index to increment.
145 *
146 * The index should never point to the link TRB. After incrementing,
147 * if it is point to the link TRB, wrap around to the beginning. The
148 * link TRB is always at the last TRB entry.
149 */
150static void dwc3_ep_inc_trb(u8 *index)
151{
152	(*index)++;
153	if (*index == (DWC3_TRB_NUM - 1))
154		*index = 0;
155}
156
/**
 * dwc3_ep_inc_enq - increment endpoint's enqueue pointer
 * @dep: The endpoint whose enqueue pointer we're incrementing
 *
 * Wraps around the link TRB; see dwc3_ep_inc_trb().
 */
static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_enqueue);
}
165
/**
 * dwc3_ep_inc_deq - increment endpoint's dequeue pointer
 * @dep: The endpoint whose dequeue pointer we're incrementing
 *
 * Wraps around the link TRB; see dwc3_ep_inc_trb().
 */
static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_dequeue);
}
174
/*
 * dwc3_gadget_del_and_unmap_request - dequeue and unmap a request
 * @dep: the endpoint the request belongs to
 * @req: the request being removed
 * @status: completion code, applied only if the request is still -EINPROGRESS
 *
 * Removes @req from whichever list it currently sits on, resets its TRB
 * bookkeeping and DMA mapping, and emits the giveback trace event. Does NOT
 * invoke the request's ->complete() callback; dwc3_gadget_giveback() does
 * that. For non-control endpoints it also drops a PM runtime reference
 * (presumably matching a get taken when the request was queued -- confirm
 * against the queue path).
 */
static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
		struct dwc3_request *req, int status)
{
	struct dwc3			*dwc = dep->dwc;

	list_del(&req->list);
	req->remaining = 0;
	req->needs_extra_trb = false;
	req->num_trbs = 0;

	/* don't overwrite a status that was already set */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	/* only unmap if a TRB was actually prepared for this request */
	if (req->trb)
		usb_gadget_unmap_request_by_dev(dwc->sysdev,
				&req->request, req->direction);

	req->trb = NULL;
	trace_dwc3_gadget_giveback(req);

	/* dep->number > 1 excludes both directions of ep0 */
	if (dep->number > 1)
		pm_runtime_put(dwc->dev);
}
198
/**
 * dwc3_gadget_giveback - call struct usb_request's ->complete callback
 * @dep: The endpoint to whom the request belongs to
 * @req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3			*dwc = dep->dwc;

	dwc3_gadget_del_and_unmap_request(dep, req, status);
	req->status = DWC3_REQUEST_STATUS_COMPLETED;

	/*
	 * Drop the controller lock around the completion callback: the
	 * gadget driver's ->complete() may re-enter this driver (e.g. to
	 * queue another request) and would deadlock otherwise.
	 */
	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}
221
/**
 * dwc3_send_gadget_generic_command - issue a generic command for the controller
 * @dwc: pointer to the controller context
 * @cmd: the command to be issued
 * @param: command parameter
 *
 * Caller should take care of locking. Issue @cmd with a given @param to @dwc
 * and wait for its completion.
 */
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
		u32 param)
{
	u32		timeout = 500;
	int		status = 0;
	int		ret = 0;
	u32		reg;

	/* program the parameter first, then the command with CMDACT set */
	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	/* busy-poll until the controller clears CMDACT or we run out */
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			status = DWC3_DGCMD_STATUS(reg);
			/* any non-zero hardware status is reported as -EINVAL */
			if (status)
				ret = -EINVAL;
			break;
		}
	} while (--timeout);

	if (!timeout) {
		ret = -ETIMEDOUT;
		status = -ETIMEDOUT;
	}

	/* status (success or failure) always goes to the trace event */
	trace_dwc3_gadget_generic_cmd(cmd, param, status);

	return ret;
}
261
262static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
263
/**
 * dwc3_send_gadget_ep_cmd - issue an endpoint command
 * @dep: the endpoint to which the command is going to be issued
 * @cmd: the command to be issued
 * @params: parameters to the command
 *
 * Caller should handle locking. This function will issue @cmd with given
 * @params to @dep and wait for its completion.
 */
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
		struct dwc3_gadget_ep_cmd_params *params)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3		*dwc = dep->dwc;
	u32			timeout = 5000;
	u32			saved_config = 0;
	u32			reg;

	int			cmd_status = 0;
	int			ret = -EINVAL;

	/*
	 * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or
	 * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an
	 * endpoint command.
	 *
	 * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY
	 * settings. Restore them after the command is completed.
	 *
	 * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
	 */
	if (dwc->gadget->speed <= USB_SPEED_HIGH ||
	    DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
			saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
		}

		if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
			saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
			reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
		}

		/* only touch the register if something actually changed */
		if (saved_config)
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
	}

	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
		int link_state;

		/*
		 * Initiate remote wakeup if the link state is in U3 when
		 * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
		 * link state is in U1/U2, no remote wakeup is needed. The Start
		 * Transfer command will initiate the link recovery.
		 */
		link_state = dwc3_gadget_get_link_state(dwc);
		switch (link_state) {
		case DWC3_LINK_STATE_U2:
			/* U2 in SS/SSP recovers on its own; no wakeup needed */
			if (dwc->gadget->speed >= USB_SPEED_SUPER)
				break;

			fallthrough;
		case DWC3_LINK_STATE_U3:
			ret = __dwc3_gadget_wakeup(dwc);
			dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
					ret);
			break;
		}
	}

	/* parameters must be programmed before DEPCMD triggers the command */
	dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
	dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
	dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);

	/*
	 * Synopsys Databook 2.60a states in section 6.3.2.5.6 of that if we're
	 * not relying on XferNotReady, we can make use of a special "No
	 * Response Update Transfer" command where we should clear both CmdAct
	 * and CmdIOC bits.
	 *
	 * With this, we don't need to wait for command completion and can
	 * straight away issue further commands to the endpoint.
	 *
	 * NOTICE: We're making an assumption that control endpoints will never
	 * make use of Update Transfer command. This is a safe assumption
	 * because we can never have more than one request at a time with
	 * Control Endpoints. If anybody changes that assumption, this chunk
	 * needs to be updated accordingly.
	 */
	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
			!usb_endpoint_xfer_isoc(desc))
		cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
	else
		cmd |= DWC3_DEPCMD_CMDACT;

	dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
	/* busy-poll DEPCMD until the controller clears CMDACT */
	do {
		reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			cmd_status = DWC3_DEPCMD_STATUS(reg);

			switch (cmd_status) {
			case 0:
				ret = 0;
				break;
			case DEPEVT_TRANSFER_NO_RESOURCE:
				dev_WARN(dwc->dev, "No resource for %s\n",
					 dep->name);
				ret = -EINVAL;
				break;
			case DEPEVT_TRANSFER_BUS_EXPIRY:
				/*
				 * SW issues START TRANSFER command to
				 * isochronous ep with future frame interval. If
				 * future interval time has already passed when
				 * core receives the command, it will respond
				 * with an error status of 'Bus Expiry'.
				 *
				 * Instead of always returning -EINVAL, let's
				 * give a hint to the gadget driver that this is
				 * the case by returning -EAGAIN.
				 */
				ret = -EAGAIN;
				break;
			default:
				dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
			}

			break;
		}
	} while (--timeout);

	if (timeout == 0) {
		ret = -ETIMEDOUT;
		cmd_status = -ETIMEDOUT;
	}

	trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);

	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
		if (ret == 0)
			dep->flags |= DWC3_EP_TRANSFER_STARTED;

		/*
		 * NOTE(review): the transfer resource index is read for any
		 * non-timeout completion, even failures (-EINVAL/-EAGAIN) --
		 * presumably intentional; confirm against the databook.
		 */
		if (ret != -ETIMEDOUT)
			dwc3_gadget_ep_get_transfer_index(dep);
	}

	/* restore the GUSB2PHYCFG bits cleared above */
	if (saved_config) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		reg |= saved_config;
		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
	}

	return ret;
}
421
422static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
423{
424	struct dwc3 *dwc = dep->dwc;
425	struct dwc3_gadget_ep_cmd_params params;
426	u32 cmd = DWC3_DEPCMD_CLEARSTALL;
427
428	/*
429	 * As of core revision 2.60a the recommended programming model
430	 * is to set the ClearPendIN bit when issuing a Clear Stall EP
431	 * command for IN endpoints. This is to prevent an issue where
432	 * some (non-compliant) hosts may not send ACK TPs for pending
433	 * IN transfers due to a mishandled error condition. Synopsys
434	 * STAR 9000614252.
435	 */
436	if (dep->direction &&
437	    !DWC3_VER_IS_PRIOR(DWC3, 260A) &&
438	    (dwc->gadget->speed >= USB_SPEED_SUPER))
439		cmd |= DWC3_DEPCMD_CLEARPENDIN;
440
441	memset(&params, 0, sizeof(params));
442
443	return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
444}
445
446static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
447		struct dwc3_trb *trb)
448{
449	u32		offset = (char *) trb - (char *) dep->trb_pool;
450
451	return dep->trb_pool_dma + offset;
452}
453
454static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
455{
456	struct dwc3		*dwc = dep->dwc;
457
458	if (dep->trb_pool)
459		return 0;
460
461	dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
462			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
463			&dep->trb_pool_dma, GFP_KERNEL);
464	if (!dep->trb_pool) {
465		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
466				dep->name);
467		return -ENOMEM;
468	}
469
470	return 0;
471}
472
473static void dwc3_free_trb_pool(struct dwc3_ep *dep)
474{
475	struct dwc3		*dwc = dep->dwc;
476
477	dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
478			dep->trb_pool, dep->trb_pool_dma);
479
480	dep->trb_pool = NULL;
481	dep->trb_pool_dma = 0;
482}
483
484static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
485{
486	struct dwc3_gadget_ep_cmd_params params;
487
488	memset(&params, 0x00, sizeof(params));
489
490	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
491
492	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
493			&params);
494}
495
496/**
497 * dwc3_gadget_start_config - configure ep resources
498 * @dep: endpoint that is being enabled
499 *
500 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
501 * completion, it will set Transfer Resource for all available endpoints.
502 *
503 * The assignment of transfer resources cannot perfectly follow the data book
504 * due to the fact that the controller driver does not have all knowledge of the
505 * configuration in advance. It is given this information piecemeal by the
506 * composite gadget framework after every SET_CONFIGURATION and
507 * SET_INTERFACE. Trying to follow the databook programming model in this
508 * scenario can cause errors. For two reasons:
509 *
510 * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
511 * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
512 * incorrect in the scenario of multiple interfaces.
513 *
514 * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
515 * endpoint on alt setting (8.1.6).
516 *
517 * The following simplified method is used instead:
518 *
519 * All hardware endpoints can be assigned a transfer resource and this setting
520 * will stay persistent until either a core reset or hibernation. So whenever we
521 * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
522 * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
523 * guaranteed that there are as many transfer resources as endpoints.
524 *
525 * This function is called for each endpoint when it is being enabled but is
526 * triggered only when called for EP0-out, which always happens first, and which
527 * should only happen in one of the above conditions.
528 */
529static int dwc3_gadget_start_config(struct dwc3_ep *dep)
530{
531	struct dwc3_gadget_ep_cmd_params params;
532	struct dwc3		*dwc;
533	u32			cmd;
534	int			i;
535	int			ret;
536
537	if (dep->number)
538		return 0;
539
540	memset(&params, 0x00, sizeof(params));
541	cmd = DWC3_DEPCMD_DEPSTARTCFG;
542	dwc = dep->dwc;
543
544	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
545	if (ret)
546		return ret;
547
548	for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
549		struct dwc3_ep *dep = dwc->eps[i];
550
551		if (!dep)
552			continue;
553
554		ret = dwc3_gadget_set_xfer_resource(dep);
555		if (ret)
556			return ret;
557	}
558
559	return 0;
560}
561
/**
 * dwc3_gadget_set_ep_config - issue a SETEPCONFIG command for an endpoint
 * @dep: endpoint being (re)configured
 * @action: one of DWC3_DEPCFG_ACTION_INIT/MODIFY/RESTORE
 *
 * Builds the DEPCFG parameter words from the endpoint and companion
 * descriptors (type, maxpacket, burst, event enables, FIFO number,
 * bInterval) and sends them to the controller. Caller should take care of
 * locking. Returns the command's completion status.
 */
static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
{
	const struct usb_ss_ep_comp_descriptor *comp_desc;
	const struct usb_endpoint_descriptor *desc;
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;

	comp_desc = dep->endpoint.comp_desc;
	desc = dep->endpoint.desc;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget->speed >= USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst;

		/* the field is encoded as (burst - 1) */
		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
	}

	params.param0 |= action;
	/* RESTORE re-applies the state saved across hibernation/reset */
	if (action == DWC3_DEPCFG_ACTION_RESTORE)
		params.param2 |= dep->saved_state;

	if (usb_endpoint_xfer_control(desc))
		params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;

	/* ep0 (both directions) and isoc endpoints need XferNotReady */
	if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_XFER_COMPLETE_EN
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (!usb_endpoint_xfer_control(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoints 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		u8 bInterval_m1;

		/*
		 * Valid range for DEPCFG.bInterval_m1 is from 0 to 13.
		 *
		 * NOTE: The programming guide incorrectly stated bInterval_m1
		 * must be set to 0 when operating in fullspeed. Internally the
		 * controller does not have this limitation. See DWC_usb3x
		 * programming guide section 3.2.2.1.
		 */
		bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);

		/* FS interrupt: bInterval is in frames, not a log2 exponent */
		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
		    dwc->gadget->speed == USB_SPEED_FULL)
			dep->interval = desc->bInterval;
		else
			dep->interval = 1 << (desc->bInterval - 1);

		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
	}

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}
643
644static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
645		bool interrupt);
646
/**
 * __dwc3_gadget_ep_enable - initializes a hw endpoint
 * @dep: endpoint to be initialized
 * @action: one of INIT, MODIFY or RESTORE
 *
 * Caller should take care of locking. Execute all necessary commands to
 * initialize a HW endpoint so it can be used by a gadget driver.
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3		*dwc = dep->dwc;

	u32			reg;
	int			ret;

	/* first enable: configure transfer resources (EP0-out only, see
	 * dwc3_gadget_start_config()) */
	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dep, action);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb	*trb_st_hw;
		struct dwc3_trb	*trb_link;

		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		/* enable the endpoint in the Device Active Endpoint register */
		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		/* control endpoints skip the TRB-ring setup below */
		if (usb_endpoint_xfer_control(desc))
			goto out;

		/* Initialize the TRB ring */
		dep->trb_dequeue = 0;
		dep->trb_enqueue = 0;
		memset(dep->trb_pool, 0,
		       sizeof(struct dwc3_trb) * DWC3_TRB_NUM);

		/* Link TRB. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	/*
	 * Issue StartTransfer here with no-op TRB so we can always rely on No
	 * Response Update Transfer command.
	 */
	if (usb_endpoint_xfer_bulk(desc) ||
			usb_endpoint_xfer_int(desc)) {
		struct dwc3_gadget_ep_cmd_params params;
		struct dwc3_trb	*trb;
		dma_addr_t trb_dma;
		u32 cmd;

		memset(&params, 0, sizeof(params));
		trb = &dep->trb_pool[0];
		trb_dma = dwc3_trb_dma_offset(dep, trb);

		/* TRB descriptor address: high bits in param0, low in param1 */
		params.param0 = upper_32_bits(trb_dma);
		params.param1 = lower_32_bits(trb_dma);

		cmd = DWC3_DEPCMD_STARTTRANSFER;

		ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
		if (ret < 0)
			return ret;

		if (dep->stream_capable) {
			/*
			 * For streams, at start, there maybe a race where the
			 * host primes the endpoint before the function driver
			 * queues a request to initiate a stream. In that case,
			 * the controller will not see the prime to generate the
			 * ERDY and start stream. To workaround this, issue a
			 * no-op TRB as normal, but end it immediately. As a
			 * result, when the function driver queues the request,
			 * the next START_TRANSFER command will cause the
			 * controller to generate an ERDY to initiate the
			 * stream.
			 */
			dwc3_stop_active_transfer(dep, true, true);

			/*
			 * All stream eps will reinitiate stream on NoStream
			 * rejection until we can determine that the host can
			 * prime after the first transfer.
			 */
			dep->flags |= DWC3_EP_FORCE_RESTART_STREAM;
		}
	}

out:
	trace_dwc3_gadget_ep_enable(dep);

	return 0;
}
756
757static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status)
758{
759	struct dwc3_request		*req;
760
761	dwc3_stop_active_transfer(dep, true, false);
762
763	/* - giveback all requests to gadget driver */
764	while (!list_empty(&dep->started_list)) {
765		req = next_request(&dep->started_list);
766
767		dwc3_gadget_giveback(dep, req, status);
768	}
769
770	while (!list_empty(&dep->pending_list)) {
771		req = next_request(&dep->pending_list);
772
773		dwc3_gadget_giveback(dep, req, status);
774	}
775
776	while (!list_empty(&dep->cancelled_list)) {
777		req = next_request(&dep->cancelled_list);
778
779		dwc3_gadget_giveback(dep, req, status);
780	}
781}
782
/**
 * __dwc3_gadget_ep_disable - disables a hw endpoint
 * @dep: the endpoint to disable
 *
 * This function undoes what __dwc3_gadget_ep_enable did and also removes
 * requests which are currently being processed by the hardware and those which
 * are not yet scheduled.
 *
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;

	trace_dwc3_gadget_ep_disable(dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	/* clear the endpoint's bit in the Device Active Endpoint register */
	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dwc3_remove_requests(dwc, dep, -ESHUTDOWN);

	/* clearing flags also drops DWC3_EP_ENABLED */
	dep->stream_capable = false;
	dep->type = 0;
	dep->flags = 0;

	/* Clear out the ep descriptors for non-ep0 */
	if (dep->number > 1) {
		dep->endpoint.comp_desc = NULL;
		dep->endpoint.desc = NULL;
	}

	return 0;
}
822
823/* -------------------------------------------------------------------------- */
824
/* ep0 is managed internally by the controller driver; gadget drivers
 * must never enable it themselves, so always fail. */
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}
830
/* ep0 is managed internally by the controller driver; gadget drivers
 * must never disable it themselves, so always fail. */
static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}
835
836/* -------------------------------------------------------------------------- */
837
838static int dwc3_gadget_ep_enable(struct usb_ep *ep,
839		const struct usb_endpoint_descriptor *desc)
840{
841	struct dwc3_ep			*dep;
842	struct dwc3			*dwc;
843	unsigned long			flags;
844	int				ret;
845
846	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
847		pr_debug("dwc3: invalid parameters\n");
848		return -EINVAL;
849	}
850
851	if (!desc->wMaxPacketSize) {
852		pr_debug("dwc3: missing wMaxPacketSize\n");
853		return -EINVAL;
854	}
855
856	dep = to_dwc3_ep(ep);
857	dwc = dep->dwc;
858
859	if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
860					"%s is already enabled\n",
861					dep->name))
862		return 0;
863
864	spin_lock_irqsave(&dwc->lock, flags);
865	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
866	spin_unlock_irqrestore(&dwc->lock, flags);
867
868	return ret;
869}
870
871static int dwc3_gadget_ep_disable(struct usb_ep *ep)
872{
873	struct dwc3_ep			*dep;
874	struct dwc3			*dwc;
875	unsigned long			flags;
876	int				ret;
877
878	if (!ep) {
879		pr_debug("dwc3: invalid parameters\n");
880		return -EINVAL;
881	}
882
883	dep = to_dwc3_ep(ep);
884	dwc = dep->dwc;
885
886	if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
887					"%s is already disabled\n",
888					dep->name))
889		return 0;
890
891	spin_lock_irqsave(&dwc->lock, flags);
892	ret = __dwc3_gadget_ep_disable(dep);
893	spin_unlock_irqrestore(&dwc->lock, flags);
894
895	return ret;
896}
897
898static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
899		gfp_t gfp_flags)
900{
901	struct dwc3_request		*req;
902	struct dwc3_ep			*dep = to_dwc3_ep(ep);
903
904	req = kzalloc(sizeof(*req), gfp_flags);
905	if (!req)
906		return NULL;
907
908	req->direction	= dep->direction;
909	req->epnum	= dep->number;
910	req->dep	= dep;
911	req->status	= DWC3_REQUEST_STATUS_UNKNOWN;
912
913	trace_dwc3_alloc_request(req);
914
915	return &req->request;
916}
917
/* usb_ep_ops ->free_request callback: trace, then free the container */
static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request	*dwc3_req = to_dwc3_request(request);

	trace_dwc3_free_request(dwc3_req);
	kfree(dwc3_req);
}
926
927/**
928 * dwc3_ep_prev_trb - returns the previous TRB in the ring
929 * @dep: The endpoint with the TRB ring
930 * @index: The index of the current TRB in the ring
931 *
932 * Returns the TRB prior to the one pointed to by the index. If the
933 * index is 0, we will wrap backwards, skip the link TRB, and return
934 * the one just before that.
935 */
936static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
937{
938	u8 tmp = index;
939
940	if (!tmp)
941		tmp = DWC3_TRB_NUM - 1;
942
943	return &dep->trb_pool[tmp - 1];
944}
945
/*
 * dwc3_calc_trbs_left - number of free TRB slots on @dep's ring
 *
 * Returns a value in [0, DWC3_TRB_NUM - 1]; the link TRB permanently
 * occupies the last slot and is never counted as usable.
 */
static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
	u8			trbs_left;

	/*
	 * If the enqueue & dequeue are equal then the TRB ring is either full
	 * or empty. It's considered full when there are DWC3_TRB_NUM-1 of TRBs
	 * pending to be processed by the driver.
	 */
	if (dep->trb_enqueue == dep->trb_dequeue) {
		/*
		 * If there is any request remained in the started_list at
		 * this point, that means there is no TRB available.
		 */
		if (!list_empty(&dep->started_list))
			return 0;

		return DWC3_TRB_NUM - 1;
	}

	/* modulo-256 distance (u8 wraps naturally), masked to the ring size */
	trbs_left = dep->trb_dequeue - dep->trb_enqueue;
	trbs_left &= (DWC3_TRB_NUM - 1);

	/*
	 * When dequeue trails enqueue numerically, enqueue has not wrapped
	 * yet; the distance above still counts the link-TRB slot, so give
	 * one slot back.
	 */
	if (dep->trb_dequeue < dep->trb_enqueue)
		trbs_left--;

	return trbs_left;
}
974
/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 * @trb_length: buffer size of the TRB
 * @chain: should this TRB be chained to the next?
 * @node: only for isochronous endpoints. First TRB needs different type.
 * @use_bounce_buffer: set to use bounce buffer
 * @must_interrupt: set to interrupt on TRB completion
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, unsigned int trb_length,
		unsigned int chain, unsigned int node, bool use_bounce_buffer,
		bool must_interrupt)
{
	struct dwc3_trb		*trb;
	dma_addr_t		dma;
	unsigned int		stream_id = req->request.stream_id;
	unsigned int		short_not_ok = req->request.short_not_ok;
	unsigned int		no_interrupt = req->request.no_interrupt;
	unsigned int		is_last = req->request.is_last;
	struct dwc3		*dwc = dep->dwc;
	struct usb_gadget	*gadget = dwc->gadget;
	enum usb_device_speed	speed = gadget->speed;

	/* pick the buffer source: bounce buffer, current sg entry, or linear */
	if (use_bounce_buffer)
		dma = dep->dwc->bounce_addr;
	else if (req->request.num_sgs > 0)
		dma = sg_dma_address(req->start_sg);
	else
		dma = req->request.dma;

	trb = &dep->trb_pool[dep->trb_enqueue];

	/* first TRB for this request: record it and move the request to the
	 * started list (see dwc3_gadget_move_started_request()) */
	if (!req->trb) {
		dwc3_gadget_move_started_request(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
	}

	req->num_trbs++;

	trb->size = DWC3_TRB_SIZE_LENGTH(trb_length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node) {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

			/*
			 * USB Specification 2.0 Section 5.9.2 states that: "If
			 * there is only a single transaction in the microframe,
			 * only a DATA0 data packet PID is used.  If there are
			 * two transactions per microframe, DATA1 is used for
			 * the first transaction data packet and DATA0 is used
			 * for the second transaction data packet.  If there are
			 * three transactions per microframe, DATA2 is used for
			 * the first transaction data packet, DATA1 is used for
			 * the second, and DATA0 is used for the third."
			 *
			 * IOW, we should satisfy the following cases:
			 *
			 * 1) length <= maxpacket
			 *	- DATA0
			 *
			 * 2) maxpacket < length <= (2 * maxpacket)
			 *	- DATA1, DATA0
			 *
			 * 3) (2 * maxpacket) < length <= (3 * maxpacket)
			 *	- DATA2, DATA1, DATA0
			 */
			if (speed == USB_SPEED_HIGH) {
				struct usb_ep *ep = &dep->endpoint;
				unsigned int mult = 2;
				unsigned int maxp = usb_endpoint_maxp(ep->desc);

				if (req->request.length <= (2 * maxp))
					mult--;

				if (req->request.length <= maxp)
					mult--;

				trb->size |= DWC3_TRB_SIZE_PCM1(mult);
			}
		} else {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
		}

		/* always enable Interrupt on Missed ISOC */
		if (!no_interrupt && !chain)
			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
				usb_endpoint_type(dep->endpoint.desc));
	}

	/*
	 * Enable Continue on Short Packet
	 * when endpoint is not a stream capable
	 */
	if (usb_endpoint_dir_out(dep->endpoint.desc)) {
		if (!dep->stream_capable)
			trb->ctrl |= DWC3_TRB_CTRL_CSP;

		if (short_not_ok)
			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
	}

	/* interrupt on completion for the last TRB, or when forced */
	if ((!no_interrupt && !chain) || must_interrupt)
		trb->ctrl |= DWC3_TRB_CTRL_IOC;

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;
	else if (dep->stream_capable && is_last)
		trb->ctrl |= DWC3_TRB_CTRL_LST;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);

	/*
	 * As per data book 4.2.3.2TRB Control Bit Rules section
	 *
	 * The controller autonomously checks the HWO field of a TRB to determine if the
	 * entire TRB is valid. Therefore, software must ensure that the rest of the TRB
	 * is valid before setting the HWO field to '1'. In most systems, this means that
	 * software must update the fourth DWORD of a TRB last.
	 *
	 * However there is a possibility of CPU re-ordering here which can cause
	 * controller to observe the HWO bit set prematurely.
	 * Add a write memory barrier to prevent CPU re-ordering.
	 */
	wmb();
	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	dwc3_ep_inc_enq(dep);

	trace_dwc3_prepare_trb(dep, trb);
}
1128
1129static bool dwc3_needs_extra_trb(struct dwc3_ep *dep, struct dwc3_request *req)
1130{
1131	unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
1132	unsigned int rem = req->request.length % maxp;
1133
1134	if ((req->request.length && req->request.zero && !rem &&
1135			!usb_endpoint_xfer_isoc(dep->endpoint.desc)) ||
1136			(!req->direction && rem))
1137		return true;
1138
1139	return false;
1140}
1141
1142/**
1143 * dwc3_prepare_last_sg - prepare TRBs for the last SG entry
1144 * @dep: The endpoint that the request belongs to
1145 * @req: The request to prepare
1146 * @entry_length: The last SG entry size
1147 * @node: Indicates whether this is not the first entry (for isoc only)
1148 *
1149 * Return the number of TRBs prepared.
1150 */
1151static int dwc3_prepare_last_sg(struct dwc3_ep *dep,
1152		struct dwc3_request *req, unsigned int entry_length,
1153		unsigned int node)
1154{
1155	unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
1156	unsigned int rem = req->request.length % maxp;
1157	unsigned int num_trbs = 1;
1158
1159	if (dwc3_needs_extra_trb(dep, req))
1160		num_trbs++;
1161
1162	if (dwc3_calc_trbs_left(dep) < num_trbs)
1163		return 0;
1164
1165	req->needs_extra_trb = num_trbs > 1;
1166
1167	/* Prepare a normal TRB */
1168	if (req->direction || req->request.length)
1169		dwc3_prepare_one_trb(dep, req, entry_length,
1170				req->needs_extra_trb, node, false, false);
1171
1172	/* Prepare extra TRBs for ZLP and MPS OUT transfer alignment */
1173	if ((!req->direction && !req->request.length) || req->needs_extra_trb)
1174		dwc3_prepare_one_trb(dep, req,
1175				req->direction ? 0 : maxp - rem,
1176				false, 1, true, false);
1177
1178	return num_trbs;
1179}
1180
/*
 * dwc3_prepare_trbs_sg - prepare TRBs for a scatter-gather request
 * @dep: endpoint the request is queued on
 * @req: the (possibly partially prepared) request
 *
 * Walks the not-yet-queued mapped SG entries starting at req->start_sg and
 * builds one TRB per entry; the final entry goes through
 * dwc3_prepare_last_sg(), which may also add a ZLP/alignment TRB.
 *
 * Returns the number of TRBs prepared by this call (may be 0 when the
 * ring is full).
 */
static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct scatterlist *sg = req->start_sg;
	struct scatterlist *s;
	int		i;
	unsigned int length = req->request.length;
	unsigned int remaining = req->request.num_mapped_sgs
		- req->num_queued_sgs;
	unsigned int num_trbs = req->num_trbs;
	bool needs_extra_trb = dwc3_needs_extra_trb(dep, req);

	/*
	 * If we resume preparing the request, then get the remaining length of
	 * the request and resume where we left off.
	 */
	for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
		length -= sg_dma_len(s);

	for_each_sg(sg, s, remaining, i) {
		unsigned int num_trbs_left = dwc3_calc_trbs_left(dep);
		unsigned int trb_length;
		bool must_interrupt = false;
		bool last_sg = false;

		/* Never program more than the bytes left in the request. */
		trb_length = min_t(unsigned int, length, sg_dma_len(s));

		length -= trb_length;

		/*
		 * IOMMU driver is coalescing the list of sgs which shares a
		 * page boundary into one and giving it to USB driver. With
		 * this the number of sgs mapped is not equal to the number of
		 * sgs passed. So mark the chain bit to false if it is the last
		 * mapped sg.
		 */
		if ((i == remaining - 1) || !length)
			last_sg = true;

		if (!num_trbs_left)
			break;

		if (last_sg) {
			if (!dwc3_prepare_last_sg(dep, req, trb_length, i))
				break;
		} else {
			/*
			 * Look ahead to check if we have enough TRBs for the
			 * next SG entry. If not, set interrupt on this TRB to
			 * resume preparing the next SG entry when more TRBs are
			 * free.
			 */
			if (num_trbs_left == 1 || (needs_extra_trb &&
					num_trbs_left <= 2 &&
					sg_dma_len(sg_next(s)) >= length))
				must_interrupt = true;

			dwc3_prepare_one_trb(dep, req, trb_length, 1, i, false,
					must_interrupt);
		}

		/*
		 * There can be a situation where all sgs in sglist are not
		 * queued because of insufficient trb number. To handle this
		 * case, update start_sg to next sg to be queued, so that
		 * we have free trbs we can continue queuing from where we
		 * previously stopped
		 */
		if (!last_sg)
			req->start_sg = sg_next(s);

		req->num_queued_sgs++;
		req->num_pending_sgs--;

		/*
		 * The number of pending SG entries may not correspond to the
		 * number of mapped SG entries. If all the data are queued, then
		 * don't include unused SG entries.
		 */
		if (length == 0) {
			req->num_pending_sgs = 0;
			break;
		}

		if (must_interrupt)
			break;
	}

	/* Report the delta in req->num_trbs since entry to this function. */
	return req->num_trbs - num_trbs;
}
1271
1272static int dwc3_prepare_trbs_linear(struct dwc3_ep *dep,
1273		struct dwc3_request *req)
1274{
1275	return dwc3_prepare_last_sg(dep, req, req->request.length, 0);
1276}
1277
1278/*
1279 * dwc3_prepare_trbs - setup TRBs from requests
1280 * @dep: endpoint for which requests are being prepared
1281 *
1282 * The function goes through the requests list and sets up TRBs for the
1283 * transfers. The function returns once there are no more TRBs available or
1284 * it runs out of requests.
1285 *
1286 * Returns the number of TRBs prepared or negative errno.
1287 */
1288static int dwc3_prepare_trbs(struct dwc3_ep *dep)
1289{
1290	struct dwc3_request	*req, *n;
1291	int			ret = 0;
1292
1293	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
1294
1295	/*
1296	 * We can get in a situation where there's a request in the started list
1297	 * but there weren't enough TRBs to fully kick it in the first time
1298	 * around, so it has been waiting for more TRBs to be freed up.
1299	 *
1300	 * In that case, we should check if we have a request with pending_sgs
1301	 * in the started list and prepare TRBs for that request first,
1302	 * otherwise we will prepare TRBs completely out of order and that will
1303	 * break things.
1304	 */
1305	list_for_each_entry(req, &dep->started_list, list) {
1306		if (req->num_pending_sgs > 0) {
1307			ret = dwc3_prepare_trbs_sg(dep, req);
1308			if (!ret || req->num_pending_sgs)
1309				return ret;
1310		}
1311
1312		if (!dwc3_calc_trbs_left(dep))
1313			return ret;
1314
1315		/*
1316		 * Don't prepare beyond a transfer. In DWC_usb32, its transfer
1317		 * burst capability may try to read and use TRBs beyond the
1318		 * active transfer instead of stopping.
1319		 */
1320		if (dep->stream_capable && req->request.is_last)
1321			return ret;
1322	}
1323
1324	list_for_each_entry_safe(req, n, &dep->pending_list, list) {
1325		struct dwc3	*dwc = dep->dwc;
1326
1327		ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
1328						    dep->direction);
1329		if (ret)
1330			return ret;
1331
1332		req->sg			= req->request.sg;
1333		req->start_sg		= req->sg;
1334		req->num_queued_sgs	= 0;
1335		req->num_pending_sgs	= req->request.num_mapped_sgs;
1336
1337		if (req->num_pending_sgs > 0) {
1338			ret = dwc3_prepare_trbs_sg(dep, req);
1339			if (req->num_pending_sgs)
1340				return ret;
1341		} else {
1342			ret = dwc3_prepare_trbs_linear(dep, req);
1343		}
1344
1345		if (!ret || !dwc3_calc_trbs_left(dep))
1346			return ret;
1347
1348		/*
1349		 * Don't prepare beyond a transfer. In DWC_usb32, its transfer
1350		 * burst capability may try to read and use TRBs beyond the
1351		 * active transfer instead of stopping.
1352		 */
1353		if (dep->stream_capable && req->request.is_last)
1354			return ret;
1355	}
1356
1357	return ret;
1358}
1359
1360static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
1361
/*
 * __dwc3_gadget_kick_transfer - prepare TRBs and (re)start the transfer
 * @dep: endpoint to kick
 *
 * Prepares as many TRBs as possible, then issues START_TRANSFER when no
 * transfer is active on @dep, or UPDATE_TRANSFER otherwise. On command
 * failure (other than -EAGAIN) the started requests are moved to the
 * cancelled list. Returns 0 on success or a negative errno.
 */
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request		*req;
	int				starting;
	int				ret;
	u32				cmd;

	/*
	 * Note that it's normal to have no new TRBs prepared (i.e. ret == 0).
	 * This happens when we need to stop and restart a transfer such as in
	 * the case of reinitiating a stream or retrying an isoc transfer.
	 */
	ret = dwc3_prepare_trbs(dep);
	if (ret < 0)
		return ret;

	starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED);

	/*
	 * If there's no new TRB prepared and we don't need to restart a
	 * transfer, there's no need to update the transfer.
	 */
	if (!ret && !starting)
		return ret;

	req = next_request(&dep->started_list);
	if (!req) {
		/* Nothing queued yet; remember to start on the next queue. */
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (starting) {
		/* START_TRANSFER takes the first TRB's DMA address. */
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;

		if (dep->stream_capable)
			cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id);

		if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
			cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER |
			DWC3_DEPCMD_PARAM(dep->resource_index);
	}

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret < 0) {
		struct dwc3_request *tmp;

		/* -EAGAIN: caller (isoc path) retries with a new frame. */
		if (ret == -EAGAIN)
			return ret;

		dwc3_stop_active_transfer(dep, true, true);

		list_for_each_entry_safe(req, tmp, &dep->started_list, list)
			dwc3_gadget_move_cancelled_request(req);

		/* If ep isn't started, then there's no end transfer pending */
		if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
			dwc3_gadget_ep_cleanup_cancelled_requests(dep);

		return ret;
	}

	if (dep->stream_capable && req->request.is_last)
		dep->flags |= DWC3_EP_WAIT_TRANSFER_COMPLETE;

	return 0;
}
1435
1436static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
1437{
1438	u32			reg;
1439
1440	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1441	return DWC3_DSTS_SOFFN(reg);
1442}
1443
1444/**
1445 * __dwc3_stop_active_transfer - stop the current active transfer
1446 * @dep: isoc endpoint
1447 * @force: set forcerm bit in the command
1448 * @interrupt: command complete interrupt after End Transfer command
1449 *
1450 * When setting force, the ForceRM bit will be set. In that case
1451 * the controller won't update the TRB progress on command
1452 * completion. It also won't clear the HWO bit in the TRB.
1453 * The command will also not complete immediately in that case.
1454 */
1455static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
1456{
1457	struct dwc3 *dwc = dep->dwc;
1458	struct dwc3_gadget_ep_cmd_params params;
1459	u32 cmd;
1460	int ret;
1461
1462	cmd = DWC3_DEPCMD_ENDTRANSFER;
1463	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
1464	cmd |= interrupt ? DWC3_DEPCMD_CMDIOC : 0;
1465	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
1466	memset(&params, 0, sizeof(params));
1467	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1468	WARN_ON_ONCE(ret);
1469	dep->resource_index = 0;
1470
1471	if (!interrupt) {
1472		if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
1473			mdelay(1);
1474		dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
1475	} else if (!ret) {
1476		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
1477	}
1478
1479	return ret;
1480}
1481
1482/**
1483 * dwc3_gadget_start_isoc_quirk - workaround invalid frame number
1484 * @dep: isoc endpoint
1485 *
1486 * This function tests for the correct combination of BIT[15:14] from the 16-bit
1487 * microframe number reported by the XferNotReady event for the future frame
1488 * number to start the isoc transfer.
1489 *
1490 * In DWC_usb31 version 1.70a-ea06 and prior, for highspeed and fullspeed
1491 * isochronous IN, BIT[15:14] of the 16-bit microframe number reported by the
1492 * XferNotReady event are invalid. The driver uses this number to schedule the
1493 * isochronous transfer and passes it to the START TRANSFER command. Because
1494 * this number is invalid, the command may fail. If BIT[15:14] matches the
1495 * internal 16-bit microframe, the START TRANSFER command will pass and the
1496 * transfer will start at the scheduled time, if it is off by 1, the command
1497 * will still pass, but the transfer will start 2 seconds in the future. For all
1498 * other conditions, the START TRANSFER command will fail with bus-expiry.
1499 *
1500 * In order to workaround this issue, we can test for the correct combination of
1501 * BIT[15:14] by sending START TRANSFER commands with different values of
1502 * BIT[15:14]: 'b00, 'b01, 'b10, and 'b11. Each combination is 2^14 uframe apart
1503 * (or 2 seconds). 4 seconds into the future will result in a bus-expiry status.
1504 * As the result, within the 4 possible combinations for BIT[15:14], there will
1505 * be 2 successful and 2 failure START COMMAND status. One of the 2 successful
1506 * command status will result in a 2-second delay start. The smaller BIT[15:14]
1507 * value is the correct combination.
1508 *
1509 * Since there are only 4 outcomes and the results are ordered, we can simply
1510 * test 2 START TRANSFER commands with BIT[15:14] combinations 'b00 and 'b01 to
1511 * deduce the smaller successful combination.
1512 *
1513 * Let test0 = test status for combination 'b00 and test1 = test status for 'b01
1514 * of BIT[15:14]. The correct combination is as follow:
1515 *
1516 * if test0 fails and test1 passes, BIT[15:14] is 'b01
1517 * if test0 fails and test1 fails, BIT[15:14] is 'b10
1518 * if test0 passes and test1 fails, BIT[15:14] is 'b11
1519 * if test0 passes and test1 passes, BIT[15:14] is 'b00
1520 *
1521 * Synopsys STAR 9001202023: Wrong microframe number for isochronous IN
1522 * endpoints.
1523 */
1524static int dwc3_gadget_start_isoc_quirk(struct dwc3_ep *dep)
1525{
1526	int cmd_status = 0;
1527	bool test0;
1528	bool test1;
1529
1530	while (dep->combo_num < 2) {
1531		struct dwc3_gadget_ep_cmd_params params;
1532		u32 test_frame_number;
1533		u32 cmd;
1534
1535		/*
1536		 * Check if we can start isoc transfer on the next interval or
1537		 * 4 uframes in the future with BIT[15:14] as dep->combo_num
1538		 */
1539		test_frame_number = dep->frame_number & DWC3_FRNUMBER_MASK;
1540		test_frame_number |= dep->combo_num << 14;
1541		test_frame_number += max_t(u32, 4, dep->interval);
1542
1543		params.param0 = upper_32_bits(dep->dwc->bounce_addr);
1544		params.param1 = lower_32_bits(dep->dwc->bounce_addr);
1545
1546		cmd = DWC3_DEPCMD_STARTTRANSFER;
1547		cmd |= DWC3_DEPCMD_PARAM(test_frame_number);
1548		cmd_status = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1549
1550		/* Redo if some other failure beside bus-expiry is received */
1551		if (cmd_status && cmd_status != -EAGAIN) {
1552			dep->start_cmd_status = 0;
1553			dep->combo_num = 0;
1554			return 0;
1555		}
1556
1557		/* Store the first test status */
1558		if (dep->combo_num == 0)
1559			dep->start_cmd_status = cmd_status;
1560
1561		dep->combo_num++;
1562
1563		/*
1564		 * End the transfer if the START_TRANSFER command is successful
1565		 * to wait for the next XferNotReady to test the command again
1566		 */
1567		if (cmd_status == 0) {
1568			dwc3_stop_active_transfer(dep, true, true);
1569			return 0;
1570		}
1571	}
1572
1573	/* test0 and test1 are both completed at this point */
1574	test0 = (dep->start_cmd_status == 0);
1575	test1 = (cmd_status == 0);
1576
1577	if (!test0 && test1)
1578		dep->combo_num = 1;
1579	else if (!test0 && !test1)
1580		dep->combo_num = 2;
1581	else if (test0 && !test1)
1582		dep->combo_num = 3;
1583	else if (test0 && test1)
1584		dep->combo_num = 0;
1585
1586	dep->frame_number &= DWC3_FRNUMBER_MASK;
1587	dep->frame_number |= dep->combo_num << 14;
1588	dep->frame_number += max_t(u32, 4, dep->interval);
1589
1590	/* Reinitialize test variables */
1591	dep->start_cmd_status = 0;
1592	dep->combo_num = 0;
1593
1594	return __dwc3_gadget_kick_transfer(dep);
1595}
1596
/*
 * __dwc3_gadget_start_isoc - start an isochronous transfer at a valid frame
 * @dep: isoc endpoint
 *
 * Refreshes the cached frame number from DSTS (handling 14-bit rollover),
 * then retries START_TRANSFER at successive aligned intervals until the
 * controller accepts one or DWC3_ISOC_MAX_RETRIES attempts fail with
 * bus-expiry (-EAGAIN).
 */
static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3 *dwc = dep->dwc;
	int ret;
	int i;

	if (list_empty(&dep->pending_list) &&
	    list_empty(&dep->started_list)) {
		/* Nothing to start yet; wait for the next queued request. */
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return -EAGAIN;
	}

	/* Affected DWC_usb31 cores report bogus BIT[15:14] — see quirk. */
	if (!dwc->dis_start_transfer_quirk &&
	    (DWC3_VER_IS_PRIOR(DWC31, 170A) ||
	     DWC3_VER_TYPE_IS_WITHIN(DWC31, 170A, EA01, EA06))) {
		if (dwc->gadget->speed <= USB_SPEED_HIGH && dep->direction)
			return dwc3_gadget_start_isoc_quirk(dep);
	}

	if (desc->bInterval <= 14 &&
	    dwc->gadget->speed >= USB_SPEED_HIGH) {
		u32 frame = __dwc3_gadget_get_frame(dwc);
		bool rollover = frame <
				(dep->frame_number & DWC3_FRNUMBER_MASK);

		/*
		 * frame_number is set from XferNotReady and may be already
		 * out of date. DSTS only provides the lower 14 bit of the
		 * current frame number. So add the upper two bits of
		 * frame_number and handle a possible rollover.
		 * This will provide the correct frame_number unless more than
		 * rollover has happened since XferNotReady.
		 */

		dep->frame_number = (dep->frame_number & ~DWC3_FRNUMBER_MASK) |
				     frame;
		if (rollover)
			dep->frame_number += BIT(14);
	}

	for (i = 0; i < DWC3_ISOC_MAX_RETRIES; i++) {
		/* Aim one more interval into the future on each retry. */
		dep->frame_number = DWC3_ALIGN_FRAME(dep, i + 1);

		ret = __dwc3_gadget_kick_transfer(dep);
		if (ret != -EAGAIN)
			break;
	}

	/*
	 * After a number of unsuccessful start attempts due to bus-expiry
	 * status, issue END_TRANSFER command and retry on the next XferNotReady
	 * event.
	 */
	if (ret == -EAGAIN)
		ret = __dwc3_stop_active_transfer(dep, false, true);

	return ret;
}
1656
/*
 * __dwc3_gadget_ep_queue - queue a request on an endpoint (lock held)
 * @dep: the endpoint to queue on
 * @req: the request to queue
 *
 * Validates the request, adds it to the pending list, and kicks the
 * transfer unless the endpoint is waiting for stream completion, an
 * End Transfer, or (for isoc) the first XferNotReady event. Returns 0
 * or a negative errno (-ESHUTDOWN, -EINVAL).
 */
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;

	if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
				dep->name);
		return -ESHUTDOWN;
	}

	if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
				&req->request, req->dep->name))
		return -EINVAL;

	if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
				"%s: request %pK already in flight\n",
				dep->name, &req->request))
		return -EINVAL;

	/* Keep the controller powered while a transfer is outstanding. */
	pm_runtime_get(dwc->dev);

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;

	trace_dwc3_ep_queue(req);

	list_add_tail(&req->list, &dep->pending_list);
	req->status = DWC3_REQUEST_STATUS_QUEUED;

	/* Streams: defer until the current stream transfer completes. */
	if (dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)
		return 0;

	/*
	 * Start the transfer only after the END_TRANSFER is completed
	 * and endpoint STALL is cleared.
	 */
	if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
	    (dep->flags & DWC3_EP_WEDGE) ||
	    (dep->flags & DWC3_EP_STALL)) {
		dep->flags |= DWC3_EP_DELAY_START;
		return 0;
	}

	/*
	 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
	 * wait for a XferNotReady event so we will know what's the current
	 * (micro-)frame number.
	 *
	 * Without this trick, we are very, very likely gonna get Bus Expiry
	 * errors which will force us issue EndTransfer command.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		if (!(dep->flags & DWC3_EP_PENDING_REQUEST) &&
				!(dep->flags & DWC3_EP_TRANSFER_STARTED))
			return 0;

		if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
			if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
				return __dwc3_gadget_start_isoc(dep);
		}
	}

	__dwc3_gadget_kick_transfer(dep);

	return 0;
}
1723
1724static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1725	gfp_t gfp_flags)
1726{
1727	struct dwc3_request		*req = to_dwc3_request(request);
1728	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1729	struct dwc3			*dwc = dep->dwc;
1730
1731	unsigned long			flags;
1732
1733	int				ret;
1734
1735	spin_lock_irqsave(&dwc->lock, flags);
1736	ret = __dwc3_gadget_ep_queue(dep, req);
1737	spin_unlock_irqrestore(&dwc->lock, flags);
1738
1739	return ret;
1740}
1741
1742static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req)
1743{
1744	int i;
1745
1746	/* If req->trb is not set, then the request has not started */
1747	if (!req->trb)
1748		return;
1749
1750	/*
1751	 * If request was already started, this means we had to
1752	 * stop the transfer. With that we also need to ignore
1753	 * all TRBs used by the request, however TRBs can only
1754	 * be modified after completion of END_TRANSFER
1755	 * command. So what we do here is that we wait for
1756	 * END_TRANSFER completion and only after that, we jump
1757	 * over TRBs by clearing HWO and incrementing dequeue
1758	 * pointer.
1759	 */
1760	for (i = 0; i < req->num_trbs; i++) {
1761		struct dwc3_trb *trb;
1762
1763		trb = &dep->trb_pool[dep->trb_dequeue];
1764		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1765		dwc3_ep_inc_deq(dep);
1766	}
1767
1768	req->num_trbs = 0;
1769}
1770
/*
 * Give back every request on the cancelled list with -ECONNRESET, after
 * releasing the ring TRBs each request still occupies.
 */
static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
{
	struct dwc3_request		*req;
	struct dwc3_request		*tmp;

	/* _safe iterator: giveback presumably unlinks req from the list. */
	list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) {
		dwc3_gadget_ep_skip_trbs(dep, req);
		dwc3_gadget_giveback(dep, req, -ECONNRESET);
	}
}
1781
/*
 * dwc3_gadget_ep_dequeue - usb_ep_ops.dequeue: cancel a queued request
 * @ep: the endpoint the request was queued on
 * @request: the request to cancel
 *
 * Looks for @request on the cancelled, pending, and started lists. A
 * pending request is given back immediately; a started request requires
 * stopping the active transfer first. Returns 0, or -EINVAL if the
 * request is not queued on @ep.
 */
static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_request		*r = NULL;

	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;
	int				ret = 0;

	trace_dwc3_ep_dequeue(req);

	spin_lock_irqsave(&dwc->lock, flags);

	/* Already being cancelled: the End Transfer path will give it back. */
	list_for_each_entry(r, &dep->cancelled_list, list) {
		if (r == req)
			goto out;
	}

	/* Not yet given to the hardware: give it back right away. */
	list_for_each_entry(r, &dep->pending_list, list) {
		if (r == req) {
			dwc3_gadget_giveback(dep, req, -ECONNRESET);
			goto out;
		}
	}

	list_for_each_entry(r, &dep->started_list, list) {
		if (r == req) {
			struct dwc3_request *t;

			/* wait until it is processed */
			dwc3_stop_active_transfer(dep, true, true);

			/*
			 * Remove any started request if the transfer is
			 * cancelled.
			 */
			list_for_each_entry_safe(r, t, &dep->started_list, list)
				dwc3_gadget_move_cancelled_request(r);

			dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;

			goto out;
		}
	}

	dev_err(dwc->dev, "request %pK was not queued to %s\n",
		request, ep->name);
	ret = -EINVAL;
out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
1838
/*
 * __dwc3_gadget_ep_set_halt - set or clear endpoint STALL (lock held)
 * @dep: the endpoint to (un)halt
 * @value: non-zero to set STALL, zero to clear it
 * @protocol: non-zero when initiated by a SET/CLEAR_FEATURE from the host
 *
 * Isochronous endpoints cannot be halted. Non-protocol halts are refused
 * with -EAGAIN while a transfer is in flight. Clearing a stall cancels
 * started requests and may restart a delayed transfer. Returns 0 or a
 * negative errno.
 */
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
	struct dwc3_gadget_ep_cmd_params	params;
	struct dwc3				*dwc = dep->dwc;
	struct dwc3_request			*req;
	struct dwc3_request			*tmp;
	int					ret;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		return -EINVAL;
	}

	memset(&params, 0x00, sizeof(params));

	if (value) {
		struct dwc3_trb *trb;

		unsigned int transfer_in_flight;
		unsigned int started;

		/* ep0/ep1 use the dedicated ep0 TRB; others use their pool. */
		if (dep->number > 1)
			trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
		else
			trb = &dwc->ep0_trb[dep->trb_enqueue];

		/* HWO still set means the controller owns the last TRB. */
		transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
		started = !list_empty(&dep->started_list);

		if (!protocol && ((dep->direction && transfer_in_flight) ||
				(!dep->direction && started))) {
			return -EAGAIN;
		}

		ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
				&params);
		if (ret)
			dev_err(dwc->dev, "failed to set STALL on %s\n",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		/*
		 * Don't issue CLEAR_STALL command to control endpoints. The
		 * controller automatically clears the STALL when it receives
		 * the SETUP token.
		 */
		if (dep->number <= 1) {
			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
			return 0;
		}

		dwc3_stop_active_transfer(dep, true, true);

		list_for_each_entry_safe(req, tmp, &dep->started_list, list)
			dwc3_gadget_move_cancelled_request(req);

		/* Defer CLEAR_STALL until the End Transfer has completed. */
		if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
			dep->flags |= DWC3_EP_PENDING_CLEAR_STALL;
			return 0;
		}

		dwc3_gadget_ep_cleanup_cancelled_requests(dep);

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		if (ret) {
			dev_err(dwc->dev, "failed to clear STALL on %s\n",
					dep->name);
			return ret;
		}

		dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);

		/* Restart a transfer that was queued while stalled. */
		if ((dep->flags & DWC3_EP_DELAY_START) &&
		    !usb_endpoint_xfer_isoc(dep->endpoint.desc))
			__dwc3_gadget_kick_transfer(dep);

		dep->flags &= ~DWC3_EP_DELAY_START;
	}

	return ret;
}
1921
1922static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1923{
1924	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1925	struct dwc3			*dwc = dep->dwc;
1926
1927	unsigned long			flags;
1928
1929	int				ret;
1930
1931	spin_lock_irqsave(&dwc->lock, flags);
1932	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1933	spin_unlock_irqrestore(&dwc->lock, flags);
1934
1935	return ret;
1936}
1937
1938static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1939{
1940	struct dwc3_ep			*dep = to_dwc3_ep(ep);
1941	struct dwc3			*dwc = dep->dwc;
1942	unsigned long			flags;
1943	int				ret;
1944
1945	spin_lock_irqsave(&dwc->lock, flags);
1946	dep->flags |= DWC3_EP_WEDGE;
1947
1948	if (dep->number == 0 || dep->number == 1)
1949		ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1950	else
1951		ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1952	spin_unlock_irqrestore(&dwc->lock, flags);
1953
1954	return ret;
1955}
1956
1957/* -------------------------------------------------------------------------- */
1958
/* Endpoint descriptor for the default control endpoint (ep0). */
static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};
1964
/* usb_ep_ops for ep0: dedicated queue/halt paths, shared dequeue/wedge. */
static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
1975
/* usb_ep_ops for all non-control endpoints. */
static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
1986
1987/* -------------------------------------------------------------------------- */
1988
static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	/* Thin usb_gadget_ops wrapper around the DSTS frame-number read. */
	return __dwc3_gadget_get_frame(gadget_to_dwc(g));
}
1995
/*
 * __dwc3_gadget_wakeup - issue a remote-wakeup request (lock held)
 * @dwc: pointer to our context structure
 *
 * Verifies the link is in a state that permits remote wakeup, requests
 * the Recovery link state, and polls DSTS until the link reaches U0.
 * Returns 0 on success or a negative errno.
 */
static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
{
	int			retries;

	int			ret;
	u32			reg;

	u8			link_state;

	/*
	 * According to the Databook Remote wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RESET:
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
	case DWC3_LINK_STATE_U2:	/* in HS, means Sleep (L1) */
	case DWC3_LINK_STATE_U1:
	case DWC3_LINK_STATE_RESUME:
		break;
	default:
		/* Any other link state can't initiate remote wakeup. */
		return -EINVAL;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		return ret;
	}

	/* Recent versions do this automatically */
	if (DWC3_VER_IS_PRIOR(DWC3, 194A)) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	retries = 20000;

	/* Busy-poll; each iteration's MMIO read paces the loop. */
	while (retries--) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		return -EINVAL;
	}

	return 0;
}
2059
2060static int dwc3_gadget_wakeup(struct usb_gadget *g)
2061{
2062	struct dwc3		*dwc = gadget_to_dwc(g);
2063	unsigned long		flags;
2064	int			ret;
2065
2066	spin_lock_irqsave(&dwc->lock, flags);
2067	ret = __dwc3_gadget_wakeup(dwc);
2068	spin_unlock_irqrestore(&dwc->lock, flags);
2069
2070	return ret;
2071}
2072
2073static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
2074		int is_selfpowered)
2075{
2076	struct dwc3		*dwc = gadget_to_dwc(g);
2077	unsigned long		flags;
2078
2079	spin_lock_irqsave(&dwc->lock, flags);
2080	g->is_selfpowered = !!is_selfpowered;
2081	spin_unlock_irqrestore(&dwc->lock, flags);
2082
2083	return 0;
2084}
2085
2086static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2087{
2088	u32 epnum;
2089
2090	for (epnum = 2; epnum < dwc->num_eps; epnum++) {
2091		struct dwc3_ep *dep;
2092
2093		dep = dwc->eps[epnum];
2094		if (!dep)
2095			continue;
2096
2097		dwc3_remove_requests(dwc, dep, -ESHUTDOWN);
2098	}
2099}
2100
/*
 * dwc3_gadget_run_stop - set or clear DCTL.RUN_STOP and wait for the halt bit
 * @dwc: pointer to our context structure
 * @is_on: non-zero to start the controller, zero to stop it
 * @suspend: when stopping with hibernation, keep DCTL.KEEP_CONNECT set
 *
 * Returns 0 on success or -ETIMEDOUT if DSTS.DEVCTRLHLT never reflects
 * the requested state.
 */
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
	u32			reg;
	u32			timeout = 500;

	/* Nothing to do while the controller is runtime-suspended. */
	if (pm_runtime_suspended(dwc->dev))
		return 0;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		/* Old cores (<= 1.87a) need an explicit RX_DET target. */
		if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation)
			reg |= DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation && !suspend)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = false;
	}

	dwc3_gadget_dctl_write_safe(dwc, reg);

	/* Poll DEVCTRLHLT: it must be clear when running, set when halted. */
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		reg &= DWC3_DSTS_DEVCTRLHLT;
	} while (--timeout && !(!is_on ^ !reg));

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}
2145
2146static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
2147static void __dwc3_gadget_stop(struct dwc3 *dwc);
2148static int __dwc3_gadget_start(struct dwc3 *dwc);
2149
/*
 * dwc3_gadget_soft_disconnect - perform a device-initiated disconnect
 * @dwc: pointer to our context structure
 *
 * Ends all active transfers and stops the gadget under the lock, then
 * clears RUN_STOP. The ordering (end transfers before clearing RUN_STOP)
 * is mandated by the databook; see the comment below. Returns the result
 * of dwc3_gadget_run_stop().
 */
static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
{
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->connected = false;

	/*
	 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
	 * Section 4.1.8 Table 4-7, it states that for a device-initiated
	 * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
	 * command for any active transfers" before clearing the RunStop
	 * bit.
	 */
	dwc3_stop_active_transfers(dwc);
	__dwc3_gadget_stop(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	/*
	 * Note: if the GEVNTCOUNT indicates events in the event buffer, the
	 * driver needs to acknowledge them before the controller can halt.
	 * Simply let the interrupt handler acknowledge and handle the
	 * remaining events generated by the controller while polling for
	 * DSTS.DEVCTLHLT.
	 */
	return dwc3_gadget_run_stop(dwc, false, false);
}
2177
/*
 * dwc3_gadget_pullup - usb_gadget_ops.pullup (soft connect/disconnect)
 * @g: the gadget being connected or disconnected
 * @is_on: non-zero to connect, zero to disconnect
 *
 * On disconnect, first waits for any in-flight control transfer to get the
 * core back into SETUP phase, per the databook. Interacts carefully with
 * runtime PM: when a runtime resume is triggered here, the resume routine
 * performs the run/stop sequence itself, so it is skipped in that case.
 */
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	int			ret;

	is_on = !!is_on;

	dwc->softconnect = is_on;
	/*
	 * Per databook, when we want to stop the gadget, if a control transfer
	 * is still in process, complete it and get the core into setup phase.
	 */
	if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
		reinit_completion(&dwc->ep0_in_setup);

		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
		if (ret == 0)
			dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
	}

	/*
	 * Avoid issuing a runtime resume if the device is already in the
	 * suspended state during gadget disconnect.  DWC3 gadget was already
	 * halted/stopped during runtime suspend.
	 */
	if (!is_on) {
		pm_runtime_barrier(dwc->dev);
		if (pm_runtime_suspended(dwc->dev))
			return 0;
	}

	/*
	 * Check the return value for successful resume, or error.  For a
	 * successful resume, the DWC3 runtime PM resume routine will handle
	 * the run stop sequence, so avoid duplicate operations here.
	 */
	ret = pm_runtime_get_sync(dwc->dev);
	/* ret == 0: a resume just ran and handled run/stop; ret < 0: error */
	if (!ret || ret < 0) {
		pm_runtime_put(dwc->dev);
		if (ret < 0)
			pm_runtime_set_suspended(dwc->dev);
		return ret;
	}

	if (dwc->pullups_connected == is_on) {
		/* Already in the requested state; drop the PM reference. */
		pm_runtime_put(dwc->dev);
		return 0;
	}

	if (!is_on) {
		ret = dwc3_gadget_soft_disconnect(dwc);
	} else {
		/*
		 * In the Synopsys DWC_usb31 1.90a programming guide section
		 * 4.1.9, it specifies that for a reconnect after a
		 * device-initiated disconnect requires a core soft reset
		 * (DCTL.CSftRst) before enabling the run/stop bit.
		 */
		dwc3_core_soft_reset(dwc);

		dwc3_event_buffers_setup(dwc);
		__dwc3_gadget_start(dwc);
		ret = dwc3_gadget_run_stop(dwc, true, false);
	}

	pm_runtime_put(dwc->dev);

	return ret;
}
2248
2249static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
2250{
2251	u32			reg;
2252
2253	/* Enable all but Start and End of Frame IRQs */
2254	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
2255			DWC3_DEVTEN_EVNTOVERFLOWEN |
2256			DWC3_DEVTEN_CMDCMPLTEN |
2257			DWC3_DEVTEN_ERRTICERREN |
2258			DWC3_DEVTEN_WKUPEVTEN |
2259			DWC3_DEVTEN_CONNECTDONEEN |
2260			DWC3_DEVTEN_USBRSTEN |
2261			DWC3_DEVTEN_DISCONNEVTEN);
2262
2263	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
2264		reg |= DWC3_DEVTEN_ULSTCNGEN;
2265
2266	/* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
2267	if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
2268		reg |= DWC3_DEVTEN_EOPFEN;
2269
2270	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2271}
2272
2273static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
2274{
2275	/* mask all interrupts */
2276	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2277}
2278
2279static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
2280static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
2281
2282/**
2283 * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG
2284 * @dwc: pointer to our context structure
2285 *
2286 * The following looks like complex but it's actually very simple. In order to
2287 * calculate the number of packets we can burst at once on OUT transfers, we're
2288 * gonna use RxFIFO size.
2289 *
2290 * To calculate RxFIFO size we need two numbers:
2291 * MDWIDTH = size, in bits, of the internal memory bus
2292 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
2293 *
2294 * Given these two numbers, the formula is simple:
2295 *
2296 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
2297 *
2298 * 24 bytes is for 3x SETUP packets
2299 * 16 bytes is a clock domain crossing tolerance
2300 *
2301 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
2302 */
2303static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
2304{
2305	u32 ram2_depth;
2306	u32 mdwidth;
2307	u32 nump;
2308	u32 reg;
2309
2310	ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
2311	mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
2312	if (DWC3_IP_IS(DWC32))
2313		mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
2314
2315	nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
2316	nump = min_t(u32, nump, 16);
2317
2318	/* update NumP */
2319	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2320	reg &= ~DWC3_DCFG_NUMP_MASK;
2321	reg |= nump << DWC3_DCFG_NUMP_SHIFT;
2322	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2323}
2324
/*
 * __dwc3_gadget_start - program the core so it can receive SETUP packets
 * @dwc: pointer to our context structure
 *
 * Configures interrupt moderation, the Rx threshold/NUMP behavior, enables
 * both halves of physical endpoint 0, arms ep0 for SETUP and unmasks device
 * IRQs. Returns 0 on success or the error from endpoint enable (with any
 * previously enabled half of ep0 disabled again).
 */
static int __dwc3_gadget_start(struct dwc3 *dwc)
{
	struct dwc3_ep		*dep;
	int			ret = 0;
	u32			reg;

	/*
	 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
	 * the core supports IMOD, disable it.
	 */
	if (dwc->imod_interval) {
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
	} else if (dwc3_has_imod(dwc)) {
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
	}

	/*
	 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
	 * field instead of letting dwc3 itself calculate that automatically.
	 *
	 * This way, we maximize the chances that we'll be able to get several
	 * bursts of data without going through any sort of endpoint throttling.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
	if (DWC3_IP_IS(DWC3))
		reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
	else
		reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL;

	dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);

	dwc3_gadget_setup_nump(dwc);

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	/* Physical endpoints 0 (OUT) and 1 (IN) together form USB ep0. */
	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err0;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err1;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc->link_state = DWC3_LINK_STATE_SS_DIS;
	dwc->delayed_status = false;
	dwc3_ep0_out_start(dwc);

	dwc3_gadget_enable_irq(dwc);

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	return ret;
}
2392
2393static int dwc3_gadget_start(struct usb_gadget *g,
2394		struct usb_gadget_driver *driver)
2395{
2396	struct dwc3		*dwc = gadget_to_dwc(g);
2397	unsigned long		flags;
2398	int			ret = 0;
2399	int			irq;
2400
2401	irq = dwc->irq_gadget;
2402	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
2403			IRQF_SHARED, "dwc3", dwc->ev_buf);
2404	if (ret) {
2405		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2406				irq, ret);
2407		goto err0;
2408	}
2409
2410	spin_lock_irqsave(&dwc->lock, flags);
2411	if (dwc->gadget_driver) {
2412		dev_err(dwc->dev, "%s is already bound to %s\n",
2413				dwc->gadget->name,
2414				dwc->gadget_driver->driver.name);
2415		ret = -EBUSY;
2416		goto err1;
2417	}
2418
2419	dwc->gadget_driver	= driver;
2420	spin_unlock_irqrestore(&dwc->lock, flags);
2421
2422	return 0;
2423
2424err1:
2425	spin_unlock_irqrestore(&dwc->lock, flags);
2426	free_irq(irq, dwc);
2427
2428err0:
2429	return ret;
2430}
2431
/*
 * __dwc3_gadget_stop - quiesce the gadget side of the controller
 * @dwc: pointer to our context structure
 *
 * Masks all device interrupts first, then disables both halves of
 * physical endpoint 0. Called with dwc->lock held (see
 * dwc3_gadget_soft_disconnect()).
 */
static void __dwc3_gadget_stop(struct dwc3 *dwc)
{
	dwc3_gadget_disable_irq(dwc);
	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);
}
2438
2439static int dwc3_gadget_stop(struct usb_gadget *g)
2440{
2441	struct dwc3		*dwc = gadget_to_dwc(g);
2442	unsigned long		flags;
2443
2444	spin_lock_irqsave(&dwc->lock, flags);
2445	dwc->gadget_driver	= NULL;
2446	spin_unlock_irqrestore(&dwc->lock, flags);
2447
2448	free_irq(dwc->irq_gadget, dwc->ev_buf);
2449
2450	return 0;
2451}
2452
2453static void dwc3_gadget_config_params(struct usb_gadget *g,
2454				      struct usb_dcd_config_params *params)
2455{
2456	struct dwc3		*dwc = gadget_to_dwc(g);
2457
2458	params->besl_baseline = USB_DEFAULT_BESL_UNSPECIFIED;
2459	params->besl_deep = USB_DEFAULT_BESL_UNSPECIFIED;
2460
2461	/* Recommended BESL */
2462	if (!dwc->dis_enblslpm_quirk) {
2463		/*
2464		 * If the recommended BESL baseline is 0 or if the BESL deep is
2465		 * less than 2, Microsoft's Windows 10 host usb stack will issue
2466		 * a usb reset immediately after it receives the extended BOS
2467		 * descriptor and the enumeration will fail. To maintain
2468		 * compatibility with the Windows' usb stack, let's set the
2469		 * recommended BESL baseline to 1 and clamp the BESL deep to be
2470		 * within 2 to 15.
2471		 */
2472		params->besl_baseline = 1;
2473		if (dwc->is_utmi_l1_suspend)
2474			params->besl_deep =
2475				clamp_t(u8, dwc->hird_threshold, 2, 15);
2476	}
2477
2478	/* U1 Device exit Latency */
2479	if (dwc->dis_u1_entry_quirk)
2480		params->bU1devExitLat = 0;
2481	else
2482		params->bU1devExitLat = DWC3_DEFAULT_U1_DEV_EXIT_LAT;
2483
2484	/* U2 Device exit Latency */
2485	if (dwc->dis_u2_entry_quirk)
2486		params->bU2DevExitLat = 0;
2487	else
2488		params->bU2DevExitLat =
2489				cpu_to_le16(DWC3_DEFAULT_U2_DEV_EXIT_LAT);
2490}
2491
2492static void dwc3_gadget_set_speed(struct usb_gadget *g,
2493				  enum usb_device_speed speed)
2494{
2495	struct dwc3		*dwc = gadget_to_dwc(g);
2496	unsigned long		flags;
2497	u32			reg;
2498
2499	spin_lock_irqsave(&dwc->lock, flags);
2500	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2501	reg &= ~(DWC3_DCFG_SPEED_MASK);
2502
2503	/*
2504	 * WORKAROUND: DWC3 revision < 2.20a have an issue
2505	 * which would cause metastability state on Run/Stop
2506	 * bit if we try to force the IP to USB2-only mode.
2507	 *
2508	 * Because of that, we cannot configure the IP to any
2509	 * speed other than the SuperSpeed
2510	 *
2511	 * Refers to:
2512	 *
2513	 * STAR#9000525659: Clock Domain Crossing on DCTL in
2514	 * USB 2.0 Mode
2515	 */
2516	if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
2517	    !dwc->dis_metastability_quirk) {
2518		reg |= DWC3_DCFG_SUPERSPEED;
2519	} else {
2520		switch (speed) {
2521		case USB_SPEED_LOW:
2522			reg |= DWC3_DCFG_LOWSPEED;
2523			break;
2524		case USB_SPEED_FULL:
2525			reg |= DWC3_DCFG_FULLSPEED;
2526			break;
2527		case USB_SPEED_HIGH:
2528			reg |= DWC3_DCFG_HIGHSPEED;
2529			break;
2530		case USB_SPEED_SUPER:
2531			reg |= DWC3_DCFG_SUPERSPEED;
2532			break;
2533		case USB_SPEED_SUPER_PLUS:
2534			if (DWC3_IP_IS(DWC3))
2535				reg |= DWC3_DCFG_SUPERSPEED;
2536			else
2537				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
2538			break;
2539		default:
2540			dev_err(dwc->dev, "invalid speed (%d)\n", speed);
2541
2542			if (DWC3_IP_IS(DWC3))
2543				reg |= DWC3_DCFG_SUPERSPEED;
2544			else
2545				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
2546		}
2547	}
2548	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2549
2550	spin_unlock_irqrestore(&dwc->lock, flags);
2551}
2552
/* Callbacks exposed to the UDC core; all handlers are defined above. */
static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
	.udc_set_speed		= dwc3_gadget_set_speed,
	.get_config_params	= dwc3_gadget_config_params,
};
2563
2564/* -------------------------------------------------------------------------- */
2565
2566static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep)
2567{
2568	struct dwc3 *dwc = dep->dwc;
2569
2570	usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
2571	dep->endpoint.maxburst = 1;
2572	dep->endpoint.ops = &dwc3_gadget_ep0_ops;
2573	if (!dep->direction)
2574		dwc->gadget->ep0 = &dep->endpoint;
2575
2576	dep->endpoint.caps.type_control = true;
2577
2578	return 0;
2579}
2580
2581static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
2582{
2583	struct dwc3 *dwc = dep->dwc;
2584	int mdwidth;
2585	int size;
2586
2587	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
2588	if (DWC3_IP_IS(DWC32))
2589		mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
2590
2591	/* MDWIDTH is represented in bits, we need it in bytes */
2592	mdwidth /= 8;
2593
2594	size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1));
2595	if (DWC3_IP_IS(DWC3))
2596		size = DWC3_GTXFIFOSIZ_TXFDEP(size);
2597	else
2598		size = DWC31_GTXFIFOSIZ_TXFDEP(size);
2599
2600	/* FIFO Depth is in MDWDITH bytes. Multiply */
2601	size *= mdwidth;
2602
2603	/*
2604	 * To meet performance requirement, a minimum TxFIFO size of 3x
2605	 * MaxPacketSize is recommended for endpoints that support burst and a
2606	 * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't
2607	 * support burst. Use those numbers and we can calculate the max packet
2608	 * limit as below.
2609	 */
2610	if (dwc->maximum_speed >= USB_SPEED_SUPER)
2611		size /= 3;
2612	else
2613		size /= 2;
2614
2615	usb_ep_set_maxpacket_limit(&dep->endpoint, size);
2616
2617	dep->endpoint.max_streams = 16;
2618	dep->endpoint.ops = &dwc3_gadget_ep_ops;
2619	list_add_tail(&dep->endpoint.ep_list,
2620			&dwc->gadget->ep_list);
2621	dep->endpoint.caps.type_iso = true;
2622	dep->endpoint.caps.type_bulk = true;
2623	dep->endpoint.caps.type_int = true;
2624
2625	return dwc3_alloc_trb_pool(dep);
2626}
2627
2628static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
2629{
2630	struct dwc3 *dwc = dep->dwc;
2631	int mdwidth;
2632	int size;
2633
2634	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
2635	if (DWC3_IP_IS(DWC32))
2636		mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
2637
2638	/* MDWIDTH is represented in bits, convert to bytes */
2639	mdwidth /= 8;
2640
2641	/* All OUT endpoints share a single RxFIFO space */
2642	size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
2643	if (DWC3_IP_IS(DWC3))
2644		size = DWC3_GRXFIFOSIZ_RXFDEP(size);
2645	else
2646		size = DWC31_GRXFIFOSIZ_RXFDEP(size);
2647
2648	/* FIFO depth is in MDWDITH bytes */
2649	size *= mdwidth;
2650
2651	/*
2652	 * To meet performance requirement, a minimum recommended RxFIFO size
2653	 * is defined as follow:
2654	 * RxFIFO size >= (3 x MaxPacketSize) +
2655	 * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin)
2656	 *
2657	 * Then calculate the max packet limit as below.
2658	 */
2659	size -= (3 * 8) + 16;
2660	if (size < 0)
2661		size = 0;
2662	else
2663		size /= 3;
2664
2665	usb_ep_set_maxpacket_limit(&dep->endpoint, size);
2666	dep->endpoint.max_streams = 16;
2667	dep->endpoint.ops = &dwc3_gadget_ep_ops;
2668	list_add_tail(&dep->endpoint.ep_list,
2669			&dwc->gadget->ep_list);
2670	dep->endpoint.caps.type_iso = true;
2671	dep->endpoint.caps.type_bulk = true;
2672	dep->endpoint.caps.type_int = true;
2673
2674	return dwc3_alloc_trb_pool(dep);
2675}
2676
2677static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
2678{
2679	struct dwc3_ep			*dep;
2680	bool				direction = epnum & 1;
2681	int				ret;
2682	u8				num = epnum >> 1;
2683
2684	dep = kzalloc(sizeof(*dep), GFP_KERNEL);
2685	if (!dep)
2686		return -ENOMEM;
2687
2688	dep->dwc = dwc;
2689	dep->number = epnum;
2690	dep->direction = direction;
2691	dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
2692	dwc->eps[epnum] = dep;
2693	dep->combo_num = 0;
2694	dep->start_cmd_status = 0;
2695
2696	snprintf(dep->name, sizeof(dep->name), "ep%u%s", num,
2697			direction ? "in" : "out");
2698
2699	dep->endpoint.name = dep->name;
2700
2701	if (!(dep->number > 1)) {
2702		dep->endpoint.desc = &dwc3_gadget_ep0_desc;
2703		dep->endpoint.comp_desc = NULL;
2704	}
2705
2706	if (num == 0)
2707		ret = dwc3_gadget_init_control_endpoint(dep);
2708	else if (direction)
2709		ret = dwc3_gadget_init_in_endpoint(dep);
2710	else
2711		ret = dwc3_gadget_init_out_endpoint(dep);
2712
2713	if (ret)
2714		return ret;
2715
2716	dep->endpoint.caps.dir_in = direction;
2717	dep->endpoint.caps.dir_out = !direction;
2718
2719	INIT_LIST_HEAD(&dep->pending_list);
2720	INIT_LIST_HEAD(&dep->started_list);
2721	INIT_LIST_HEAD(&dep->cancelled_list);
2722
2723	dwc3_debugfs_create_endpoint_dir(dep);
2724
2725	return 0;
2726}
2727
2728static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
2729{
2730	u8				epnum;
2731
2732	INIT_LIST_HEAD(&dwc->gadget->ep_list);
2733
2734	for (epnum = 0; epnum < total; epnum++) {
2735		int			ret;
2736
2737		ret = dwc3_gadget_init_endpoint(dwc, epnum);
2738		if (ret)
2739			return ret;
2740	}
2741
2742	return 0;
2743}
2744
2745static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
2746{
2747	struct dwc3_ep			*dep;
2748	u8				epnum;
2749
2750	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2751		dep = dwc->eps[epnum];
2752		if (!dep)
2753			continue;
2754		/*
2755		 * Physical endpoints 0 and 1 are special; they form the
2756		 * bi-directional USB endpoint 0.
2757		 *
2758		 * For those two physical endpoints, we don't allocate a TRB
2759		 * pool nor do we add them the endpoints list. Due to that, we
2760		 * shouldn't do these two operations otherwise we would end up
2761		 * with all sorts of bugs when removing dwc3.ko.
2762		 */
2763		if (epnum != 0 && epnum != 1) {
2764			dwc3_free_trb_pool(dep);
2765			list_del(&dep->endpoint.ep_list);
2766		}
2767
2768		debugfs_remove_recursive(debugfs_lookup(dep->name, dwc->root));
2769		kfree(dep);
2770	}
2771}
2772
2773/* -------------------------------------------------------------------------- */
2774
/*
 * dwc3_gadget_ep_reclaim_completed_trb - retire one TRB of a request
 * @dep: the endpoint the TRB belongs to
 * @req: the request the TRB was queued for
 * @trb: the TRB to reclaim
 * @event: the endpoint event that triggered reclaim
 * @status: completion status propagated from the event
 * @chain: true when this TRB is part of a chained (SG) series
 *
 * Advances the dequeue pointer, accumulates the remaining byte count and
 * decides whether reclaiming should stop. Returns 1 when processing of this
 * request must stop (bounce-buffer TRB, TRB still owned by HW, short packet
 * ending a transfer, missed isoc, or an IOC/LST TRB), 0 to continue with the
 * next TRB.
 */
static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status, int chain)
{
	unsigned int		count;

	dwc3_ep_inc_deq(dep);

	trace_dwc3_complete_trb(dep, trb);
	req->num_trbs--;

	/*
	 * If we're in the middle of series of chained TRBs and we
	 * receive a short transfer along the way, DWC3 will skip
	 * through all TRBs including the last TRB in the chain (the
	 * where CHN bit is zero. DWC3 will also avoid clearing HWO
	 * bit and SW has to do it manually.
	 *
	 * We're going to do that here to avoid problems of HW trying
	 * to use bogus TRBs for transfers.
	 */
	if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;

	/*
	 * For isochronous transfers, the first TRB in a service interval must
	 * have the Isoc-First type. Track and report its interval frame number.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
	    (trb->ctrl & DWC3_TRBCTL_ISOCHRONOUS_FIRST)) {
		unsigned int frame_number;

		frame_number = DWC3_TRB_CTRL_GET_SID_SOFN(trb->ctrl);
		/* Round down to the start of this endpoint's interval. */
		frame_number &= ~(dep->interval - 1);
		req->request.frame_number = frame_number;
	}

	/*
	 * We use bounce buffer for requests that needs extra TRB or OUT ZLP. If
	 * this TRB points to the bounce buffer address, it's a MPS alignment
	 * TRB. Don't add it to req->remaining calculation.
	 */
	if (trb->bpl == lower_32_bits(dep->dwc->bounce_addr) &&
	    trb->bph == upper_32_bits(dep->dwc->bounce_addr)) {
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		return 1;
	}

	/* The size field holds the bytes the controller did NOT transfer. */
	count = trb->size & DWC3_TRB_SIZE_MASK;
	req->remaining += count;

	/* TRB still owned by hardware: stop, unless we're shutting down. */
	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		return 1;

	/* A short packet on an unchained TRB ends the transfer. */
	if (event->status & DEPEVT_STATUS_SHORT && !chain)
		return 1;

	if ((trb->ctrl & DWC3_TRB_CTRL_ISP_IMI) &&
	    DWC3_TRB_SIZE_TRBSTS(trb->size) == DWC3_TRBSTS_MISSED_ISOC)
		return 1;

	/* Interrupt-on-completion or last TRB of the transfer. */
	if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
	    (trb->ctrl & DWC3_TRB_CTRL_LST))
		return 1;

	return 0;
}
2842
2843static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
2844		struct dwc3_request *req, const struct dwc3_event_depevt *event,
2845		int status)
2846{
2847	struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
2848	struct scatterlist *sg = req->sg;
2849	struct scatterlist *s;
2850	unsigned int num_queued = req->num_queued_sgs;
2851	unsigned int i;
2852	int ret = 0;
2853
2854	for_each_sg(sg, s, num_queued, i) {
2855		trb = &dep->trb_pool[dep->trb_dequeue];
2856
2857		req->sg = sg_next(s);
2858		req->num_queued_sgs--;
2859
2860		ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
2861				trb, event, status, true);
2862		if (ret)
2863			break;
2864	}
2865
2866	return ret;
2867}
2868
2869static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
2870		struct dwc3_request *req, const struct dwc3_event_depevt *event,
2871		int status)
2872{
2873	struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
2874
2875	return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb,
2876			event, status, false);
2877}
2878
2879static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
2880{
2881	return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
2882}
2883
/*
 * dwc3_gadget_ep_cleanup_completed_request - reclaim a request's TRBs and,
 * if fully completed, give it back to the gadget driver
 * @dep: the endpoint being processed
 * @event: the endpoint event that triggered cleanup
 * @req: the request to clean up
 * @status: completion status propagated from the event
 *
 * Returns the reclaim helpers' stop indication so the caller knows whether
 * to keep walking the started list.
 */
static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event,
		struct dwc3_request *req, int status)
{
	int request_status;
	int ret;

	if (req->request.num_mapped_sgs)
		ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
				status);
	else
		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
				status);

	/* req->remaining was accumulated during reclaim. */
	req->request.actual = req->request.length - req->remaining;

	if (!dwc3_gadget_ep_request_completed(req))
		goto out;

	/* Reclaim the ZLP/MPS-alignment TRB added at queue time, if any. */
	if (req->needs_extra_trb) {
		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
				status);
		req->needs_extra_trb = false;
	}

	/*
	 * The event status only reflects the status of the TRB with IOC set.
	 * For the requests that don't set interrupt on completion, the driver
	 * needs to check and return the status of the completed TRBs associated
	 * with the request. Use the status of the last TRB of the request.
	 */
	if (req->request.no_interrupt) {
		struct dwc3_trb *trb;

		trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue);
		switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) {
		case DWC3_TRBSTS_MISSED_ISOC:
			/* Isoc endpoint only */
			request_status = -EXDEV;
			break;
		case DWC3_TRB_STS_XFER_IN_PROG:
			/* Applicable when End Transfer with ForceRM=0 */
		case DWC3_TRBSTS_SETUP_PENDING:
			/* Control endpoint only */
		case DWC3_TRBSTS_OK:
		default:
			request_status = 0;
			break;
		}
	} else {
		request_status = status;
	}

	dwc3_gadget_giveback(dep, req, request_status);

out:
	return ret;
}
2942
2943static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
2944		const struct dwc3_event_depevt *event, int status)
2945{
2946	struct dwc3_request	*req;
2947	struct dwc3_request	*tmp;
2948
2949	list_for_each_entry_safe(req, tmp, &dep->started_list, list) {
2950		int ret;
2951
2952		ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
2953				req, status);
2954		if (ret)
2955			break;
2956	}
2957}
2958
2959static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
2960{
2961	struct dwc3_request	*req;
2962
2963	if (!list_empty(&dep->pending_list))
2964		return true;
2965
2966	/*
2967	 * We only need to check the first entry of the started list. We can
2968	 * assume the completed requests are removed from the started list.
2969	 */
2970	req = next_request(&dep->started_list);
2971	if (!req)
2972		return false;
2973
2974	return !dwc3_gadget_ep_request_completed(req);
2975}
2976
/* Cache the frame number carried in the event's parameters field. */
static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	dep->frame_number = event->parameters;
}
2982
/*
 * dwc3_gadget_endpoint_trbs_complete - common TRB-completion handling
 * @dep: the endpoint the event arrived on
 * @event: the endpoint event
 * @status: completion status derived from the event
 *
 * Gives back completed requests, ends idle isoc transfers or kicks the next
 * transfer, then applies the 2nd half of the pre-1.83a U1/U2 workaround.
 * Returns true when no transfer was (re)started on this endpoint.
 */
static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3		*dwc = dep->dwc;
	bool			no_started_trb = true;

	dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);

	/* Don't touch the endpoint while an End Transfer is in flight. */
	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
		goto out;

	/* The endpoint may have been disabled during cleanup. */
	if (!dep->endpoint.desc)
		return no_started_trb;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
		list_empty(&dep->started_list) &&
		(list_empty(&dep->pending_list) || status == -EXDEV))
		dwc3_stop_active_transfer(dep, true, true);
	else if (dwc3_gadget_ep_should_continue(dep))
		if (__dwc3_gadget_kick_transfer(dep) == 0)
			no_started_trb = false;

out:
	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 *
	 * Note: 'dep' is intentionally reused below to scan all endpoints;
	 * it no longer refers to the event's endpoint after this loop.
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
		u32		reg;
		int		i;

		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			/* Re-enable U1/U2 only once all endpoints are idle. */
			if (!list_empty(&dep->started_list))
				return no_started_trb;
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}

	return no_started_trb;
}
3033
3034static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
3035		const struct dwc3_event_depevt *event)
3036{
3037	int status = 0;
3038
3039	if (!dep->endpoint.desc)
3040		return;
3041
3042	if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
3043		dwc3_gadget_endpoint_frame_from_event(dep, event);
3044
3045	if (event->status & DEPEVT_STATUS_BUSERR)
3046		status = -ECONNRESET;
3047
3048	if (event->status & DEPEVT_STATUS_MISSED_ISOC)
3049		status = -EXDEV;
3050
3051	dwc3_gadget_endpoint_trbs_complete(dep, event, status);
3052}
3053
3054static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep,
3055		const struct dwc3_event_depevt *event)
3056{
3057	int status = 0;
3058
3059	dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
3060
3061	if (event->status & DEPEVT_STATUS_BUSERR)
3062		status = -ECONNRESET;
3063
3064	if (dwc3_gadget_endpoint_trbs_complete(dep, event, status))
3065		dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
3066}
3067
3068static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
3069		const struct dwc3_event_depevt *event)
3070{
3071	dwc3_gadget_endpoint_frame_from_event(dep, event);
3072
3073	/*
3074	 * The XferNotReady event is generated only once before the endpoint
3075	 * starts. It will be generated again when END_TRANSFER command is
3076	 * issued. For some controller versions, the XferNotReady event may be
3077	 * generated while the END_TRANSFER command is still in process. Ignore
3078	 * it and wait for the next XferNotReady event after the command is
3079	 * completed.
3080	 */
3081	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
3082		return;
3083
3084	(void) __dwc3_gadget_start_isoc(dep);
3085}
3086
/*
 * dwc3_gadget_endpoint_command_complete - handle Endpoint Command Complete
 * @dep: the endpoint the event arrived on
 * @event: the endpoint event (parameters carry the completed command)
 *
 * Only End Transfer completions matter here: clear the pending/started
 * flags, give back cancelled requests, finish a deferred CLEAR_STALL (and
 * ep0 delayed status), and kick any transfer delayed while the command was
 * in flight.
 */
static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	u8 cmd = DEPEVT_PARAMETER_CMD(event->parameters);

	if (cmd != DWC3_DEPCMD_ENDTRANSFER)
		return;

	/*
	 * The END_TRANSFER command will cause the controller to generate a
	 * NoStream Event, and it's not due to the host DP NoStream rejection.
	 * Ignore the next NoStream event.
	 */
	if (dep->stream_capable)
		dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;

	dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
	dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	dwc3_gadget_ep_cleanup_cancelled_requests(dep);

	/* A CLEAR_STALL deferred until the transfer ended runs now. */
	if (dep->flags & DWC3_EP_PENDING_CLEAR_STALL) {
		struct dwc3 *dwc = dep->dwc;

		dep->flags &= ~DWC3_EP_PENDING_CLEAR_STALL;
		if (dwc3_send_clear_stall_ep_cmd(dep)) {
			struct usb_ep *ep0 = &dwc->eps[0]->endpoint;

			dev_err(dwc->dev, "failed to clear STALL on %s\n", dep->name);
			/* Report the failure to the host via an ep0 stall. */
			if (dwc->delayed_status)
				__dwc3_gadget_ep0_set_halt(ep0, 1);
			return;
		}

		dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
		if (dwc->delayed_status)
			dwc3_ep0_send_delayed_status(dwc);
	}

	/* Non-isoc transfers delayed by the command can start immediately. */
	if ((dep->flags & DWC3_EP_DELAY_START) &&
	    !usb_endpoint_xfer_isoc(dep->endpoint.desc))
		__dwc3_gadget_kick_transfer(dep);

	dep->flags &= ~DWC3_EP_DELAY_START;
}
3131
/*
 * dwc3_gadget_endpoint_stream_event - handle bulk-stream endpoint events
 * @dep: the stream-capable endpoint the event arrived on
 * @event: the endpoint event (status distinguishes stream-found; parameters
 *	distinguish Prime from NoStream)
 *
 * Implements a workaround for hosts that leave the endpoint idle after a
 * NoStream rejection instead of re-priming it; see the comments below.
 */
static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	struct dwc3 *dwc = dep->dwc;

	if (event->status == DEPEVT_STREAMEVT_FOUND) {
		dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
		goto out;
	}

	/* Note: NoStream rejection event param value is 0 and not 0xFFFF */
	switch (event->parameters) {
	case DEPEVT_STREAM_PRIME:
		/*
		 * If the host can properly transition the endpoint state from
		 * idle to prime after a NoStream rejection, there's no need to
		 * force restarting the endpoint to reinitiate the stream. To
		 * simplify the check, assume the host follows the USB spec if
		 * it primed the endpoint more than once.
		 */
		if (dep->flags & DWC3_EP_FORCE_RESTART_STREAM) {
			if (dep->flags & DWC3_EP_FIRST_STREAM_PRIMED)
				dep->flags &= ~DWC3_EP_FORCE_RESTART_STREAM;
			else
				dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
		}

		break;
	case DEPEVT_STREAM_NOSTREAM:
		if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
		    !(dep->flags & DWC3_EP_FORCE_RESTART_STREAM) ||
		    !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE))
			break;

		/*
		 * If the host rejects a stream due to no active stream, by the
		 * USB and xHCI spec, the endpoint will be put back to idle
		 * state. When the host is ready (buffer added/updated), it will
		 * prime the endpoint to inform the usb device controller. This
		 * triggers the device controller to issue ERDY to restart the
		 * stream. However, some hosts don't follow this and keep the
		 * endpoint in the idle state. No prime will come despite host
		 * streams are updated, and the device controller will not be
		 * triggered to generate ERDY to move the next stream data. To
		 * workaround this and maintain compatibility with various
		 * hosts, force to reinitate the stream until the host is ready
		 * instead of waiting for the host to prime the endpoint.
		 */
		if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
			/* DWC_usb32 1.00a+ can re-prime via a generic command. */
			unsigned int cmd = DWC3_DGCMD_SET_ENDPOINT_PRIME;

			dwc3_send_gadget_generic_command(dwc, cmd, dep->number);
		} else {
			/* Older cores must restart the transfer instead. */
			dep->flags |= DWC3_EP_DELAY_START;
			dwc3_stop_active_transfer(dep, true, true);
			return;
		}
		break;
	}

out:
	dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
}
3195
3196static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
3197		const struct dwc3_event_depevt *event)
3198{
3199	struct dwc3_ep		*dep;
3200	u8			epnum = event->endpoint_number;
3201
3202	dep = dwc->eps[epnum];
3203
3204	if (!(dep->flags & DWC3_EP_ENABLED)) {
3205		if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
3206			return;
3207
3208		/* Handle only EPCMDCMPLT when EP disabled */
3209		if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
3210			return;
3211	}
3212
3213	if (epnum == 0 || epnum == 1) {
3214		dwc3_ep0_interrupt(dwc, event);
3215		return;
3216	}
3217
3218	switch (event->endpoint_event) {
3219	case DWC3_DEPEVT_XFERINPROGRESS:
3220		dwc3_gadget_endpoint_transfer_in_progress(dep, event);
3221		break;
3222	case DWC3_DEPEVT_XFERNOTREADY:
3223		dwc3_gadget_endpoint_transfer_not_ready(dep, event);
3224		break;
3225	case DWC3_DEPEVT_EPCMDCMPLT:
3226		dwc3_gadget_endpoint_command_complete(dep, event);
3227		break;
3228	case DWC3_DEPEVT_XFERCOMPLETE:
3229		dwc3_gadget_endpoint_transfer_complete(dep, event);
3230		break;
3231	case DWC3_DEPEVT_STREAMEVT:
3232		dwc3_gadget_endpoint_stream_event(dep, event);
3233		break;
3234	case DWC3_DEPEVT_RXTXFIFOEVT:
3235		break;
3236	}
3237}
3238
3239static void dwc3_disconnect_gadget(struct dwc3 *dwc)
3240{
3241	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
3242		spin_unlock(&dwc->lock);
3243		dwc->gadget_driver->disconnect(dwc->gadget);
3244		spin_lock(&dwc->lock);
3245	}
3246}
3247
3248static void dwc3_suspend_gadget(struct dwc3 *dwc)
3249{
3250	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
3251		spin_unlock(&dwc->lock);
3252		dwc->gadget_driver->suspend(dwc->gadget);
3253		spin_lock(&dwc->lock);
3254	}
3255}
3256
3257static void dwc3_resume_gadget(struct dwc3 *dwc)
3258{
3259	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
3260		spin_unlock(&dwc->lock);
3261		dwc->gadget_driver->resume(dwc->gadget);
3262		spin_lock(&dwc->lock);
3263	}
3264}
3265
3266static void dwc3_reset_gadget(struct dwc3 *dwc)
3267{
3268	if (!dwc->gadget_driver)
3269		return;
3270
3271	if (dwc->gadget->speed != USB_SPEED_UNKNOWN) {
3272		spin_unlock(&dwc->lock);
3273		usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver);
3274		spin_lock(&dwc->lock);
3275	}
3276}
3277
/*
 * dwc3_stop_active_transfer - issue End Transfer for an endpoint if needed
 * @dep: endpoint whose transfer should be ended
 * @force: forwarded to the End Transfer helper
 * @interrupt: whether to request a command-completion interrupt (CMDIOC)
 *
 * No-op when no transfer is started or when an End Transfer command is
 * already pending on this endpoint.
 */
static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
	bool interrupt)
{
	if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
	    (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
		return;

	/*
	 * NOTICE: We are violating what the Databook says about the
	 * EndTransfer command. Ideally we would _always_ wait for the
	 * EndTransfer Command Completion IRQ, but that's causing too
	 * much trouble synchronizing between us and gadget driver.
	 *
	 * We have discussed this with the IP Provider and it was
	 * suggested to giveback all requests here.
	 *
	 * Note also that a similar handling was tested by Synopsys
	 * (thanks a lot Paul) and nothing bad has come out of it.
	 * In short, what we're doing is issuing EndTransfer with
	 * CMDIOC bit set and delay kicking transfer until the
	 * EndTransfer command had completed.
	 *
	 * As of IP version 3.10a of the DWC_usb3 IP, the controller
	 * supports a mode to work around the above limitation. The
	 * software can poll the CMDACT bit in the DEPCMD register
	 * after issuing a EndTransfer command. This mode is enabled
	 * by writing GUCTL2[14]. This polling is already done in the
	 * dwc3_send_gadget_ep_cmd() function so if the mode is
	 * enabled, the EndTransfer command will have completed upon
	 * returning from this function.
	 *
	 * This mode is NOT available on the DWC_usb31 IP.  In this
	 * case, if the IOC bit is not set, then delay by 1ms
	 * after issuing the EndTransfer command.  This allows for the
	 * controller to handle the command completely before DWC3
	 * remove requests attempts to unmap USB request buffers.
	 */

	__dwc3_stop_active_transfer(dep, force, interrupt);
}
3318
3319static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
3320{
3321	u32 epnum;
3322
3323	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
3324		struct dwc3_ep *dep;
3325		int ret;
3326
3327		dep = dwc->eps[epnum];
3328		if (!dep)
3329			continue;
3330
3331		if (!(dep->flags & DWC3_EP_STALL))
3332			continue;
3333
3334		dep->flags &= ~DWC3_EP_STALL;
3335
3336		ret = dwc3_send_clear_stall_ep_cmd(dep);
3337		WARN_ON_ONCE(ret);
3338	}
3339}
3340
3341static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
3342{
3343	int			reg;
3344
3345	dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
3346
3347	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
3348	reg &= ~DWC3_DCTL_INITU1ENA;
3349	reg &= ~DWC3_DCTL_INITU2ENA;
3350	dwc3_gadget_dctl_write_safe(dwc, reg);
3351
3352	dwc3_disconnect_gadget(dwc);
3353
3354	dwc->gadget->speed = USB_SPEED_UNKNOWN;
3355	dwc->setup_packet_pending = false;
3356	usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
3357
3358	dwc->connected = false;
3359}
3360
/*
 * dwc3_gadget_reset_interrupt - handle a USB Reset device event
 * @dwc: our controller context
 *
 * Blocks request queuing while the reset is handled, stops all active
 * transfers, clears test mode and endpoint stalls, and resets the device
 * address to zero.
 */
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32			reg;

	/*
	 * Ideally, dwc3_reset_gadget() would trigger the function
	 * drivers to stop any active transfers through ep disable.
	 * However, for functions which defer ep disable, such as mass
	 * storage, we will need to rely on the call to stop active
	 * transfers here, and avoid allowing of request queuing.
	 */
	dwc->connected = false;

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such flag gets set whenever we have a SETUP_PENDING
	 * status for EP0 TRBs and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 188A)) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	dwc3_reset_gadget(dwc);
	/*
	 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
	 * Section 4.1.2 Table 4-2, it states that during a USB reset, the SW
	 * needs to ensure that it sends "a DEPENDXFER command for any active
	 * transfers."
	 */
	dwc3_stop_active_transfers(dwc);
	/* Active transfers are gone; allow request queuing again */
	dwc->connected = true;

	/* Leave any USB2 test mode that may have been active */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_gadget_dctl_write_safe(dwc, reg);
	dwc->test_mode = false;
	dwc3_clear_stall_all_ep(dwc);

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
3426
/*
 * dwc3_gadget_conndone_interrupt - handle a Connect Done device event
 * @dwc: our controller context
 *
 * Reads the negotiated speed from DSTS, programs ep0's max packet size and
 * the gadget speed accordingly, configures USB2 LPM capability, and
 * re-enables both physical control endpoints with the MODIFY action.
 */
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep		*dep;
	int			ret;
	u32			reg;
	u8			speed;

	/* dwc->speed caches the raw DSTS connect-speed encoding */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 *
	 * Currently we always use the reset value. If any platform
	 * wants to set this to a different value, we need to add a
	 * setting and update GCTL.RAMCLKSEL here.
	 */

	switch (speed) {
	case DWC3_DSTS_SUPERSPEED_PLUS:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget->ep0->maxpacket = 512;
		dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
		break;
	case DWC3_DSTS_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (DWC3_VER_IS_PRIOR(DWC3, 190A))
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget->ep0->maxpacket = 512;
		dwc->gadget->speed = USB_SPEED_SUPER;
		break;
	case DWC3_DSTS_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget->ep0->maxpacket = 64;
		dwc->gadget->speed = USB_SPEED_HIGH;
		break;
	case DWC3_DSTS_FULLSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget->ep0->maxpacket = 64;
		dwc->gadget->speed = USB_SPEED_FULL;
		break;
	case DWC3_DSTS_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget->ep0->maxpacket = 8;
		dwc->gadget->speed = USB_SPEED_LOW;
		break;
	}

	/* eps[1] is the IN side of ep0; keep its maxpacket in sync */
	dwc->eps[1]->endpoint.maxpacket = dwc->gadget->ep0->maxpacket;

	/* Enable USB2 LPM Capability */

	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
	    !dwc->usb2_gadget_lpm_disable &&
	    (speed != DWC3_DSTS_SUPERSPEED) &&
	    (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold |
					    (dwc->is_utmi_l1_suspend << 4));

		/*
		 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and
		 * DCFG.LPMCap is set, core responses with an ACK and the
		 * BESL value in the LPM token is less than or equal to LPM
		 * NYET threshold.
		 */
		WARN_ONCE(DWC3_VER_IS_PRIOR(DWC3, 240A) && dwc->has_lpm_erratum,
				"LPM Erratum not available on dwc3 revisions < 2.40a\n");

		if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A))
			reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);

		dwc3_gadget_dctl_write_safe(dwc, reg);
	} else {
		/* LPM not used: clear the capability (if disabled) and HIRD */
		if (dwc->usb2_gadget_lpm_disable) {
			reg = dwc3_readl(dwc->regs, DWC3_DCFG);
			reg &= ~DWC3_DCFG_LPM_CAP;
			dwc3_writel(dwc->regs, DWC3_DCFG, reg);
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_gadget_dctl_write_safe(dwc, reg);
	}

	/* Re-enable both physical halves of ep0 with the new maxpacket */
	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}
3556
/*
 * dwc3_gadget_wakeup_interrupt - handle a Wakeup device event
 * @dwc: our controller context
 *
 * The driver notification is identical to a link-state RESUME transition,
 * so reuse dwc3_resume_gadget() instead of open-coding the same
 * unlock/resume-callback/lock sequence a third time.
 */
static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */

	dwc3_resume_gadget(dwc);
}
3570
/*
 * dwc3_gadget_linksts_change_interrupt - handle a Link State Change event
 * @dwc: our controller context
 * @evtinfo: raw event info; low bits carry the new link state
 *
 * Applies two erratum workarounds for old cores, forwards suspend/resume
 * notifications to the gadget driver for U1/U2/U3/RESUME transitions, and
 * records the new link state in dwc->link_state.
 */
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int		pwropt;

	/*
	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
	 * Hibernation mode enabled which would show up when device detects
	 * host-initiated U3 exit.
	 *
	 * In that case, device will generate a Link State Change Interrupt
	 * from U3 to RESUME which is only necessary if Hibernation is
	 * configured in.
	 *
	 * There are no functional changes due to such spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if (DWC3_VER_IS_PRIOR(DWC3, 250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
		if (next == DWC3_LINK_STATE_U0) {
			u32	u1u2;
			u32	reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				/* Remember the bits so they can be restored later */
				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_gadget_dctl_write_safe(dwc, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	switch (next) {
	case DWC3_LINK_STATE_U1:
		/* U1 is only a suspend-equivalent at SuperSpeed */
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dwc->link_state = next;
}
3668
3669static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
3670					  unsigned int evtinfo)
3671{
3672	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
3673
3674	if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
3675		dwc3_suspend_gadget(dwc);
3676
3677	dwc->link_state = next;
3678}
3679
3680static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
3681		unsigned int evtinfo)
3682{
3683	unsigned int is_ss = evtinfo & BIT(4);
3684
3685	/*
3686	 * WORKAROUND: DWC3 revison 2.20a with hibernation support
3687	 * have a known issue which can cause USB CV TD.9.23 to fail
3688	 * randomly.
3689	 *
3690	 * Because of this issue, core could generate bogus hibernation
3691	 * events which SW needs to ignore.
3692	 *
3693	 * Refers to:
3694	 *
3695	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
3696	 * Device Fallback from SuperSpeed
3697	 */
3698	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
3699		return;
3700
3701	/* enter hibernation here */
3702}
3703
3704static void dwc3_gadget_interrupt(struct dwc3 *dwc,
3705		const struct dwc3_event_devt *event)
3706{
3707	switch (event->type) {
3708	case DWC3_DEVICE_EVENT_DISCONNECT:
3709		dwc3_gadget_disconnect_interrupt(dwc);
3710		break;
3711	case DWC3_DEVICE_EVENT_RESET:
3712		dwc3_gadget_reset_interrupt(dwc);
3713		break;
3714	case DWC3_DEVICE_EVENT_CONNECT_DONE:
3715		dwc3_gadget_conndone_interrupt(dwc);
3716		break;
3717	case DWC3_DEVICE_EVENT_WAKEUP:
3718		dwc3_gadget_wakeup_interrupt(dwc);
3719		break;
3720	case DWC3_DEVICE_EVENT_HIBER_REQ:
3721		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
3722					"unexpected hibernation event\n"))
3723			break;
3724
3725		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
3726		break;
3727	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
3728		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
3729		break;
3730	case DWC3_DEVICE_EVENT_EOPF:
3731		/* It changed to be suspend event for version 2.30a and above */
3732		if (!DWC3_VER_IS_PRIOR(DWC3, 230A)) {
3733			/*
3734			 * Ignore suspend event until the gadget enters into
3735			 * USB_STATE_CONFIGURED state.
3736			 */
3737			if (dwc->gadget->state >= USB_STATE_CONFIGURED)
3738				dwc3_gadget_suspend_interrupt(dwc,
3739						event->event_info);
3740		}
3741		break;
3742	case DWC3_DEVICE_EVENT_SOF:
3743	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
3744	case DWC3_DEVICE_EVENT_CMD_CMPL:
3745	case DWC3_DEVICE_EVENT_OVERFLOW:
3746		break;
3747	default:
3748		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
3749	}
3750}
3751
3752static void dwc3_process_event_entry(struct dwc3 *dwc,
3753		const union dwc3_event *event)
3754{
3755	trace_dwc3_event(event->raw, dwc);
3756
3757	if (!event->type.is_devspec)
3758		dwc3_endpoint_interrupt(dwc, &event->depevt);
3759	else if (event->type.type == DWC3_EVENT_TYPE_DEV)
3760		dwc3_gadget_interrupt(dwc, &event->devt);
3761	else
3762		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
3763}
3764
/*
 * dwc3_process_event_buf - drain cached events and unmask the interrupt
 * @evt: event buffer whose cached events should be processed
 *
 * Runs in the threaded handler with dwc->lock held (see
 * dwc3_thread_interrupt()). Consumes the events that the top half copied
 * into evt->cache and then unmasks the event interrupt.
 */
static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	irqreturn_t ret = IRQ_NONE;
	int left;
	u32 reg;

	left = evt->count;

	/* No snapshot from the top half means nothing to do */
	if (!(evt->flags & DWC3_EVENT_PENDING))
		return IRQ_NONE;

	while (left > 0) {
		union dwc3_event event;

		/* Each entry is one 32-bit word at the current read position */
		event.raw = *(u32 *) (evt->cache + evt->lpos);

		dwc3_process_event_entry(dwc, &event);

		/*
		 * FIXME we wrap around correctly to the next entry as
		 * almost all entries are 4 bytes in size. There is one
		 * entry which has 12 bytes which is a regular entry
		 * followed by 8 bytes data. ATM I don't know how
		 * things are organized if we get next to the a
		 * boundary so I worry about that once we try to handle
		 * that.
		 */
		evt->lpos = (evt->lpos + 4) % evt->length;
		left -= 4;
	}

	evt->count = 0;
	ret = IRQ_HANDLED;

	/* Unmask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
	reg &= ~DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);

	if (dwc->imod_interval) {
		/* Re-arm interrupt moderation for the next burst of events */
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
	}

	/* Keep the clearing of DWC3_EVENT_PENDING at the end */
	evt->flags &= ~DWC3_EVENT_PENDING;

	return ret;
}
3815
3816static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
3817{
3818	struct dwc3_event_buffer *evt = _evt;
3819	struct dwc3 *dwc = evt->dwc;
3820	unsigned long flags;
3821	irqreturn_t ret = IRQ_NONE;
3822
3823	local_bh_disable();
3824	spin_lock_irqsave(&dwc->lock, flags);
3825	ret = dwc3_process_event_buf(evt);
3826	spin_unlock_irqrestore(&dwc->lock, flags);
3827	local_bh_enable();
3828
3829	return ret;
3830}
3831
/*
 * dwc3_check_event_buf - top-half IRQ work: snapshot pending events
 * @evt: event buffer to check
 *
 * Copies new events from the hardware event buffer into evt->cache, masks
 * the event interrupt, and requests the threaded handler. Returns IRQ_NONE
 * when the controller reported no pending events.
 */
static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	u32 amount;
	u32 count;
	u32 reg;

	if (pm_runtime_suspended(dwc->dev)) {
		/* Registers are unreachable while suspended; defer the work */
		dwc->pending_events = true;
		/*
		 * Trigger runtime resume. The get() function will be balanced
		 * after processing the pending events in dwc3_process_pending
		 * events().
		 */
		pm_runtime_get(dwc->dev);
		disable_irq_nosync(dwc->irq_gadget);
		return IRQ_HANDLED;
	}

	/*
	 * With PCIe legacy interrupt, test shows that top-half irq handler can
	 * be called again after HW interrupt deassertion. Check if bottom-half
	 * irq event handler completes before caching new event to prevent
	 * losing events.
	 */
	if (evt->flags & DWC3_EVENT_PENDING)
		return IRQ_HANDLED;

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;

	/* Mask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
	reg |= DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);

	/* Copy into the cache, handling wrap-around of the circular buffer */
	amount = min(count, evt->length - evt->lpos);
	memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);

	if (amount < count)
		memcpy(evt->cache, evt->buf, count - amount);

	/* Acknowledge the bytes we just cached */
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);

	return IRQ_WAKE_THREAD;
}
3883
3884static irqreturn_t dwc3_interrupt(int irq, void *_evt)
3885{
3886	struct dwc3_event_buffer	*evt = _evt;
3887
3888	return dwc3_check_event_buf(evt);
3889}
3890
3891static int dwc3_gadget_get_irq(struct dwc3 *dwc)
3892{
3893	struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
3894	int irq;
3895
3896	irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral");
3897	if (irq > 0)
3898		goto out;
3899
3900	if (irq == -EPROBE_DEFER)
3901		goto out;
3902
3903	irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
3904	if (irq > 0)
3905		goto out;
3906
3907	if (irq == -EPROBE_DEFER)
3908		goto out;
3909
3910	irq = platform_get_irq(dwc3_pdev, 0);
3911	if (irq > 0)
3912		goto out;
3913
3914	if (!irq)
3915		irq = -EINVAL;
3916
3917out:
3918	return irq;
3919}
3920
3921static void dwc_gadget_release(struct device *dev)
3922{
3923	struct usb_gadget *gadget = container_of(dev, struct usb_gadget, dev);
3924
3925	kfree(gadget);
3926}
3927
3928/**
3929 * dwc3_gadget_init - initializes gadget related registers
3930 * @dwc: pointer to our controller context structure
3931 *
3932 * Returns 0 on success otherwise negative errno.
3933 */
3934int dwc3_gadget_init(struct dwc3 *dwc)
3935{
3936	int ret;
3937	int irq;
3938	struct device *dev;
3939
3940	irq = dwc3_gadget_get_irq(dwc);
3941	if (irq < 0) {
3942		ret = irq;
3943		goto err0;
3944	}
3945
3946	dwc->irq_gadget = irq;
3947
3948	dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
3949					  sizeof(*dwc->ep0_trb) * 2,
3950					  &dwc->ep0_trb_addr, GFP_KERNEL);
3951	if (!dwc->ep0_trb) {
3952		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
3953		ret = -ENOMEM;
3954		goto err0;
3955	}
3956
3957	dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL);
3958	if (!dwc->setup_buf) {
3959		ret = -ENOMEM;
3960		goto err1;
3961	}
3962
3963	dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE,
3964			&dwc->bounce_addr, GFP_KERNEL);
3965	if (!dwc->bounce) {
3966		ret = -ENOMEM;
3967		goto err2;
3968	}
3969
3970	init_completion(&dwc->ep0_in_setup);
3971	dwc->gadget = kzalloc(sizeof(struct usb_gadget), GFP_KERNEL);
3972	if (!dwc->gadget) {
3973		ret = -ENOMEM;
3974		goto err3;
3975	}
3976
3977
3978	usb_initialize_gadget(dwc->dev, dwc->gadget, dwc_gadget_release);
3979	dev				= &dwc->gadget->dev;
3980	dev->platform_data		= dwc;
3981	dwc->gadget->ops		= &dwc3_gadget_ops;
3982	dwc->gadget->speed		= USB_SPEED_UNKNOWN;
3983	dwc->gadget->sg_supported	= true;
3984	dwc->gadget->name		= "dwc3-gadget";
3985	dwc->gadget->lpm_capable	= !dwc->usb2_gadget_lpm_disable;
3986
3987	/*
3988	 * FIXME We might be setting max_speed to <SUPER, however versions
3989	 * <2.20a of dwc3 have an issue with metastability (documented
3990	 * elsewhere in this driver) which tells us we can't set max speed to
3991	 * anything lower than SUPER.
3992	 *
3993	 * Because gadget.max_speed is only used by composite.c and function
3994	 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
3995	 * to happen so we avoid sending SuperSpeed Capability descriptor
3996	 * together with our BOS descriptor as that could confuse host into
3997	 * thinking we can handle super speed.
3998	 *
3999	 * Note that, in fact, we won't even support GetBOS requests when speed
4000	 * is less than super speed because we don't have means, yet, to tell
4001	 * composite.c that we are USB 2.0 + LPM ECN.
4002	 */
4003	if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
4004	    !dwc->dis_metastability_quirk)
4005		dev_info(dwc->dev, "changing max_speed on rev %08x\n",
4006				dwc->revision);
4007
4008	dwc->gadget->max_speed		= dwc->maximum_speed;
4009
4010	/*
4011	 * REVISIT: Here we should clear all pending IRQs to be
4012	 * sure we're starting from a well known location.
4013	 */
4014
4015	ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps);
4016	if (ret)
4017		goto err4;
4018
4019	ret = usb_add_gadget(dwc->gadget);
4020	if (ret) {
4021		dev_err(dwc->dev, "failed to add gadget\n");
4022		goto err5;
4023	}
4024
4025	dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
4026
4027	return 0;
4028
4029err5:
4030	dwc3_gadget_free_endpoints(dwc);
4031err4:
4032	usb_put_gadget(dwc->gadget);
4033	dwc->gadget = NULL;
4034err3:
4035	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
4036			dwc->bounce_addr);
4037
4038err2:
4039	kfree(dwc->setup_buf);
4040
4041err1:
4042	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
4043			dwc->ep0_trb, dwc->ep0_trb_addr);
4044
4045err0:
4046	return ret;
4047}
4048
4049/* -------------------------------------------------------------------------- */
4050
/*
 * dwc3_gadget_exit - tear down everything set up by dwc3_gadget_init()
 * @dwc: pointer to our controller context structure
 *
 * Safe to call when gadget init never ran (dwc->gadget is NULL). The
 * teardown order mirrors the init order in reverse: unregister the gadget,
 * free the endpoints, drop the gadget reference, then release the DMA and
 * setup buffers.
 */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	if (!dwc->gadget)
		return;

	usb_del_gadget(dwc->gadget);
	dwc3_gadget_free_endpoints(dwc);
	usb_put_gadget(dwc->gadget);
	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
			  dwc->bounce_addr);
	kfree(dwc->setup_buf);
	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
			  dwc->ep0_trb, dwc->ep0_trb_addr);
}
4065
4066int dwc3_gadget_suspend(struct dwc3 *dwc)
4067{
4068	if (!dwc->gadget_driver)
4069		return 0;
4070
4071	dwc3_gadget_run_stop(dwc, false, false);
4072	dwc3_disconnect_gadget(dwc);
4073	__dwc3_gadget_stop(dwc);
4074
4075	return 0;
4076}
4077
4078int dwc3_gadget_resume(struct dwc3 *dwc)
4079{
4080	int			ret;
4081
4082	if (!dwc->gadget_driver || !dwc->softconnect)
4083		return 0;
4084
4085	ret = __dwc3_gadget_start(dwc);
4086	if (ret < 0)
4087		goto err0;
4088
4089	ret = dwc3_gadget_run_stop(dwc, true, false);
4090	if (ret < 0)
4091		goto err1;
4092
4093	return 0;
4094
4095err1:
4096	__dwc3_gadget_stop(dwc);
4097
4098err0:
4099	return ret;
4100}
4101
4102void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
4103{
4104	if (dwc->pending_events) {
4105		dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
4106		dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf);
4107		pm_runtime_put(dwc->dev);
4108		dwc->pending_events = false;
4109		enable_irq(dwc->irq_gadget);
4110	}
4111}
4112