// SPDX-License-Identifier: GPL-2.0+
/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
 * Copyright (C) 2012 Broadcom Corporation
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_iudma.h>
#include <bcm63xx_dev_usb_usbd.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>

#define DRV_MODULE_NAME		"bcm63xx_udc"

static const char bcm63xx_ep0name[] = "ep0";

static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} bcm63xx_ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(bcm63xx_ep0name,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4out-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};

static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 *   2) Host sends 512 bytes of data
 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
 *   4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
 * considerably fewer IRQs, but error recovery is less robust.  Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing, because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");

#define BCM63XX_NUM_EP			5
#define BCM63XX_NUM_IUDMA		6
#define BCM63XX_NUM_FIFO_PAIRS		3

#define IUDMA_RESET_TIMEOUT_US		10000

#define IUDMA_EP0_RXCHAN		0
#define IUDMA_EP0_TXCHAN		1

#define IUDMA_MAX_FRAGMENT		2048
#define BCM63XX_MAX_CTRL_PKT		64

#define BCMEP_CTRL			0x00
#define BCMEP_ISOC			0x01
#define BCMEP_BULK			0x02
#define BCMEP_INTR			0x03

#define BCMEP_OUT			0x00
#define BCMEP_IN			0x01

#define BCM63XX_SPD_FULL		1
#define BCM63XX_SPD_HIGH		0

#define IUDMA_DMAC_OFFSET		0x200
#define IUDMA_DMAS_OFFSET		0x400

enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};

static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};

/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number.
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt).
 * @dir: Direction (in, out).
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int				ep_num;
	int				n_bds;
	int				ep_type;
	int				dir;
	int				n_fifo_slots;
	int				max_pkt_hs;
	int				max_pkt_fs;
};

static const struct iudma_ch_cfg iudma_defaults[] = {

	/* This controller was designed to support a CDC/RNDIS application.
	   It may be possible to reconfigure some of the endpoints, but
	   the hardware limitations (FIFO sizing and number of DMA channels)
	   may significantly impact flexibility and/or stability.  Change
	   these values at your own risk.

	      ep_num       ep_type           n_fifo_slots    max_pkt_fs
	idx      |  n_bds     |         dir       |  max_pkt_hs  |
	 |       |    |       |          |        |      |       |       */
	[0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,     64 },
	[3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,     64 },
	[4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,     64 },
	[5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,     64 },
};
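
/*
 * Even-numbered IUDMA channels are RX (OUT) and odd-numbered channels are
 * TX (IN); see iudma_init_channel().  Channels 0 and 1 (IUDMA_EP0_RXCHAN /
 * IUDMA_EP0_TXCHAN) are dedicated to ep0.
 */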

struct bcm63xx_udc;

/**
 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 * @enabled: Whether bcm63xx_ep_enable() has been called.
 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 * @is_tx: true for TX, false for RX.
 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next BD available for a new packet.
 * @end_bd: Points to the final BD in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 * @n_bds: Total number of BDs in the ring.
 *
 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 * only.
 *
 * Each bulk/intr endpoint has a single IUDMA channel and a single
 * struct usb_ep.
 */
struct iudma_ch {
	unsigned int			ch_idx;
	int				ep_num;
	bool				enabled;
	int				max_pkt;
	bool				is_tx;
	struct bcm63xx_ep		*bep;
	struct bcm63xx_udc		*udc;

	struct bcm_enet_desc		*read_bd;
	struct bcm_enet_desc		*write_bd;
	struct bcm_enet_desc		*end_bd;
	int				n_bds_used;

	struct bcm_enet_desc		*bd_ring;
	dma_addr_t			bd_ring_dma;
	unsigned int			n_bds;
};

/**
 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to IUDMA channel state.
 * @ep: USB gadget layer representation of the EP.
 * @udc: Reference to the device controller.
 * @queue: Linked list of outstanding requests for this EP.
 * @halted: 1 if the EP is stalled; 0 otherwise.
 */
struct bcm63xx_ep {
	unsigned int			ep_num;
	struct iudma_ch			*iudma;
	struct usb_ep			ep;
	struct bcm63xx_udc		*udc;
	struct list_head		queue;
	unsigned			halted:1;
};

/**
 * struct bcm63xx_req - Internal (driver) state of a single request.
 * @queue: Links back to the EP's request list.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes in outstanding BD entries.
 * @iudma: IUDMA channel used for the request.
 */
struct bcm63xx_req {
	struct list_head		queue;		/* ep's requests */
	struct usb_request		req;
	unsigned int			offset;
	unsigned int			bd_bytes;
	struct iudma_ch			*iudma;
};

/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB device.
 * @driver: Driver for USB device.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
 */
struct bcm63xx_udc {
	spinlock_t			lock;

	struct device			*dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk			*usbd_clk;
	struct clk			*usbh_clk;

	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;

	void __iomem			*usbd_regs;
	void __iomem			*iudma_regs;

	struct bcm63xx_ep		bep[BCM63XX_NUM_EP];
	struct iudma_ch			iudma[BCM63XX_NUM_IUDMA];

	int				cfg;
	int				iface;
	int				alt_iface;

	struct bcm63xx_req		ep0_ctrl_req;
	u8				*ep0_ctrl_buf;

	int				ep0state;
	struct work_struct		ep0_wq;

	unsigned long			wedgemap;

	unsigned			ep0_req_reset:1;
	unsigned			ep0_req_set_cfg:1;
	unsigned			ep0_req_set_iface:1;
	unsigned			ep0_req_shutdown:1;

	unsigned			ep0_req_completed:1;
	struct usb_request		*ep0_reply;
	struct usb_request		*ep0_request;

	struct dentry			*debugfs_root;
};

static const struct usb_ep_ops bcm63xx_udc_ep_ops;

/***********************************************************************
 * Convenience functions
 ***********************************************************************/

static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}

static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}

static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}

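/*
 * Register accessors: usbd_*() access the USBD core block; usb_dma_*()
 * access the shared IUDMA registers; usb_dmac_*() and usb_dmas_*() access
 * the per-channel DMA config and state RAM areas at IUDMA_DMAC_OFFSET and
 * IUDMA_DMAS_OFFSET, respectively.
 */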
static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}

static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}

static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}

static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}

static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
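		/* brief delay, presumably to let the clocks settle */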
		udelay(10);
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}

/***********************************************************************
 * Low-level IUDMA / FIFO operations
 ***********************************************************************/

/**
 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The "init_sel" signal is used as a selection index for both endpoints
 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 * depends on the context.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}

/**
 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
 * @udc: Reference to the device controller.
 * @bep: Endpoint on which to operate.
 * @is_stalled: true to enable stall, false to disable.
 *
 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
 * halt/stall conditions.
 */
static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
	bool is_stalled)
{
	u32 val;

	val = USBD_STALL_UPDATE_MASK |
		(is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
		(bep->ep_num << USBD_STALL_EPNUM_SHIFT);
	usbd_writel(udc, val, USBD_STALL_REG);
}

/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

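		/* dummy read-back, likely to flush the posted writes */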
		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}

/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 val;

	bcm63xx_ep_dma_select(udc, ep_num);

	val = usbd_readl(udc, USBD_CONTROL_REG);
	val |= USBD_CONTROL_FIFO_RESET_MASK;
	usbd_writel(udc, val, USBD_CONTROL_REG);
	usbd_readl(udc, USBD_CONTROL_REG);
}

/**
 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
{
	int i;

	for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
		bcm63xx_fifo_reset_ep(udc, i);
}

/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}

/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		if (idx < 0)
			continue;
		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}

/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this queues a single buffer descriptor, because RX IUDMA
 * does not honor SOP/EOP and handling multiple buffers would be ambiguous.
 * iudma_write() may therefore be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
		iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

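	/*
	 * A transfer that is a nonzero multiple of max_pkt needs an explicit
	 * zero-length packet to terminate it, so honor req.zero by forcing
	 * one extra (empty) pass through the BD loop below.
	 */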
	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
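		/* commit the buffer address before handing the BD to the HW */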
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}

/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

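		/* the HW still owns this BD, so the transfer is incomplete */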
		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}

/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
				   ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the FIFO data until the BD completes */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
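	/* ack any interrupts still pending on this channel */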
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
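	/* make sure the cleared BDs are visible before the ring is reused */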
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}

/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 is always active; others are controlled by the gadget driver */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}

/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;

	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}

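	/* unmask the global IRQ summary bits for all IUDMA channels */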
	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA) - 1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}

/**
 * iudma_uninit - Uninitialize IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;

	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);

	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}

/***********************************************************************
 * Other low-level USBD operations
 ***********************************************************************/

/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}

/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			       GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}

/**
 * bcm63xx_select_pullup - Enable/disable the pullup on D+
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * If the pullup is active, the host will sense a FS/HS device connected to
 * the port.  If the pullup is inactive, the host will think the USB
 * device has been disconnected.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}

/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}

/**
 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
{
	int i, rc = 0;
	u32 val;

	udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
					 GFP_KERNEL);
	if (!udc->ep0_ctrl_buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	for (i = 0; i < BCM63XX_NUM_EP; i++) {
		struct bcm63xx_ep *bep = &udc->bep[i];

		bep->ep.name = bcm63xx_ep_info[i].name;
		bep->ep.caps = bcm63xx_ep_info[i].caps;
		bep->ep_num = i;
		bep->ep.ops = &bcm63xx_udc_ep_ops;
		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
		bep->halted = 0;
		usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
		bep->udc = udc;
		bep->ep.desc = NULL;
		INIT_LIST_HEAD(&bep->queue);
	}

	udc->gadget.ep0 = &udc->bep[0].ep;
	list_del(&udc->bep[0].ep.ep_list);

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->ep0state = EP0_SHUTDOWN;

	udc->usbh_clk = clk_get(udc->dev, "usbh");
	if (IS_ERR(udc->usbh_clk))
		return -EIO;

	udc->usbd_clk = clk_get(udc->dev, "usbd");
	if (IS_ERR(udc->usbd_clk)) {
		clk_put(udc->usbh_clk);
		return -EIO;
	}

	set_clocks(udc, true);

	val = USBD_CONTROL_AUTO_CSRS_MASK |
	      USBD_CONTROL_DONE_CSRS_MASK |
	      (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
	usbd_writel(udc, val, USBD_CONTROL_REG);

	val = USBD_STRAPS_APP_SELF_PWR_MASK |
	      USBD_STRAPS_APP_RAM_IF_MASK |
	      USBD_STRAPS_APP_CSRPRGSUP_MASK |
	      USBD_STRAPS_APP_8BITPHY_MASK |
	      USBD_STRAPS_APP_RMTWKUP_MASK;

	if (udc->gadget.max_speed == USB_SPEED_HIGH)
		val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
	else
		val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
	usbd_writel(udc, val, USBD_STRAPS_REG);

	bcm63xx_set_ctrl_irqs(udc, false);

	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);

	val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
	      USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);

	rc = iudma_init(udc);
	set_clocks(udc, false);
	if (rc)
		bcm63xx_uninit_udc_hw(udc);

	return rc;
}

/***********************************************************************
 * Standard EP gadget operations
 ***********************************************************************/

/**
 * bcm63xx_ep_enable - Enable one endpoint.
 * @ep: Endpoint to enable.
 * @desc: Contains max packet, direction, etc.
 *
 * Most of the endpoint parameters are fixed in this controller, so there
 * isn't much for this function to do.
 */
static int bcm63xx_ep_enable(struct usb_ep *ep,
	const struct usb_endpoint_descriptor *desc)
{
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;
	struct iudma_ch *iudma;
	unsigned long flags;

	/* validate arguments before dereferencing ep */
	if (!ep || !desc || ep->name == bcm63xx_ep0name)
		return -EINVAL;

	bep = our_ep(ep);
	udc = bep->udc;
	iudma = bep->iudma;

	if (!udc->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc->lock, flags);
	if (iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}

	iudma->enabled = true;
	BUG_ON(!list_empty(&bep->queue));

	iudma_reset_channel(udc, iudma);

	bep->halted = 0;
	bcm63xx_set_stall(udc, bep, false);
	clear_bit(bep->ep_num, &udc->wedgemap);

	ep->desc = desc;
	ep->maxpacket = usb_endpoint_maxp(desc);

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_ep_disable - Disable one endpoint.
 * @ep: Endpoint to disable.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;
	struct iudma_ch *iudma;
	struct bcm63xx_req *breq, *n;
	unsigned long flags;

	/* validate ep before dereferencing it */
	if (!ep || !ep->desc)
		return -EINVAL;

	bep = our_ep(ep);
	udc = bep->udc;
	iudma = bep->iudma;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	iudma_reset_channel(udc, iudma);

	if (!list_empty(&bep->queue)) {
		list_for_each_entry_safe(breq, n, &bep->queue, queue) {
			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			spin_unlock_irqrestore(&udc->lock, flags);
			usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}
	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_udc_alloc_request - Allocate a new request.
 * @ep: Endpoint associated with the request.
 * @mem_flags: Flags to pass to kzalloc().
 */
static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
	gfp_t mem_flags)
{
	struct bcm63xx_req *breq;

	breq = kzalloc(sizeof(*breq), mem_flags);
	if (!breq)
		return NULL;
	return &breq->req;
}

/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);

	kfree(breq);
}

/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
 *
 * If the queue is empty, start this request immediately.  Otherwise, add
 * it to the list.
 *
 * ep0 replies are sent through this function from the gadget driver, but
 * they are treated differently because they need to be handled by the ep0
 * state machine.  (Sometimes they are replies to control requests that
 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
 */
static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
	gfp_t mem_flags)
{
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;
	struct bcm63xx_req *breq;
	unsigned long flags;
	int rc = 0;

	/* validate arguments before dereferencing ep */
	if (unlikely(!ep || !req || !req->complete || !req->buf))
		return -EINVAL;

	bep = our_ep(ep);
	udc = bep->udc;
	breq = our_req(req);

	req->actual = 0;
	req->status = 0;
	breq->offset = 0;

	if (bep == &udc->bep[0]) {
		/* only one reply per request, please */
		if (udc->ep0_reply)
			return -EINVAL;

		udc->ep0_reply = req;
		schedule_work(&udc->ep0_wq);
		return 0;
	}

	spin_lock_irqsave(&udc->lock, flags);
	if (!bep->iudma->enabled) {
		rc = -ESHUTDOWN;
		goto out;
	}

	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
	if (rc == 0) {
		list_add_tail(&breq->queue, &bep->queue);
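		/*
		 * Only kick the HW if the channel was idle; otherwise this
		 * request is started when its predecessor completes.
		 */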
		if (list_is_singular(&bep->queue))
			iudma_write(udc, bep->iudma, breq);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}

/**
 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
 * @ep: Endpoint associated with the request.
 * @req: Request to remove.
 *
 * If the request is not at the head of the queue, this is easy - just nuke
 * it.  If the request is at the head of the queue, we'll need to stop the
 * DMA transaction and then queue up the successor.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}

/**
 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
 * @ep: Endpoint to halt.
 * @value: Zero to clear halt; nonzero to set halt.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	bcm63xx_set_stall(udc, bep, !!value);
	bep->halted = value;
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/**
 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
 * @ep: Endpoint to wedge.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	set_bit(bep->ep_num, &udc->wedgemap);
	bcm63xx_set_stall(udc, bep, true);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable		= bcm63xx_ep_enable,
	.disable	= bcm63xx_ep_disable,

	.alloc_request	= bcm63xx_udc_alloc_request,
	.free_request	= bcm63xx_udc_free_request,

	.queue		= bcm63xx_udc_queue,
	.dequeue	= bcm63xx_udc_dequeue,

	.set_halt	= bcm63xx_udc_set_halt,
	.set_wedge	= bcm63xx_udc_set_wedge,
};

/***********************************************************************
 * EP0 handling
 ***********************************************************************/

/**
 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request.
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
{
	int rc;

	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
 * @udc: Reference to the device controller.
 *
 * Many standard requests are handled automatically in the hardware, but
 * we still need to pass them to the gadget driver so that it can
 * reconfigure the interfaces/endpoints if necessary.
 *
 * Unfortunately we are not able to send a STALL response if the host
 * requests an invalid configuration.  If this happens, we'll have to be
 * content with printing a warning.
 */
static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
	ctrl.wValue = cpu_to_le16(udc->cfg);
	ctrl.wIndex = 0;
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
			udc->cfg);
	}
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
	ctrl.bRequest = USB_REQ_SET_INTERFACE;
	ctrl.wValue = cpu_to_le16(udc->alt_iface);
	ctrl.wIndex = cpu_to_le16(udc->iface);
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
			udc->iface, udc->alt_iface);
	}
	return rc;
}

/**
 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @req: USB gadget layer representation of the request.
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];

	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;

	req->actual = 0;
	breq->offset = 0;
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}

/**
 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
 * @udc: Reference to the device controller.
 * @req: USB gadget layer representation of the request.
 * @status: Status to return to the gadget driver.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	if (status)
		req->actual = 0;
	if (req->complete) {
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}

/**
 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
 *   reset/shutdown.
 * @udc: Reference to the device controller.
 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;

	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}

/**
 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
 *   transfer len.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
{
	struct usb_request *req = udc->ep0_request;

	udc->ep0_req_completed = 0;
	udc->ep0_request = NULL;

	return req->actual;
}

/**
 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @length: Number of bytes to TX/RX.
 *
 * Used for simple transfers performed by the ep0 worker.  This will always
 * use ep0_ctrl_req / ep0_ctrl_buf.
 */
static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
	int length)
{
	struct usb_request *req = &udc->ep0_ctrl_req.req;

	req->buf = udc->ep0_ctrl_buf;
	req->length = length;
	req->complete = NULL;

	bcm63xx_ep0_map_write(udc, ch_idx, req);
}

/**
 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
 * @udc: Reference to the device controller.
 *
 * EP0_IDLE probably shouldn't ever happen.  EP0_REQUEUE means we're ready
 * for the next packet.  Anything else means the transaction requires multiple
 * stages of handling.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
	 * just throw it away.
	 */
	if (rc == 0)
		return EP0_REQUEUE;

	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}

	/* Process new SETUP packet arriving on ep0 */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}

	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}

/**
 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * In state EP0_IDLE, the RX descriptor is either pending, or has been
 * filled with a SETUP packet from the host.  This function handles new
 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
 * and reset/shutdown events.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);

		/* bcm63xx_udc_pullup() is waiting for this */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET shows up during an ep0
		 * transaction (especially if a laggy driver like gadgetfs
		 * is in use).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}

/**
 * bcm63xx_ep0_one_round - Handle the current ep0 state.
 * @udc: Reference to the device controller.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* set up descriptor to receive SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
					     BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Normal case: TX request is in ep0_reply (queued by the
		 * callback), or will be queued shortly.  When it's here,
		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
		 *
		 * Shutdown case: Stop waiting for the reply.  Just
		 * REQUEUE->IDLE.  The gadget driver is NOT expected to
		 * queue anything else now.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Normal case: TX packet (ep0_reply) is in flight; wait for
		 * it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
		 * completion to the gadget driver, then REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);
			/*
			 * the "ack" sometimes gets eaten (see
			 * bcm63xx_ep0_do_idle)
			 */
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* send 0-byte ack to host */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Normal case: 0-byte OUT ack packet is in flight; wait
		 * for it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: just cancel the transmission.  Don't bother
		 * calling the completion, because it originated from this
		 * function anyway.  Then go back to REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Normal case: we spoofed a SETUP packet and are now
		 * waiting for the gadget driver to send a 0-byte reply.
		 * This doesn't actually get sent to the HW because the
		 * HW has already sent its own reply.  Once we get the
		 * response, return to IDLE.
		 *
		 * Shutdown case: return to IDLE immediately.
		 *
		 * Note that the ep0 RX descriptor has remained queued
		 * (and possibly unfilled) during this entire transaction.
		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
		 * or SET_INTERFACE transactions.
		 */
		struct usb_request *r = udc->ep0_reply;

		if (!r) {
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}

		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		break;
	}

	if (udc->ep0state == ep0state)
		return -EAGAIN;

	udc->ep0state = ep0state;
	return 0;
}

/**
 * bcm63xx_ep0_process - ep0 worker thread / state machine.
 * @w: Workqueue struct.
 *
 * bcm63xx_ep0_process is triggered any time an event occurs on ep0.  It
 * is used to synchronize ep0 events and ensure that both HW and SW events
 * occur in a well-defined order.  When the ep0 IUDMA queues are idle, it may
 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
 * by the USBD hardware.
 *
 * The worker function will continue iterating around the state machine
 * until there is nothing left to do.  Usually "nothing left to do" means
 * that we're waiting for a new event from the hardware.
 */
static void bcm63xx_ep0_process(struct work_struct *w)
{
	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);

	spin_lock_irq(&udc->lock);
	while (bcm63xx_ep0_one_round(udc) == 0)
		;
	spin_unlock_irq(&udc->lock);
}

/***********************************************************************
 * Standard UDC gadget operations
 ***********************************************************************/

/**
 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
 * @gadget: USB device.
 */
static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);

	return (usbd_readl(udc, USBD_STATUS_REG) &
		USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
}

/**
 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
 * @gadget: USB device.
 * @is_on: 0 to disable pullup, 1 to enable.
 *
 * See notes in bcm63xx_select_pullup().
 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);

		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(udc, true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(udc, false);

		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);

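		/* poll until the ep0 worker has acknowledged the shutdown */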
		while (1) {
			schedule_work(&udc->ep0_wq);
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		return 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}

/**
 * bcm63xx_udc_start - Start the controller.
 * @gadget: USB device.
 * @driver: Driver for USB device.
 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
	    !driver->setup)
		return -EINVAL;
	if (!udc)
		return -ENODEV;
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

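	/* Power the block up and restore the default HW state. */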
	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);

	udc->driver = driver;
	driver->driver.bus = NULL;
	udc->gadget.dev.of_node = udc->dev->of_node;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/**
 * bcm63xx_udc_stop - Shut down the controller.
 * @gadget: USB device.
 */
static int bcm63xx_udc_stop(struct usb_gadget *gadget)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	udc->driver = NULL;

	/*
	 * If we switch the PHY too abruptly after dropping D+, the host
	 * will often complain:
	 *
	 *     hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
	 *
	 * msleep() sleeps, so drop udc->lock (and re-enable interrupts)
	 * for the duration of the delay.
	 */
	spin_unlock_irqrestore(&udc->lock, flags);
	msleep(100);
	spin_lock_irqsave(&udc->lock, flags);

	bcm63xx_select_phy_mode(udc, false);
	set_clocks(udc, false);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame	= bcm63xx_udc_get_frame,
	.pullup		= bcm63xx_udc_pullup,
	.udc_start	= bcm63xx_udc_start,
	.udc_stop	= bcm63xx_udc_stop,
};

/***********************************************************************
 * IRQ handling
 ***********************************************************************/

/**
 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
 * @udc: Reference to the device controller.
 *
 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
 * The driver never sees the raw control packets coming in on the ep0
 * IUDMA channel, but at least we get an interrupt event to tell us that
 * new values are waiting in the USBD_STATUS register.
 */
static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);

	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
			 USBD_STATUS_ALTINTF_SHIFT;
	bcm63xx_ep_setup(udc);
}

/**
 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
 * @udc: Reference to the device controller.
 *
 * The link speed update coincides with a SETUP IRQ.  Returns 1 if the
 * speed has changed, so that the caller can update the endpoint settings.
 */
static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
	enum usb_device_speed oldspeed = udc->gadget.speed;

	switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
	case BCM63XX_SPD_HIGH:
		udc->gadget.speed = USB_SPEED_HIGH;
		break;
	case BCM63XX_SPD_FULL:
		udc->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		/* this should never happen */
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		dev_err(udc->dev,
			"received SETUP packet with invalid link speed\n");
		return 0;
	}

	if (udc->gadget.speed != oldspeed) {
		dev_info(udc->dev, "link up, %s-speed mode\n",
			 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
		return 1;
	} else {
		return 0;
	}
}

/**
 * bcm63xx_update_wedge - Iterate through wedged endpoints.
 * @udc: Reference to the device controller.
 * @new_status: true to "refresh" wedge status; false to clear it.
 *
 * On a SETUP interrupt, we need to manually "refresh" the wedge status
 * because the controller hardware is designed to automatically clear
 * stalls in response to a CLEAR_FEATURE request from the host.
 *
 * On a RESET interrupt, by contrast, we clear the stalls and forget all
 * wedge state, returning the endpoints to their default condition.
 */
static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
{
	int i;

	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
		if (!new_status)
			clear_bit(i, &udc->wedgemap);
	}
}

/**
 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the device controller.
 *
 * This is where we handle link (VBUS) down, USB reset, speed changes,
 * SET_CONFIGURATION, and SET_INTERFACE events.
 */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
	struct bcm63xx_udc *udc = dev_id;
	u32 stat;
	bool disconnected = false, bus_reset = false;

	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

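	/* Ack the events we are about to service. */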
	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);

	spin_lock(&udc->lock);
	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
		/* VBUS toggled */

		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
		      USBD_EVENTS_USB_LINK_MASK) &&
		      udc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_info(udc->dev, "link down\n");

		udc->gadget.speed = USB_SPEED_UNKNOWN;
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		bcm63xx_update_wedge(udc, false);

		udc->ep0_req_reset = 1;
		schedule_work(&udc->ep0_wq);
		bus_reset = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
		if (bcm63xx_update_link_speed(udc)) {
			bcm63xx_fifo_setup(udc);
			bcm63xx_ep_setup(udc);
		}
		bcm63xx_update_wedge(udc, true);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_cfg = 1;
		schedule_work(&udc->ep0_wq);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_iface = 1;
		schedule_work(&udc->ep0_wq);
	}
	spin_unlock(&udc->lock);

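	/* Notify the gadget driver with udc->lock released. */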
	if (disconnected && udc->driver)
		udc->driver->disconnect(&udc->gadget);
	else if (bus_reset && udc->driver)
		usb_gadget_udc_reset(&udc->gadget, udc->driver);

	return IRQ_HANDLED;
}

/**
 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
 *
 * For the two ep0 channels, we have special handling that triggers the
 * ep0 worker thread.  For normal bulk/intr channels, we either queue up
 * the next buffer descriptor (if the transaction is incomplete) or invoke
 * the completion callback (if the transaction has finished).
 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;

	spin_lock(&udc->lock);

	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG, iudma->ch_idx);
	bep = iudma->bep;
	rc = iudma_read(udc, iudma);

	/* special handling for EP0 RX (0) and TX (1) */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);

		/* a single request could require multiple submissions */
		if (rc >= 0) {
			req->actual += rc;

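			/*
			 * The request is done once it is fully satisfied,
			 * or once the hardware returns a short packet (the
			 * BD completed with fewer bytes than were posted).
			 */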
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				schedule_work(&udc->ep0_wq);

				/* "actual" on a ZLP is 1 byte */
				req->actual = min(req->actual, req->length);
			} else {
				/* queue up the next BD (same request) */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;

		if (rc >= 0) {
			req->actual += rc;

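			/* Same short-packet completion test as the ep0 path. */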
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				is_done = true;
				list_del(&breq->queue);

				req->actual = min(req->actual, req->length);

				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;

					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);

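	/* Unmap and complete the request without holding udc->lock. */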
	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}

	return IRQ_HANDLED;
}

/***********************************************************************
 * Debug filesystem
 ***********************************************************************/

/*
 * bcm63xx_usbd_dbg_show - Show USBD controller state.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
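 * and can be dumped with e.g. "cat /sys/kernel/debug/bcm63xx_udc/usbd"
 * (assuming debugfs is mounted at the usual /sys/kernel/debug location).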
 */
static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;

	if (!udc->driver)
		return -ENODEV;

	seq_printf(s, "ep0 state: %s\n",
		   bcm63xx_ep0_state_names[udc->ep0state]);
	seq_printf(s, "  pending requests: %s%s%s%s%s%s%s\n",
		   udc->ep0_req_reset ? "reset " : "",
		   udc->ep0_req_set_cfg ? "set_cfg " : "",
		   udc->ep0_req_set_iface ? "set_iface " : "",
		   udc->ep0_req_shutdown ? "shutdown " : "",
		   udc->ep0_request ? "pending " : "",
		   udc->ep0_req_completed ? "completed " : "",
		   udc->ep0_reply ? "reply " : "");
	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
		   udc->cfg, udc->iface, udc->alt_iface);
	seq_puts(s, "regs:\n");
	seq_printf(s, "  control: %08x; straps: %08x; status: %08x\n",
		   usbd_readl(udc, USBD_CONTROL_REG),
		   usbd_readl(udc, USBD_STRAPS_REG),
		   usbd_readl(udc, USBD_STATUS_REG));
	seq_printf(s, "  events:  %08x; stall:  %08x\n",
		   usbd_readl(udc, USBD_EVENTS_REG),
		   usbd_readl(udc, USBD_STALL_REG));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);

/*
 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
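 * (same debugfs mount caveat as above).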
 */
static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;
	int ch_idx, i;
	u32 sram2, sram3;

	if (!udc->driver)
		return -ENODEV;

	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
		struct iudma_ch *iudma = &udc->iudma[ch_idx];
		struct list_head *pos;

		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
		switch (iudma_defaults[ch_idx].ep_type) {
		case BCMEP_CTRL:
			seq_puts(s, "control");
			break;
		case BCMEP_BULK:
			seq_puts(s, "bulk");
			break;
		case BCMEP_INTR:
			seq_puts(s, "interrupt");
			break;
		}
		seq_puts(s, ch_idx & 0x01 ? " tx" : " rx");
		seq_printf(s, " [ep%d]:\n",
			   max_t(int, iudma_defaults[ch_idx].ep_num, 0));
		seq_printf(s, "  cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
			   usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));

		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
		seq_printf(s, "  base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
			   usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
			   sram2 >> 16, sram2 & 0xffff,
			   sram3 >> 16, sram3 & 0xffff,
			   usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
		seq_printf(s, "  desc: %d/%d used", iudma->n_bds_used,
			   iudma->n_bds);

		if (iudma->bep) {
			i = 0;
			list_for_each(pos, &iudma->bep->queue)
				i++;
			seq_printf(s, "; %d queued\n", i);
		} else {
			seq_puts(s, "\n");
		}

		for (i = 0; i < iudma->n_bds; i++) {
			struct bcm_enet_desc *d = &iudma->bd_ring[i];

			seq_printf(s, "  %03x (%02x): len_stat: %04x_%04x; pa %08x",
				   (unsigned int)(i * sizeof(*d)), i,
				   d->len_stat >> 16, d->len_stat & 0xffff,
				   d->address);
			if (d == iudma->read_bd)
				seq_puts(s, "   <<RD");
			if (d == iudma->write_bd)
				seq_puts(s, "   <<WR");
			seq_puts(s, "\n");
		}

		seq_puts(s, "\n");
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);

/**
 * bcm63xx_udc_init_debugfs - Create debugfs entries.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
{
	struct dentry *root;

	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
		return;

	root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
	udc->debugfs_root = root;

	debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
	debugfs_create_file("iudma", 0400, root, udc, &bcm63xx_iudma_dbg_fops);
}

/**
 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
 * @udc: Reference to the device controller.
 *
 * debugfs_remove_recursive() is safe to call with a NULL argument.
 */
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
{
	debugfs_remove_recursive(udc->debugfs_root);
}

/***********************************************************************
 * Driver init/exit
 ***********************************************************************/

/**
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Note that platform data is required, because pd.port_no varies from chip
 * to chip and is used to switch the correct USB port to device mode.
 */
static int bcm63xx_udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
	struct bcm63xx_udc *udc;
	int rc = -ENOMEM, i, irq;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	platform_set_drvdata(pdev, udc);
	udc->dev = dev;
	udc->pd = pd;

	if (!pd) {
		dev_err(dev, "missing platform data\n");
		return -EINVAL;
	}

	udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(udc->usbd_regs))
		return PTR_ERR(udc->usbd_regs);

	udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(udc->iudma_regs))
		return PTR_ERR(udc->iudma_regs);

	spin_lock_init(&udc->lock);
	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);

	udc->gadget.ops = &bcm63xx_udc_ops;
	udc->gadget.name = dev_name(dev);

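	/*
	 * Full-speed-only operation can be forced either by the board's
	 * platform data or by the "use_fullspeed" module parameter.
	 */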
	if (!pd->use_fullspeed && !use_fullspeed)
		udc->gadget.max_speed = USB_SPEED_HIGH;
	else
		udc->gadget.max_speed = USB_SPEED_FULL;

	/* request clocks, allocate buffers, and clear any pending IRQs */
	rc = bcm63xx_init_udc_hw(udc);
	if (rc)
		return rc;

	rc = -ENXIO;

	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto out_uninit;
	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
			     dev_name(dev), udc) < 0)
		goto report_request_failure;

	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0)
			goto out_uninit;
		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
				     dev_name(dev), &udc->iudma[i]) < 0)
			goto report_request_failure;
	}

	bcm63xx_udc_init_debugfs(udc);
	rc = usb_add_gadget_udc(dev, &udc->gadget);
	if (!rc)
		return 0;

	bcm63xx_udc_cleanup_debugfs(udc);
out_uninit:
	bcm63xx_uninit_udc_hw(udc);
	return rc;

report_request_failure:
	dev_err(dev, "error requesting IRQ #%d\n", irq);
	goto out_uninit;
}

/**
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 */
static int bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	BUG_ON(udc->driver);

	bcm63xx_uninit_udc_hw(udc);

	return 0;
}

static struct platform_driver bcm63xx_udc_driver = {
	.probe		= bcm63xx_udc_probe,
	.remove		= bcm63xx_udc_remove,
	.driver		= {
		.name	= DRV_MODULE_NAME,
	},
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);