// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * ep0.c - Endpoint 0 handling
 *
 * Copyright 2017 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"
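
/*
 * Queue a reply to a SETUP packet using EP0's internal request.
 *
 * Called with the vhub lock held, as the tail end of SETUP packet
 * processing. Returns a std_req_* code telling the caller whether
 * the data phase was started (std_req_data) or the endpoint should
 * be stalled (std_req_stall).
 */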
int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
{
	struct usb_request *req = &ep->ep0.req.req;
	int rc;

	if (WARN_ON(ep->d_idx != 0))
		return std_req_stall;
	if (WARN_ON(!ep->ep0.dir_in))
		return std_req_stall;
	if (WARN_ON(len > AST_VHUB_EP0_MAX_PACKET))
		return std_req_stall;
	if (WARN_ON(req->status == -EINPROGRESS))
		return std_req_stall;

	req->buf = ptr;
	req->length = len;
	req->complete = NULL;
	req->zero = true;

	/*
	 * Call internal queue directly after dropping the lock. This is
	 * safe to do as the reply is always the last thing done when
	 * processing a SETUP packet, usually as a tail call.
	 */
	spin_unlock(&ep->vhub->lock);
	if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
		rc = std_req_stall;
	else
		rc = std_req_data;
	spin_lock(&ep->vhub->lock);
	return rc;
}
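
/*
 * Build a small reply directly in the EP0 buffer from a list of byte
 * arguments, then send it via ast_vhub_reply() with a NULL buffer
 * pointer (meaning "data is already in the EP buffer"). Callers
 * normally go through the ast_vhub_simple_reply() wrapper macro in
 * vhub.h, which computes the length from its arguments.
 */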
int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
{
	u8 *buffer = ep->buf;
	unsigned int i;
	va_list args;

	va_start(args, len);

	/* Copy data directly into EP buffer */
	for (i = 0; i < len; i++)
		buffer[i] = va_arg(args, int);
	va_end(args);

	/* req->buf NULL means data is already there */
	return ast_vhub_reply(ep, NULL, len);
}
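
/*
 * Handle a SETUP packet on EP0. This drives the EP0 state machine:
 *
 *   ep0_state_token  -> SETUP received, decode and dispatch
 *   ep0_state_data   -> data phase in progress (IN or OUT)
 *   ep0_state_status -> waiting for the status phase ack
 *   ep0_state_stall  -> endpoint stalled, waiting for the next SETUP
 *
 * Hub and standard device requests are handled internally; anything
 * else is forwarded to the gadget driver's setup() callback.
 */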
void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
{
	struct usb_ctrlrequest crq;
	enum std_req_rc std_req_rc;
	int rc = -ENODEV;

	if (WARN_ON(ep->d_idx != 0))
		return;

	/*
	 * Grab the setup packet from the chip and byteswap
	 * interesting fields
	 */
	memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));

	EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
	      crq.bRequestType, crq.bRequest,
	      le16_to_cpu(crq.wValue),
	      le16_to_cpu(crq.wIndex),
	      le16_to_cpu(crq.wLength),
	      (crq.bRequestType & USB_DIR_IN) ? "in" : "out",
	      ep->ep0.state);

	/*
	 * Check our state, cancel pending requests if needed
	 *
	 * Note: Under some circumstances, we can get a new SETUP
	 * packet while waiting for the stall ack, just accept it.
	 *
	 * In any case, a SETUP packet in the wrong state should have
	 * reset the HW state machine, so let's just log, nuke
	 * requests, and move on.
	 */
	if (ep->ep0.state != ep0_state_token &&
	    ep->ep0.state != ep0_state_stall) {
		EPDBG(ep, "wrong state\n");
		ast_vhub_nuke(ep, -EIO);
	}

	/* Calculate next state for EP0 */
	ep->ep0.state = ep0_state_data;
	ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);

	/* If this is the vHub, we handle requests differently */
	std_req_rc = std_req_driver;
	if (ep->dev == NULL) {
		if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
			std_req_rc = ast_vhub_std_hub_request(ep, &crq);
		else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
			std_req_rc = ast_vhub_class_hub_request(ep, &crq);
		else
			std_req_rc = std_req_stall;
	} else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		std_req_rc = ast_vhub_std_dev_request(ep, &crq);

	/* Act upon result */
	switch (std_req_rc) {
	case std_req_complete:
		goto complete;
	case std_req_stall:
		goto stall;
	case std_req_driver:
		break;
	case std_req_data:
		return;
	}

	/* Pass request up to the gadget driver */
	if (WARN_ON(!ep->dev))
		goto stall;
	if (ep->dev->driver) {
		EPDBG(ep, "forwarding to gadget...\n");
		spin_unlock(&ep->vhub->lock);
		rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
		spin_lock(&ep->vhub->lock);
		EPDBG(ep, "driver returned %d\n", rc);
	} else {
		EPDBG(ep, "no gadget for request!\n");
	}
	if (rc >= 0)
		return;

 stall:
	EPDBG(ep, "stalling\n");
	writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_stall;
	ep->ep0.dir_in = false;
	return;

 complete:
	EPVDBG(ep, "sending [in] status with no data\n");
	writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_status;
	ep->ep0.dir_in = false;
}
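
/*
 * Push the next chunk of an IN (device-to-host) data phase into the
 * EP0 buffer and trigger transmission. Once the final packet (short,
 * or exactly filling the request) has gone out, move the state machine
 * to the status phase and arm the RX side for the host's zero-length
 * status-stage OUT.
 */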
static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
				 struct ast_vhub_req *req)
{
	unsigned int chunk;
	u32 reg;

	/*
	 * If this is a 0-length request, it's the gadget trying to
	 * send a status on our behalf. We take it from here.
	 */
	if (req->req.length == 0)
		req->last_desc = 1;

	/* Are we done? Complete request, otherwise wait for next interrupt */
	if (req->last_desc >= 0) {
		EPVDBG(ep, "complete send %d/%d\n",
		       req->req.actual, req->req.length);
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
		return;
	}

	/*
	 * Next chunk cropped to max packet size. Also check if this
	 * is the last packet
	 */
	chunk = req->req.length - req->req.actual;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;

	EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
	       chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);

	/*
	 * Copy data if any (internal requests already have data
	 * in the EP buffer)
	 */
	if (chunk && req->req.buf)
		memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
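
	/*
	 * Make sure the chunk just written to the EP buffer is visible
	 * to the vhub DMA engine before the MMIO writes arming the
	 * transmit; see vhub_dma_workaround() in vhub.h for details.
	 */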
	vhub_dma_workaround(ep->buf);

	/* Remember chunk size and trigger send */
	reg = VHUB_EP0_SET_TX_LEN(chunk);
	writel(reg, ep->ep0.ctlstat);
	writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	req->req.actual += chunk;
}

static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
{
	EPVDBG(ep, "rx prime\n");

	/* Prime endpoint for receiving data */
	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
}
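
/*
 * Consume one OUT (host-to-device) packet from the EP0 buffer. A short
 * packet, or one that fills the request, completes the data phase and
 * arms the TX side for the zero-length status-stage IN; otherwise the
 * receiver is re-primed for the next packet.
 */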
static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep,
				    struct ast_vhub_req *req,
				    unsigned int len)
{
	unsigned int remain;
	int rc = 0;

	/* We are receiving... grab request */
	remain = req->req.length - req->req.actual;

	EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);

	/* Are we getting more than asked? */
	if (len > remain) {
		EPDBG(ep, "receiving too much (ovf: %d)!\n",
		      len - remain);
		len = remain;
		rc = -EOVERFLOW;
	}
	if (len && req->req.buf)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);
	req->req.actual += len;

	/* Done? */
	if (len < ep->ep.maxpacket || len == remain) {
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, rc);
	} else {
		ast_vhub_ep0_rx_prime(ep);
	}
}
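
/*
 * Interrupt-time handling of EP0 in/out ACKs. Advances the state
 * machine according to the current phase: data packets are sent or
 * received, status acks return us to the token state, and anything
 * unexpected stalls the endpoint.
 */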
void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
{
	struct ast_vhub_req *req;
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	bool stall = false;
	u32 stat;

	/* Read EP0 status */
	stat = readl(ep->ep0.ctlstat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
	       stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);

	switch (ep->ep0.state) {
	case ep0_state_token:
		/* There should be no request queued in that state... */
		if (req) {
			dev_warn(dev, "request present while in TOKEN state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}
		dev_warn(dev, "ack while in TOKEN state\n");
		stall = true;
		break;
	case ep0_state_data:
		/* Check the state bits corresponding to our direction */
		if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
		    (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
		    (ep->ep0.dir_in != in_ack)) {
			/* In that case, ignore interrupt */
			dev_warn(dev, "irq state mismatch\n");
			break;
		}
		/*
		 * We are in the data phase and there's no request, something
		 * is wrong, stall
		 */
		if (!req) {
			dev_warn(dev, "data phase, no request\n");
			stall = true;
			break;
		}

		/* We have a request, handle data transfers */
		if (ep->ep0.dir_in)
			ast_vhub_ep0_do_send(ep, req);
		else
			ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
		return;
	case ep0_state_status:
		/* Nuke stale requests */
		if (req) {
			dev_warn(dev, "request present while in STATUS state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}

		/*
		 * If the status phase completes with the wrong ack, stall
		 * the endpoint just in case, to abort whatever the host
		 * was doing.
		 */
		if (ep->ep0.dir_in == in_ack) {
			dev_warn(dev, "status direction mismatch\n");
			stall = true;
		}
		break;
	case ep0_state_stall:
		/*
		 * There shouldn't be any request left, but nuke just in case,
		 * otherwise the stale request will block subsequent ones
		 */
		ast_vhub_nuke(ep, -EIO);
		break;
	}

	/* Reset to token state or stall */
	if (stall) {
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_stall;
	} else {
		ep->ep0.state = ep0_state_token;
	}
}
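
/*
 * usb_ep_ops->queue() for EP0. Only a single request may be in flight
 * at a time, and it must be queued from the data phase (i.e. in
 * response to a SETUP packet): IN requests start transmitting
 * immediately, OUT requests arm the receiver.
 */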
static int ast_vhub_ep0_queue(struct usb_ep *u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	unsigned long flags;

	/* Paranoid checks */
	if (!u_req || (!u_req->complete && !req->internal)) {
		dev_warn(dev, "Bogus EP0 request! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Not endpoint 0? */
	if (WARN_ON(ep->d_idx != 0))
		return -EINVAL;

	/* Disabled device */
	if (ep->dev && !ep->dev->enabled)
		return -ESHUTDOWN;

	/* Data, no buffer and not internal? */
	if (u_req->length && !u_req->buf && !req->internal) {
		dev_warn(dev, "Request with no buffer!\n");
		return -EINVAL;
	}

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, "  l=%d zero=%d noshort=%d is_in=%d\n",
	       u_req->length, u_req->zero,
	       u_req->short_not_ok, ep->ep0.dir_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->last_desc = -1;
	req->active = false;

	spin_lock_irqsave(&vhub->lock, flags);

	/* EP0 can only support a single request at a time */
	if (!list_empty(&ep->queue) ||
	    ep->ep0.state == ep0_state_token ||
	    ep->ep0.state == ep0_state_stall) {
		dev_warn(dev, "EP0: Request in wrong state\n");
		EPVDBG(ep, "EP0: list_empty=%d state=%d\n",
		       list_empty(&ep->queue), ep->ep0.state);
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EBUSY;
	}

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);

	if (ep->ep0.dir_in) {
		/* IN request, send data */
		ast_vhub_ep0_do_send(ep, req);
	} else if (u_req->length == 0) {
		/* 0-length request, send completion as rx */
		EPVDBG(ep, "0-length rx completion\n");
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
	} else {
		/* OUT request, start receiver */
		ast_vhub_ep0_rx_prime(ep);
	}

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
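
/*
 * usb_ep_ops->dequeue() for EP0. Since at most one request can be
 * queued, only the head of the list needs checking; the endpoint is
 * stalled on removal so the hardware side is cleaned up as well.
 */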
static int ast_vhub_ep0_dequeue(struct usb_ep *u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *req;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Only one request can be in the queue */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	/* Is it ours? */
	if (req && u_req == &req->req) {
		EPVDBG(ep, "dequeue req @%p\n", req);

		/*
		 * We don't have to deal with "active" as all
		 * DMAs go to the EP buffers, not the request.
		 */
		ast_vhub_done(ep, req, -ECONNRESET);

		/* We do stall the EP to clean things up in HW */
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_status;
		ep->ep0.dir_in = false;
		rc = 0;
	}
	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}
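
/*
 * EP0 is always enabled and never halted through the usual gadget
 * endpoint API, so only queue/dequeue and request allocation/free
 * operations are provided here.
 */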
static const struct usb_ep_ops ast_vhub_ep0_ops = {
	.queue		= ast_vhub_ep0_queue,
	.dequeue	= ast_vhub_ep0_dequeue,
	.alloc_request	= ast_vhub_alloc_request,
	.free_request	= ast_vhub_free_request,
};
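
/*
 * Reset a device's EP0: fail any pending request with -EIO and return
 * the state machine to the token state, ready for the next SETUP
 * packet.
 */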
void ast_vhub_reset_ep0(struct ast_vhub_dev *dev)
{
	struct ast_vhub_ep *ep = &dev->ep0;

	ast_vhub_nuke(ep, -EIO);
	ep->ep0.state = ep0_state_token;
}
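
/*
 * One-time setup of an EP0 structure, either for the vHub itself
 * (dev == NULL) or for one of the downstream devices. Each EP0 gets
 * its own slice of the shared setup-packet registers and of the DMA
 * buffer area, indexed by device number (slot 0 belongs to the hub).
 */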
void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
		       struct ast_vhub_dev *dev)
{
	memset(ep, 0, sizeof(*ep));

	INIT_LIST_HEAD(&ep->ep.ep_list);
	INIT_LIST_HEAD(&ep->queue);
	ep->ep.ops = &ast_vhub_ep0_ops;
	ep->ep.name = "ep0";
	ep->ep.caps.type_control = true;
	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
	ep->d_idx = 0;
	ep->dev = dev;
	ep->vhub = vhub;
	ep->ep0.state = ep0_state_token;
	INIT_LIST_HEAD(&ep->ep0.req.queue);
	ep->ep0.req.internal = true;

	/* Small difference between vHub and devices */
	if (dev) {
		ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
		ep->ep0.setup = vhub->regs +
			AST_VHUB_SETUP0 + 8 * (dev->index + 1);
		ep->buf = vhub->ep0_bufs +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
		ep->buf_dma = vhub->ep0_bufs_dma +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
	} else {
		ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
		ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
		ep->buf = vhub->ep0_bufs;
		ep->buf_dma = vhub->ep0_bufs_dma;
	}
}