// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

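/* Issue a vendor control transfer on endpoint 0, retrying up to
 * MT_VEND_REQ_MAX_RETRY times on transient errors. A -ENODEV result marks
 * the device as removed so subsequent requests bail out early.
 * Must be called with usb_ctrl_mtx held.
 */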
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;
	int i, ret;

	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->phy.state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

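/* Read a 32-bit register through a vendor request. The register address is
 * split into a 16-bit value (high word) and a 16-bit offset (low word);
 * ~0 is returned if the transfer fails.
 */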
static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	int ret;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     addr >> 16, addr, usb->data,
				     sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}

	return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
			u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       addr >> 16, addr, usb->data,
			       sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
			 u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
	___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

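/* Write a buffer to consecutive registers, splitting it into chunks no
 * larger than the usb->data bounce buffer (usb->data_len bytes).
 */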
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u8 *val = data;
	int ret;
	int current_batch_size;
	int i = 0;

	/* Ensure that a multiple of 4 bytes is always copied,
	 * otherwise beacons can be corrupted.
	 * See: "mt76: round up length on mt76_wr_copy"
	 * Commit 850e8f6fbd5d0003b0
	 */
	len = round_up(len, 4);

	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		current_batch_size = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, current_batch_size);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i, usb->data,
					     current_batch_size);
		if (ret < 0)
			break;

		i += current_batch_size;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
			   const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int ret, i = 0, batch_len;
	const u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, batch_len);
		ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

static void
mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
		    void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int i = 0, batch_len, ret;
	u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
					     USB_DIR_IN | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		memcpy(val + i, usb->data, batch_len);
		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

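/* Write a 32-bit value as two 16-bit vendor requests: the low half goes to
 * 'offset' and the high half to 'offset + 2'.
 */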
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

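/* Scatter-gather is used only when the module parameter allows it, the host
 * controller advertises a non-zero sg_tablesize and either imposes no sg
 * size constraint or the device is a wireless-USB one.
 */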
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

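/* Populate the rx urb scatterlist with up to 'nsgs' page fragments of
 * q->buf_size bytes each. The number of entries that could be filled is
 * returned, or -ENOMEM if no fragment could be allocated at all.
 */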
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
		struct urb *urb, int nsgs, gfp_t gfp)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];

	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

	urb->transfer_buffer_length = q->buf_size;
	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);

	return urb->transfer_buffer ? 0 : -ENOMEM;
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en && sg_max_size > 0)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
		   struct mt76_queue_entry *e)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int err, sg_size;

	sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
	err = mt76u_urb_alloc(dev, e, sg_size);
	if (err)
		return err;

	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static struct urb *
mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->tail].urb;
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

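/* Extract the DMA payload length from the rx buffer header and, unless the
 * driver consumes the DMA header itself, sanity-check it against the actual
 * transfer length; -EINVAL is returned for malformed frames.
 */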
static int
mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
		       u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
		return dma_len;

	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

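/* Build an skb around the rx buffer. If payload plus headroom fits in the
 * buffer, the skb is built in place (fast path); otherwise the first
 * MT_SKB_HEAD_LEN bytes are copied and the remainder is attached as a page
 * fragment (slow path).
 */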
static struct sk_buff *
mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
		   int len, int buf_size)
{
	int head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
		data += head_room + MT_SKB_HEAD_LEN;
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				len - MT_SKB_HEAD_LEN, buf_size);

		return skb;
	}

	/* fast path */
	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, head_room);
	__skb_put(skb, len);

	return skb;
}

static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
		       int buf_size)
{
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
		return 0;

	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
	if (len < 0)
		return 0;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	data_len = min_t(int, len, data_len - head_room);
	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
	if (!skb)
		return 0;

	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset, data_len,
				buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

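/* URB completion handler, called in atomic context. It only accounts the
 * completed buffer and schedules the rx tasklet, which does the actual
 * processing and resubmission.
 */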
static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue *q = urb->context;
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
		goto out;

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
		    struct urb *urb)
{
	int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;

	mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
			    mt76u_complete_rx, &dev->q_rx[qid]);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

static void
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb;
	int err, count;

	while (true) {
		urb = mt76u_get_next_rx_entry(q);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
		if (count > 0) {
			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, qid, urb);
	}
	if (qid == MT_RXQ_MAIN)
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
}

static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	int i;

	rcu_read_lock();
	mt76_for_each_q_rx(dev, i)
		mt76u_process_rx_queue(dev, &dev->q_rx[i]);
	rcu_read_unlock();
}

static int
mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}

static int
mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i, err;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev, qid);
}

int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
{
	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);

static void
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_urb_free(q->entry[i].urb);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	int i;

	mt76_for_each_q_rx(dev, i)
		mt76u_free_rx_queue(dev, &dev->q_rx[i]);
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
	int i;

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++)
			usb_poison_urb(q->entry[j].urb);
	}

	tasklet_kill(&dev->usb.rx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	int i;

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int err, j;

		for (j = 0; j < q->ndesc; j++)
			usb_unpoison_urb(q->entry[j].urb);

		err = mt76u_submit_rx_buffers(dev, i);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

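/* TX worker: reap completed entries from each AC queue, wake mac80211
 * queues that were stopped due to backpressure, schedule more frames and
 * kick off the tx status work if the driver provides tx_status_data.
 */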
static void mt76u_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i];

		while (q->queued > 0) {
			if (!q->entry[q->tail].done)
				break;

			entry = q->entry[q->tail];
			q->entry[q->tail].done = false;

			mt76_queue_tx_complete(dev, q, &entry);
		}

		wake = q->stopped && q->queued < q->ndesc - 8;
		if (wake)
			q->stopped = false;

		if (!q->queued)
			wake_up(&dev->tx_wait);

		mt76_txq_schedule(&dev->phy, i);

		if (dev->drv->tx_status_data &&
		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
			queue_work(dev->wq, &dev->usb.stat_work);
		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(dev->wq, &usb->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	mt76_worker_schedule(&dev->tx_worker);
}

static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	}

	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
	if (!urb->num_sgs)
		return -ENOMEM;

	return urb->num_sgs;
}

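/* Enqueue an skb on a tx hardware queue: prepare the frame through the
 * driver callback, attach it to the queue entry urb and advance the queue
 * head. The urb itself is submitted later from mt76u_tx_kick().
 */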
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid];
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->head;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->head = (q->head + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->head) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->phy.state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

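/* Map an 802.11 access category to the hardware queue index; MT7663 uses
 * its own AC-to-LMAC queue mapping.
 */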
static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
{
	if (mt76_chip(dev) == 0x7663) {
		static const u8 lmac_queue_map[] = {
			/* ac to lmac mapping */
			[IEEE80211_AC_BK] = 0,
			[IEEE80211_AC_BE] = 1,
			[IEEE80211_AC_VI] = 2,
			[IEEE80211_AC_VO] = 4,
		};

		if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
			return 1; /* BE */

		return lmac_queue_map[ac];
	}

	return mt76_ac_to_hwq(ac);
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		if (i >= IEEE80211_NUM_ACS) {
			dev->q_tx[i] = dev->q_tx[0];
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76u_ac_to_hwq(dev, i);
		dev->q_tx[i] = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		struct mt76_queue *q;
		int j;

		q = dev->q_tx[i];
		if (!q)
			continue;

		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].urb);
	}
}

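/* Stop tx: wait for pending frames to drain and, on timeout, kill the
 * outstanding urbs and complete any queued entries manually before flushing
 * the tx status work.
 */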
void mt76u_stop_tx(struct mt76_dev *dev)
{
	int ret;

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
				 HZ / 5);
	if (!ret) {
		struct mt76_queue_entry entry;
		struct mt76_queue *q;
		int i, j;

		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i];
			if (!q)
				continue;

			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		mt76_worker_disable(&dev->tx_worker);

		/* On device removal we might still queue skbs, but
		 * mt76u_tx_kick() will fail to submit the urb, so clean up
		 * those skbs manually.
		 */
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i];
			if (!q)
				continue;

			while (q->queued > 0) {
				entry = q->entry[q->tail];
				q->entry[q->tail].done = false;
				mt76_queue_tx_complete(dev, q, &entry);
			}
		}

		mt76_worker_enable(&dev->tx_worker);
	}

	cancel_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

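/* Common USB probe-time initialization: register bus/queue ops (regular or
 * "ext" register access depending on the caller), allocate the control
 * bounce buffer, detect scatter-gather support and parse the bulk
 * endpoints.
 */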
int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf, bool ext)
{
	static struct mt76_bus_ops mt76u_ops = {
		.read_copy = mt76u_read_copy_ext,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	int err = -ENOMEM;

	mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
	mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
	mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
	mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;

	dev->tx_worker.fn = mt76u_tx_worker;
	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);

	usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
	if (usb->data_len < 32)
		usb->data_len = 32;

	usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
	if (!usb->data)
		goto error;

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	dev_set_drvdata(&udev->dev, dev);

	usb->sg_en = mt76u_check_sg(dev);

	err = mt76u_set_endpoints(intf, usb);
	if (err < 0)
		goto error;

	return 0;

error:
	destroy_workqueue(dev->wq);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");