Lines Matching refs:urb
374 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
390 sg_set_page(&urb->sg[i], page, q->buf_size, offset);
396 for (j = nsgs; j < urb->num_sgs; j++)
397 skb_free_frag(sg_virt(&urb->sg[j]));
398 urb->num_sgs = i;
401 urb->num_sgs = max_t(int, i, urb->num_sgs);
402 urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
403 sg_init_marker(urb->sg, urb->num_sgs);
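
The matches above (source lines 374-403) appear to come from the mt76 USB glue in the Linux kernel and form the scatter-gather refill helper: each SG slot is backed by a fragment from the per-queue page_frag allocator, a partial allocation frees the tail fragments the urb can no longer describe, and the transfer length is recomputed from the resulting num_sgs. A minimal sketch of that loop, reconstructed from the fragments (unmatched lines are filled in from context and may differ from the real driver; assumes <linux/usb.h>, <linux/scatterlist.h>, <linux/skbuff.h> and the driver's own mt76 queue types):

static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
                 int nsgs, gfp_t gfp)
{
        int i;

        for (i = 0; i < nsgs; i++) {
                void *data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
                struct page *page;

                if (!data)
                        break;

                /* point SG entry i at the page backing this fragment */
                page = virt_to_head_page(data);
                sg_set_page(&urb->sg[i], page, q->buf_size,
                            data - page_address(page));
        }

        if (i < nsgs) {
                int j;

                /* partial refill: free the tail fragments we are dropping */
                for (j = nsgs; j < urb->num_sgs; j++)
                        skb_free_frag(sg_virt(&urb->sg[j]));
                urb->num_sgs = i;
        }

        urb->num_sgs = max_t(int, i, urb->num_sgs);
        urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
        sg_init_marker(urb->sg, urb->num_sgs);

        return i ? 0 : -ENOMEM;
}
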
410 struct urb *urb, int nsgs, gfp_t gfp)
415 return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
417 urb->transfer_buffer_length = q->buf_size;
418 urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
420 return urb->transfer_buffer ? 0 : -ENOMEM;
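
Lines 410-420 are the refill entry point: when scatter-gather is usable the SG helper above is called, otherwise a single page fragment becomes the whole transfer buffer. A hedged sketch of that dispatch (only the two branches are visible in the matches; the sg_en test is an assumption):

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
                struct urb *urb, int nsgs, gfp_t gfp)
{
        if (dev->usb.sg_en)     /* assumed flag: SG supported/enabled */
                return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

        /* no SG: one fragment serves as the entire transfer buffer */
        urb->transfer_buffer_length = q->buf_size;
        urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
        return urb->transfer_buffer ? 0 : -ENOMEM;
}
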
427 unsigned int size = sizeof(struct urb);
432 e->urb = kzalloc(size, GFP_KERNEL);
433 if (!e->urb)
436 usb_init_urb(e->urb);
439 e->urb->sg = (struct scatterlist *)(e->urb + 1);
456 return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
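
Lines 427-456 cover rx urb allocation: the urb and its scatterlist are carved out of one kzalloc'd block, with urb->sg pointed just past the urb itself, and the freshly allocated urb is immediately refilled. A hedged sketch (the real driver may split this into a generic alloc helper plus an rx wrapper; the sg_en flag and the MT_RX_SG_MAX_SIZE constant are assumptions modelled on MT_TX_SG_MAX_SIZE seen at line 881):

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
                   struct mt76_queue_entry *e)
{
        unsigned int size = sizeof(struct urb);
        int sg_size = 1;

        if (dev->usb.sg_en) {                   /* assumed flag */
                size += MT_RX_SG_MAX_SIZE * sizeof(struct scatterlist);
                sg_size = MT_RX_SG_MAX_SIZE;    /* assumed constant */
        }

        e->urb = kzalloc(size, GFP_KERNEL);
        if (!e->urb)
                return -ENOMEM;

        usb_init_urb(e->urb);

        if (dev->usb.sg_en)
                /* the scatterlist lives in the same allocation, right after the urb */
                e->urb->sg = (struct scatterlist *)(e->urb + 1);

        return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
}
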
459 static void mt76u_urb_free(struct urb *urb)
463 for (i = 0; i < urb->num_sgs; i++)
464 skb_free_frag(sg_virt(&urb->sg[i]));
466 if (urb->transfer_buffer)
467 skb_free_frag(urb->transfer_buffer);
469 usb_free_urb(urb);
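
Lines 459-469 free an rx urb: every SG fragment is released, a linear transfer buffer (the non-SG case) is released too, and only then is the urb itself dropped. Assembled into a hedged sketch with the few missing lines filled in:

static void mt76u_urb_free(struct urb *urb)
{
        int i;

        /* SG case: each entry owns a page fragment */
        for (i = 0; i < urb->num_sgs; i++)
                skb_free_frag(sg_virt(&urb->sg[i]));

        /* non-SG case: the single linear buffer */
        if (urb->transfer_buffer)
                skb_free_frag(urb->transfer_buffer);

        usb_free_urb(urb);
}
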
474 struct urb *urb, usb_complete_t complete_fn,
486 urb->dev = udev;
487 urb->pipe = pipe;
488 urb->complete = complete_fn;
489 urb->context = context;
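
Lines 474-489 show the common bulk-urb setup: the device, pipe, completion handler, and context are stored in the urb, while the caller only picks a direction and an endpoint index. A hedged sketch (the in_ep/out_ep endpoint tables are assumptions):

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
                    struct urb *urb, usb_complete_t complete_fn,
                    void *context)
{
        struct usb_device *udev = to_usb_device(dev->dev);
        unsigned int pipe;

        if (dir == USB_DIR_IN)
                pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);    /* assumed table */
        else
                pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);   /* assumed table */

        urb->dev = udev;
        urb->pipe = pipe;
        urb->complete = complete_fn;
        urb->context = context;
}
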
492 static struct urb *
495 struct urb *urb = NULL;
500 urb = q->entry[q->tail].urb;
506 return urb;
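
Lines 492-506 pop the next completed urb from the rx ring; the surrounding locking and ring arithmetic are not in the matches, so the sketch below fills them in as assumptions:

static struct urb *
mt76u_get_next_rx_entry(struct mt76_queue *q)
{
        struct urb *urb = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        if (q->queued > 0) {
                urb = q->entry[q->tail].urb;
                q->tail = (q->tail + 1) % q->ndesc;
                q->queued--;
        }
        spin_unlock_irqrestore(&q->lock, flags);

        return urb;
}
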
567 mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
570 u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
571 int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
578 len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
589 while (len > 0 && nsgs < urb->num_sgs) {
590 data_len = min_t(int, len, urb->sg[nsgs].length);
592 sg_page(&urb->sg[nsgs]),
593 urb->sg[nsgs].offset, data_len,
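
Lines 567-593 turn a completed rx urb into an sk_buff: the first segment (or the linear buffer) provides the skb head, then any remaining SG segments are attached as page fragments with skb_add_rx_frag() until the frame length reported by mt76u_get_rx_entry_len() is consumed. A hedged sketch; mt76u_build_rx_skb() and the final rx_skb() hand-off are assumptions standing in for the unmatched lines:

static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb, int buf_size)
{
        u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
        int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
        int len, nsgs = 1;
        struct sk_buff *skb;

        len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
        if (len < 0)
                return 0;

        /* assumed helper: wrap the first segment into the skb head */
        skb = mt76u_build_rx_skb(data, min_t(int, len, data_len), buf_size);
        if (!skb)
                return 0;

        len -= skb->len;
        while (len > 0 && nsgs < urb->num_sgs) {
                data_len = min_t(int, len, urb->sg[nsgs].length);
                /* attach segment nsgs as a page fragment of the skb */
                skb_add_rx_frag(skb, nsgs - 1,
                                sg_page(&urb->sg[nsgs]),
                                urb->sg[nsgs].offset, data_len,
                                buf_size);
                len -= data_len;
                nsgs++;
        }

        dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);        /* assumed hand-off to mt76 core */

        return nsgs;    /* number of consumed segments, used for the refill */
}
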
603 static void mt76u_complete_rx(struct urb *urb)
605 struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
606 struct mt76_queue *q = urb->context;
609 trace_rx_urb(dev, urb);
611 switch (urb->status) {
617 dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
618 urb->status);
625 if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
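
Lines 603-625 are the rx completion callback, called in interrupt context: fatal statuses (urb cancelled, device gone) just return, other errors are rate-limit logged, and a sanity check verifies the completed urb is the one at the ring head before deferring further work. A hedged sketch of its shape (the error-status list, ring update, and the rx tasklet name are assumptions):

static void mt76u_complete_rx(struct urb *urb)
{
        struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
        struct mt76_queue *q = urb->context;
        unsigned long flags;

        trace_rx_urb(dev, urb);

        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
                return;         /* urb was killed, queue is being torn down */
        case 0:
                break;
        default:
                dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
                                    urb->status);
                break;          /* keep it in the ring so it gets resubmitted */
        }

        spin_lock_irqsave(&q->lock, flags);
        if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
                goto out;

        q->head = (q->head + 1) % q->ndesc;
        q->queued++;
        tasklet_schedule(&dev->usb.rx_tasklet); /* assumed deferral mechanism */
out:
        spin_unlock_irqrestore(&q->lock, flags);
}
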
637 struct urb *urb)
641 mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
643 trace_submit_urb(dev, urb);
645 return usb_submit_urb(urb, GFP_ATOMIC);
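
Lines 637-645 re-arm an rx urb: the bulk-in pipe and the mt76u_complete_rx callback are set, the submission is traced, and the urb is handed back to the USB core with GFP_ATOMIC (this runs from the completion/tasklet path). Hedged sketch; the endpoint selection is an assumption:

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
                    struct urb *urb)
{
        int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;  /* assumed */

        mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
                            mt76u_complete_rx, &dev->q_rx[qid]);
        trace_submit_urb(dev, urb);

        return usb_submit_urb(urb, GFP_ATOMIC);
}
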
652 struct urb *urb;
656 urb = mt76u_get_next_rx_entry(q);
657 if (!urb)
660 count = mt76u_process_rx_entry(dev, urb, q->buf_size);
662 err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
666 mt76u_submit_rx_buf(dev, qid, urb);
692 err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
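
Lines 652-666 are the deferred rx loop, and line 692 the initial arming of the ring: every completed urb is dequeued, its data pushed up the stack, its consumed fragments refilled, and the urb resubmitted; at queue start each entry's urb is submitted once. A hedged sketch of the loop:

static void
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
        int qid = q - &dev->q_rx[MT_RXQ_MAIN];  /* assumed: qid from array offset */
        struct urb *urb;
        int err, count;

        while (true) {
                urb = mt76u_get_next_rx_entry(q);
                if (!urb)
                        break;

                /* push the received frame(s) up the stack ... */
                count = mt76u_process_rx_entry(dev, urb, q->buf_size);
                if (count > 0) {
                        /* ... replace the fragments it consumed ... */
                        err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
                        if (err < 0)
                                break;
                }
                /* ... and put the urb back in flight */
                mt76u_submit_rx_buf(dev, qid, urb);
        }
}
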
741 mt76u_urb_free(q->entry[i].urb);
768 usb_poison_urb(q->entry[j].urb);
784 usb_unpoison_urb(q->entry[j].urb);
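
Lines 741, 768 and 784 hint at rx teardown and suspend/resume: cleanup frees every entry's urb via mt76u_urb_free(), while stop/resume use usb_poison_urb() (which waits for an in-flight urb and blocks resubmission) and usb_unpoison_urb() (which re-enables it). Illustrative sketch; the function names here are hypothetical, only the per-entry calls are in the matches:

static void mt76u_stop_rx_queue(struct mt76_queue *q)          /* hypothetical name */
{
        int j;

        for (j = 0; j < q->ndesc; j++)
                usb_poison_urb(q->entry[j].urb);        /* waits, blocks resubmit */
}

static void mt76u_resume_rx_queue(struct mt76_queue *q)        /* hypothetical name */
{
        int j;

        for (j = 0; j < q->ndesc; j++)
                usb_unpoison_urb(q->entry[j].urb);      /* allow resubmission again */
}
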
858 static void mt76u_complete_tx(struct urb *urb)
860 struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
861 struct mt76_queue_entry *e = urb->context;
863 if (mt76u_urb_error(urb))
864 dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
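
Lines 858-864 are the tx completion callback: errors are reported through the mt76u_urb_error() helper and the queue entry (carried in urb->context) is marked so a deferred pass can reclaim it. Hedged sketch; the done flag and the deferral are assumptions:

static void mt76u_complete_tx(struct urb *urb)
{
        struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
        struct mt76_queue_entry *e = urb->context;

        if (mt76u_urb_error(urb))
                dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
        e->done = true;                         /* assumed flag for the reaper */

        tasklet_schedule(&dev->usb.tx_tasklet); /* assumed deferral mechanism */
}
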
872 struct urb *urb)
874 urb->transfer_buffer_length = skb->len;
877 urb->transfer_buffer = skb->data;
881 sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
882 urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
883 if (!urb->num_sgs)
886 return urb->num_sgs;
909 err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
914 q->entry[idx].urb, mt76u_complete_tx,
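
Lines 872-886 attach an skb to a tx urb: without SG support the urb transmits straight out of skb->data; with SG support the skb (head plus paged frags) is described as a scatterlist via skb_to_sgvec(). Lines 909-914 then show the queueing glue, which runs the ring entry's urb through this helper and wires it to mt76u_complete_tx with the entry as context. A hedged sketch of the buffer setup (the sg_en test is an assumption):

static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
                       struct urb *urb)
{
        urb->transfer_buffer_length = skb->len;

        if (!dev->usb.sg_en) {          /* assumed flag */
                /* linear path: transmit directly from the skb data */
                urb->transfer_buffer = skb->data;
                return 0;
        }

        /* SG path: map skb head + frags into the urb's scatterlist */
        sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
        urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
        if (!urb->num_sgs)
                return -ENOMEM;

        return urb->num_sgs;
}
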
926 struct urb *urb;
930 urb = q->entry[q->first].urb;
932 trace_submit_urb(dev, urb);
933 err = usb_submit_urb(urb, GFP_ATOMIC);
938 dev_err(dev->dev, "tx urb submit failed:%d\n",
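
Lines 926-938 kick the tx ring: every prepared-but-unsubmitted entry between first and head is traced and submitted with GFP_ATOMIC, and a submit failure stops the walk. Hedged sketch; the ring-pointer arithmetic and the -ENODEV handling are assumptions:

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
        struct urb *urb;
        int err;

        while (q->first != q->head) {
                urb = q->entry[q->first].urb;

                trace_submit_urb(dev, urb);
                err = usb_submit_urb(urb, GFP_ATOMIC);
                if (err < 0) {
                        if (err == -ENODEV)     /* assumed: device is gone */
                                set_bit(MT76_REMOVED, &dev->state);
                        else
                                dev_err(dev->dev, "tx urb submit failed:%d\n",
                                        err);
                        break;
                }
                q->first = (q->first + 1) % q->ndesc;
        }
}
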
1015 usb_free_urb(q->entry[j].urb);
1038 usb_kill_urb(q->entry[j].urb);
1044 * will fail to submit urb, cleanup those skb's manually.
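
Lines 1015, 1038 and 1044 sketch tx teardown: stop first kills any urb still in flight with usb_kill_urb(), and since the device may already be gone and mt76u_tx_kick() can no longer submit, any skbs still queued are completed manually (the quoted comment) before the urbs are freed with usb_free_urb(). A small illustrative sketch with hypothetical function names:

static void mt76u_stop_tx_queue(struct mt76_queue *q)  /* hypothetical name */
{
        int j;

        for (j = 0; j < q->ndesc; j++)
                usb_kill_urb(q->entry[j].urb);  /* wait for in-flight tx urbs */
}

static void mt76u_free_tx_queue(struct mt76_queue *q)  /* hypothetical name */
{
        int j;

        for (j = 0; j < q->ndesc; j++)
                usb_free_urb(q->entry[j].urb);
}
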