Lines Matching refs:ep_ring
626 struct xhci_ring *ep_ring;
638 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
640 if (!ep_ring) {
653 if (list_empty(&ep_ring->td_list)) {
654 new_seg = ep_ring->enq_seg;
655 new_deq = ep_ring->enqueue;
656 new_cycle = ep_ring->cycle_state;
666 new_seg = ep_ring->deq_seg;
667 new_deq = ep_ring->dequeue;
690 next_trb(xhci, ep_ring, &new_seg, &new_deq);
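
The 626-690 block above is the search for a new dequeue position just past a cancelled or halted TD: it starts from either the enqueue pointer (empty ring) or the current dequeue pointer and repeatedly steps forward with next_trb(). The fragment below is a minimal, self-contained sketch of that kind of step over a segmented ring. The walk_* types, the flag bits, and the folding of the cycle-toggle handling into the helper are inventions of this sketch (in the driver the cycle handling sits in the caller), not the xhci definitions.

struct walk_trb { unsigned int control; };
struct walk_seg { struct walk_trb trbs[64]; struct walk_seg *next; };

#define WALK_TRB_LINK    0x4u   /* pretend "this is a link TRB" flag */
#define WALK_LINK_TOGGLE 0x2u   /* pretend "toggle cycle" flag */

/* Advance a (segment, TRB) cursor by one TRB, following link TRBs. */
static void walk_next_trb(struct walk_seg **seg, struct walk_trb **trb,
                          unsigned int *cycle)
{
    if ((*trb)->control & WALK_TRB_LINK) {
        /* A link TRB ends the segment: the cursor continues in the next one. */
        if ((*trb)->control & WALK_LINK_TOGGLE)
            *cycle ^= 1;        /* cycle state flips past a toggling link TRB */
        *seg = (*seg)->next;
        *trb = (*seg)->trbs;
    } else {
        (*trb)++;
    }
}
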
754 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
770 next_trb(xhci, ep_ring, &seg, &trb);
833 struct xhci_ring *ep_ring, int status)
841 xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
1278 struct xhci_ring *ep_ring,
1283 dequeue_temp = ep_ring->dequeue;
1291 if (trb_is_link(ep_ring->dequeue)) {
1292 ep_ring->deq_seg = ep_ring->deq_seg->next;
1293 ep_ring->dequeue = ep_ring->deq_seg->trbs;
1296 while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
1298 ep_ring->dequeue++;
1299 if (trb_is_link(ep_ring->dequeue)) {
1300 if (ep_ring->dequeue ==
1303 ep_ring->deq_seg = ep_ring->deq_seg->next;
1304 ep_ring->dequeue = ep_ring->deq_seg->trbs;
1306 if (ep_ring->dequeue == dequeue_temp) {
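
Lines 1278-1306 run after a Set TR Dequeue Pointer command completes: software advances its own dequeue pointer until it matches the pointer the controller accepted, following link TRBs into the next segment and bailing out if it ever laps the ring. Below is a simplified sketch of that walk; the sync_* types and the link flag are stand-ins, not the driver's structures.

#include <stdbool.h>

struct sync_trb { unsigned int control; };
struct sync_seg { struct sync_trb trbs[16]; struct sync_seg *next; };
struct sync_ring {
    struct sync_seg *deq_seg;
    struct sync_trb *dequeue;
};

#define SYNC_TRB_LINK 0x4u      /* pretend "link TRB" flag; assumed to mark the last TRB of a segment */

/* Returns false if the hardware-reported pointer was never found (walked a full lap). */
static bool sync_dequeue_to_hw(struct sync_ring *ring, struct sync_trb *hw_deq)
{
    struct sync_trb *start = ring->dequeue;

    while (ring->dequeue != hw_deq) {
        ring->dequeue++;
        if (ring->dequeue->control & SYNC_TRB_LINK) {
            if (ring->dequeue == hw_deq)
                break;          /* the controller may report the link TRB itself */
            ring->deq_seg = ring->deq_seg->next;
            ring->dequeue = ring->deq_seg->trbs;
        }
        if (ring->dequeue == start)
            return false;       /* lapped the ring without a match: give up */
    }
    return true;
}
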
1325 struct xhci_ring *ep_ring;
1337 ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
1338 if (!ep_ring) {
1400 ep_ring, ep_index);
1410 ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
1415 xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
2161 struct xhci_ring *ep_ring, struct xhci_td *td,
2234 ep_ring->dequeue = td->last_trb;
2235 ep_ring->deq_seg = td->last_trb_seg;
2236 inc_deq(xhci, ep_ring);
2238 return xhci_td_cleanup(xhci, td, ep_ring, td->status);
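
The 2161-2238 matches are the common TD-retirement path: jump the ring's dequeue state to the TD's last TRB, step once past it, and only then hand the TD over for cleanup and URB giveback. The toy_* sketch below shows just that ordering; the types and the two helper stand-ins are hypothetical, not the driver's inc_deq() or xhci_td_cleanup().

struct toy_trb  { unsigned int control; };
struct toy_seg  { struct toy_trb trbs[64]; struct toy_seg *next; };
struct toy_ring { struct toy_seg *deq_seg; struct toy_trb *dequeue; };
struct toy_td   {
    struct toy_seg *last_trb_seg;
    struct toy_trb *last_trb;
    int status;
};

/* Stand-in for inc_deq(): step forward, hopping segments at the end. */
static void toy_inc_deq(struct toy_ring *ring)
{
    if (ring->dequeue == &ring->deq_seg->trbs[63]) {
        ring->deq_seg = ring->deq_seg->next;
        ring->dequeue = ring->deq_seg->trbs;
    } else {
        ring->dequeue++;
    }
}

/* Stand-in for xhci_td_cleanup(): unlink the TD and give the URB back. */
static int toy_td_cleanup(struct toy_td *td, struct toy_ring *ring, int status)
{
    (void)td;
    (void)ring;
    return status;
}

static int toy_finish_td(struct toy_ring *ring, struct toy_td *td)
{
    /* Jump to the TD's final TRB, whichever TRB the event actually pointed at ... */
    ring->dequeue = td->last_trb;
    ring->deq_seg = td->last_trb_seg;
    /* ... step past it so the ring now points at the next TD ... */
    toy_inc_deq(ring);
    /* ... and only then complete the TD. */
    return toy_td_cleanup(td, ring, td->status);
}
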
2260 struct xhci_ring *ep_ring, struct xhci_td *td,
2349 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2356 struct xhci_ring *ep_ring, struct xhci_td *td,
2453 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2485 struct xhci_ring *ep_ring, struct xhci_td *td,
2543 sum_trb_lengths(xhci, ep_ring, ep_trb) +
2552 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2565 struct xhci_ring *ep_ring;
2589 ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
2600 if (!ep_ring) {
2628 td_num += list_count_nodes(&ep_ring->td_list);
2639 ep_ring->last_td_was_short)
2715 if (!list_empty(&ep_ring->td_list))
2723 if (!list_empty(&ep_ring->td_list))
2733 * Set the skip flag of the ep_ring; complete the missed TDs as
2734 * short transfers when the ep_ring is processed next time.
2770 if (list_empty(&ep_ring->td_list)) {
2781 ep_ring->last_td_was_short)) {
2803 xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
2808 td = list_first_entry(&ep_ring->td_list, struct xhci_td,
2814 ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2819 * is not in the current TD pointed to by ep_ring->dequeue because
2842 ep_ring->last_td_was_short) {
2843 ep_ring->last_td_was_short = false;
2859 !list_is_last(&td->td_list, &ep_ring->td_list)) {
2867 ep_ring->dequeue = td->last_trb;
2868 ep_ring->deq_seg = td->last_trb_seg;
2869 inc_deq(xhci, ep_ring);
2870 xhci_td_cleanup(xhci, td, ep_ring, td->status);
2882 trb_in_td(xhci, ep_ring->deq_seg,
2883 ep_ring->dequeue, td->last_trb,
2889 ep_ring->last_td_was_short = true;
2891 ep_ring->last_td_was_short = false;
2903 trace_xhci_handle_transfer(ep_ring,
2927 process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
2929 process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
2931 process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
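
Much of handle_tx_event() (2565-2931) is deciding whether the DMA address carried by a transfer event lies inside the TD that software expects to complete next; that is what the trb_in_td() calls at 2814 and 2882 are for. The sketch below shows one way such a membership test over a chain of DMA-contiguous segments can look. The evt_* names, the 64-TRB segment size, and the field layout are assumptions of this sketch, not the driver's implementation.

#include <stdbool.h>
#include <stdint.h>

#define EVT_TRB_SIZE     16u    /* a TRB is 16 bytes on the wire */
#define EVT_TRBS_PER_SEG 64u    /* assumed segment size for the sketch */

struct evt_seg {
    uint64_t dma;               /* bus address of this segment's first TRB */
    struct evt_seg *next;
};

/* Does 'suspect' fall between the TD's first TRB (start_dma in start_seg)
 * and its last TRB (end_dma, possibly in a later segment)? */
static bool evt_dma_in_td(struct evt_seg *start_seg, uint64_t start_dma,
                          uint64_t end_dma, uint64_t suspect)
{
    struct evt_seg *seg = start_seg;
    uint64_t lo = start_dma;

    do {
        uint64_t seg_end = seg->dma + (EVT_TRBS_PER_SEG - 1) * EVT_TRB_SIZE;
        bool td_ends_here = end_dma >= seg->dma && end_dma <= seg_end;
        /* Upper bound in this segment: the TD's last TRB, or the segment end. */
        uint64_t hi = td_ends_here ? end_dma : seg_end;

        if (suspect >= lo && suspect <= hi)
            return true;
        if (td_ends_here)
            return false;       /* passed the TD's last TRB without a hit */
        seg = seg->next;
        lo = seg->dma;          /* the TD continues at the top of the next segment */
    } while (seg != start_seg);

    return false;
}
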
3215 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
3250 if (ep_ring != xhci->cmd_ring) {
3251 new_segs = xhci_ring_expansion_needed(xhci, ep_ring, num_trbs);
3252 } else if (xhci_num_trbs_free(xhci, ep_ring) <= num_trbs) {
3260 if (xhci_ring_expansion(xhci, ep_ring, new_segs, mem_flags)) {
3266 while (trb_is_link(ep_ring->enqueue)) {
3271 !(ep_ring->type == TYPE_ISOC &&
3273 ep_ring->enqueue->link.control &=
3276 ep_ring->enqueue->link.control |=
3280 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
3283 if (link_trb_toggles_cycle(ep_ring->enqueue))
3284 ep_ring->cycle_state ^= 1;
3286 ep_ring->enq_seg = ep_ring->enq_seg->next;
3287 ep_ring->enqueue = ep_ring->enq_seg->trbs;
3290 if (link_trb_count++ > ep_ring->num_segs) {
3296 if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
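
In the 3266-3296 block the producer may find its enqueue pointer parked on a link TRB: it sets or clears the chain bit, hands the link TRB to the controller by flipping its cycle bit, toggles the ring's cycle state if that link TRB wraps back to the first segment, and continues in the next segment. The prep_* sketch below condenses those visible steps; the isochronous quirk handling (3271-3272) and the link_trb_count safety check (3290) are deliberately left out, and the flag bits and types are stand-ins.

#define PREP_TRB_CYCLE   0x01u
#define PREP_TRB_CHAIN   0x10u
#define PREP_TRB_LINK    0x04u  /* pretend "this is a link TRB" flag */
#define PREP_LINK_TOGGLE 0x02u  /* pretend "toggle cycle" flag */

struct prep_trb  { unsigned int control; };
struct prep_seg  { struct prep_trb trbs[64]; struct prep_seg *next; };
struct prep_ring {
    struct prep_seg *enq_seg;
    struct prep_trb *enqueue;
    unsigned int cycle_state;   /* cycle bit the producer currently writes */
};

static void prep_step_over_links(struct prep_ring *ring, int chain_across_link)
{
    while (ring->enqueue->control & PREP_TRB_LINK) {
        struct prep_trb *link = ring->enqueue;

        /* Chain the link TRB into the TD being built, or not, as requested. */
        if (chain_across_link)
            link->control |= PREP_TRB_CHAIN;
        else
            link->control &= ~PREP_TRB_CHAIN;

        /* Flipping the cycle bit is what hands the link TRB to the controller. */
        link->control ^= PREP_TRB_CYCLE;

        /* A link TRB that wraps to the first segment flips the producer cycle. */
        if (link->control & PREP_LINK_TOGGLE)
            ring->cycle_state ^= 1;

        ring->enq_seg = ring->enq_seg->next;
        ring->enqueue = ring->enq_seg->trbs;
    }
}
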
3316 struct xhci_ring *ep_ring;
3319 ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
3321 if (!ep_ring) {
3327 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
3346 list_add_tail(&td->td_list, &ep_ring->td_list);
3347 td->start_seg = ep_ring->enq_seg;
3348 td->first_trb = ep_ring->enqueue;
3755 struct xhci_ring *ep_ring;
3765 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3766 if (!ep_ring)
3800 start_trb = &ep_ring->enqueue->generic;
3801 start_cycle = ep_ring->cycle_state;
3821 queue_trb(xhci, ep_ring, true,
3857 queue_trb(xhci, ep_ring, true,
3861 field | ep_ring->cycle_state);
3865 td->last_trb = ep_ring->enqueue;
3866 td->last_trb_seg = ep_ring->enq_seg;
3874 queue_trb(xhci, ep_ring, false,
3879 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
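
The 3755-3879 matches lay down up to three stages for a control URB and record start_trb / start_cycle (3800-3801) before writing anything. The usual trick, sketched below with hypothetical ctrl_* helpers, is to write the first TRB with the inverted cycle bit and flip it only after the whole TD is on the ring, so the controller can never start executing a half-written TD; in the driver that final flip happens in a separate giveback step, and the real TRB field encodings are omitted here.

struct ctrl_trb { unsigned int field[3]; unsigned int control; };

#define CTRL_CYCLE     0x1u
#define CTRL_RING_TRBS 16u

static struct ctrl_trb ctrl_ring[CTRL_RING_TRBS];
static unsigned int ctrl_enq;

/* Hypothetical stand-in for queue_trb(): write one TRB and advance enqueue. */
static struct ctrl_trb *ctrl_queue_trb(unsigned int f0, unsigned int f1,
                                       unsigned int f2, unsigned int control)
{
    struct ctrl_trb *trb = &ctrl_ring[ctrl_enq++ % CTRL_RING_TRBS];

    trb->field[0] = f0;
    trb->field[1] = f1;
    trb->field[2] = f2;
    trb->control  = control;    /* on real hardware the control word is written last */
    return trb;
}

static void ctrl_queue_td(unsigned int cycle, int has_data_stage)
{
    struct ctrl_trb *first;

    /* Setup stage, written with the cycle bit deliberately inverted for now. */
    first = ctrl_queue_trb(0, 0, 8 /* setup packets are 8 bytes */,
                           /* SETUP-stage type bits would go here */ cycle ^ CTRL_CYCLE);

    if (has_data_stage)
        ctrl_queue_trb(0, 0, 512 /* example length */,
                       /* DATA-stage type bits */ cycle);

    /* Status stage, with interrupt-on-completion so the URB can be given back. */
    ctrl_queue_trb(0, 0, 0, /* STATUS-stage type bits, IOC */ cycle);

    /* Only now flip the first TRB's cycle bit: the HC sees a complete TD. */
    first->control ^= CTRL_CYCLE;
}
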
4052 struct xhci_ring *ep_ring;
4068 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
4076 start_trb = &ep_ring->enqueue->generic;
4077 start_cycle = ep_ring->cycle_state;
4128 (i ? ep_ring->cycle_state : !start_cycle);
4141 ep_ring->cycle_state;
4153 td->last_trb = ep_ring->enqueue;
4154 td->last_trb_seg = ep_ring->enq_seg;
4179 queue_trb(xhci, ep_ring, more_trbs_coming,
4222 urb_priv->td[0].last_trb = ep_ring->enqueue;
4224 td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);
4227 ep_ring->enqueue = urb_priv->td[0].first_trb;
4228 ep_ring->enq_seg = urb_priv->td[0].start_seg;
4229 ep_ring->cycle_state = start_cycle;
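
Lines 4222-4229 are the error path of the isochronous queueing routine: TRBs already written for the URB are converted to no-ops (via td_to_noop, also matched at 754 above) and the enqueue pointer, enqueue segment and cycle state are wound back to their saved values. Below is a simplified undo_* sketch of that rollback; the TRB-type encoding, the link flag, and the types are invented for the example.

struct undo_trb  { unsigned int control; };
struct undo_seg  { struct undo_trb trbs[64]; struct undo_seg *next; };
struct undo_ring {
    struct undo_seg *enq_seg;
    struct undo_trb *enqueue;
    unsigned int cycle_state;
};
struct undo_td {
    struct undo_seg *start_seg;
    struct undo_trb *first_trb;
    struct undo_trb *last_trb;
};

#define UNDO_TRB_TYPE_MASK 0xfc00u  /* pretend TRB-type field */
#define UNDO_TRB_TYPE_NOOP 0x2000u  /* pretend "no-op" type */
#define UNDO_TRB_LINK      0x0004u  /* pretend "link TRB" flag */

static void undo_partial_td(struct undo_ring *ring, struct undo_td *td,
                            struct undo_seg *saved_seg, struct undo_trb *saved_enq,
                            unsigned int saved_cycle)
{
    struct undo_seg *seg = td->start_seg;
    struct undo_trb *trb = td->first_trb;

    /* Neutralise every TRB already written for this TD (td_to_noop-style). */
    for (;;) {
        if (trb->control & UNDO_TRB_LINK) {
            /* Leave link TRBs alone; just follow them into the next segment. */
            seg = seg->next;
            trb = seg->trbs;
            continue;
        }
        trb->control = (trb->control & ~UNDO_TRB_TYPE_MASK) | UNDO_TRB_TYPE_NOOP;
        if (trb == td->last_trb)
            break;
        trb++;
    }

    /* Wind the producer back to where it was before this URB was queued. */
    ring->enqueue = saved_enq;
    ring->enq_seg = saved_seg;
    ring->cycle_state = saved_cycle;
}
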
4245 struct xhci_ring *ep_ring;
4255 ep_ring = xdev->eps[ep_index].ring;
4266 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
4278 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {