Lines matching refs: ep_ring

368 struct cdnsp_ring *ep_ring;
373 ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
374 if (!ep_ring)
377 if (!ep_ring->stream_active || ep_ring->stream_rejected)
380 list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
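
The matches above (368-380) show the per-stream lookup pattern: resolve the stream's transfer ring, bail out when the stream is inactive or was rejected by the device, then walk the ring's TD list with a deletion-safe iterator. A minimal user-space sketch of that flow, assuming hypothetical simplified types; get_transfer_ring() and the singly linked td_list below stand in for cdnsp_get_transfer_ring() and the kernel's list_for_each_entry_safe():

#include <stddef.h>
#include <stdio.h>

struct td { struct td *next; int id; };

struct ring {
    int stream_active;
    int stream_rejected;
    struct td *td_list;                 /* simplified stand-in for the kernel list */
};

/* Hypothetical stand-in for cdnsp_get_transfer_ring(). */
static struct ring *get_transfer_ring(struct ring **rings,
                                      unsigned int nrings,
                                      unsigned int stream_id)
{
    if (stream_id >= nrings)
        return NULL;
    return rings[stream_id];
}

/* Deletion-safe walk, in the spirit of list_for_each_entry_safe(). */
static void flush_tds(struct ring *ring)
{
    struct td *td = ring->td_list, *td_temp;

    while (td) {
        td_temp = td->next;             /* grab next before unlinking */
        printf("flushing td %d\n", td->id);
        td = td_temp;
    }
    ring->td_list = NULL;
}

int main(void)
{
    struct td t2 = { NULL, 2 }, t1 = { &t2, 1 };
    struct ring r = { .stream_active = 1, .td_list = &t1 };
    struct ring *rings[] = { &r };
    struct ring *ep_ring = get_transfer_ring(rings, 1, 0);

    if (ep_ring && ep_ring->stream_active && !ep_ring->stream_rejected)
        flush_tds(ep_ring);
    return 0;
}
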
438 struct cdnsp_ring *ep_ring;
443 ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
444 if (!ep_ring)
452 new_seg = ep_ring->deq_seg;
453 new_deq = ep_ring->dequeue;
479 cdnsp_next_trb(pdev, ep_ring, &new_seg, &new_deq);
504 struct cdnsp_ring *ep_ring,
521 cdnsp_next_trb(pdev, ep_ring, &seg, &trb);
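
cdnsp_next_trb() (used at 479 and 521) advances a (segment, TRB) cursor by one slot, hopping to the next segment whenever the cursor sits on the segment's trailing link TRB. A sketch of that traversal, assuming a fixed segment size with the link TRB always in the last slot; the real driver detects link TRBs from the TRB type field instead:

#include <stdio.h>

#define TRBS_PER_SEG 4                  /* tiny segments for the demo */

struct trb { int payload; };

struct seg {
    struct trb trbs[TRBS_PER_SEG];      /* last entry acts as the link TRB */
    struct seg *next;
};

/* Advance (*seg, *trb) by one, following the link TRB at the end of
 * each segment, as cdnsp_next_trb() does. */
static void next_trb(struct seg **seg, struct trb **trb)
{
    if (*trb == &(*seg)->trbs[TRBS_PER_SEG - 1]) {  /* on the link TRB */
        *seg = (*seg)->next;
        *trb = (*seg)->trbs;
    } else {
        (*trb)++;
    }
}

int main(void)
{
    struct seg s2 = { .next = NULL };
    struct seg s1 = { .next = &s2 };
    s2.next = &s1;                      /* rings are circular */

    struct seg *seg = &s1;
    struct trb *trb = s1.trbs;

    for (int i = 0; i < 10; i++) {
        next_trb(&seg, &trb);
        printf("seg=%c idx=%ld\n", seg == &s1 ? 'A' : 'B',
               (long)(trb - seg->trbs));
    }
    return 0;
}
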
638 struct cdnsp_ring *ep_ring;
657 ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id);
659 if (cdnsp_trb_is_link(ep_ring->dequeue)) {
660 ep_ring->deq_seg = ep_ring->deq_seg->next;
661 ep_ring->dequeue = ep_ring->deq_seg->trbs;
664 while (ep_ring->dequeue != deq_state->new_deq_ptr) {
665 ep_ring->num_trbs_free++;
666 ep_ring->dequeue++;
668 if (cdnsp_trb_is_link(ep_ring->dequeue)) {
669 if (ep_ring->dequeue == deq_state->new_deq_ptr)
672 ep_ring->deq_seg = ep_ring->deq_seg->next;
673 ep_ring->dequeue = ep_ring->deq_seg->trbs;
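
Lines 659-673 move the software dequeue pointer to a position computed earlier, counting every freed slot in num_trbs_free and following link TRBs across segments; note the guard for the case where the new dequeue pointer is itself a link TRB. A condensed standalone sketch, with a hypothetical is_link() predicate standing in for cdnsp_trb_is_link():

#include <stdio.h>
#include <stdbool.h>

#define TRBS_PER_SEG 4

struct trb { int link; };

struct seg { struct trb trbs[TRBS_PER_SEG]; struct seg *next; };

struct ring {
    struct seg *deq_seg;
    struct trb *dequeue;
    unsigned int num_trbs_free;
};

static bool is_link(const struct trb *trb) { return trb->link; }

static void set_new_dequeue(struct ring *ring, struct trb *new_deq_ptr)
{
    /* If we start on a link TRB, hop segments first (line 659). */
    if (is_link(ring->dequeue)) {
        ring->deq_seg = ring->deq_seg->next;
        ring->dequeue = ring->deq_seg->trbs;
    }

    while (ring->dequeue != new_deq_ptr) {
        ring->num_trbs_free++;
        ring->dequeue++;

        if (is_link(ring->dequeue)) {
            /* The target may legitimately be the link TRB itself. */
            if (ring->dequeue == new_deq_ptr)
                break;
            ring->deq_seg = ring->deq_seg->next;
            ring->dequeue = ring->deq_seg->trbs;
        }
    }
}

int main(void)
{
    struct seg s1 = { 0 }, s2 = { 0 };
    s1.trbs[TRBS_PER_SEG - 1].link = 1;
    s2.trbs[TRBS_PER_SEG - 1].link = 1;
    s1.next = &s2;
    s2.next = &s1;

    struct ring ring = { .deq_seg = &s1, .dequeue = &s1.trbs[1] };
    set_new_dequeue(&ring, &s2.trbs[2]);
    printf("freed %u TRBs\n", ring.num_trbs_free);
    return 0;
}
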
696 struct cdnsp_ring *ep_ring;
708 ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
725 cdnsp_td_to_noop(pdev, ep_ring, cur_td, false);
732 ep_ring->num_tds--;
744 cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, cur_td);
891 struct cdnsp_ring *ep_ring,
897 cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td);
909 ep_ring->num_tds--;
921 struct cdnsp_ring *ep_ring;
924 ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
939 while (ep_ring->dequeue != td->last_trb)
940 cdnsp_inc_deq(pdev, ep_ring);
942 cdnsp_inc_deq(pdev, ep_ring);
944 cdnsp_td_cleanup(pdev, td, ep_ring, status);
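
The pattern at 939-944 (repeated at 1117-1122) finishes a TD by draining the ring up to the TD's last TRB, stepping once past it, and only then running cleanup. A sketch assuming a hypothetical single-segment inc_deq() that simply wraps; the real cdnsp_inc_deq() also negotiates link TRBs and cycle bits:

#include <stdio.h>

#define RING_LEN 8

struct trb { int data; };

struct ring {
    struct trb trbs[RING_LEN];
    struct trb *dequeue;
};

struct td_model { struct trb *last_trb; };

/* Hypothetical single-segment stand-in for cdnsp_inc_deq(). */
static void inc_deq(struct ring *ring)
{
    ring->dequeue++;
    if (ring->dequeue == &ring->trbs[RING_LEN])
        ring->dequeue = ring->trbs;     /* wrap; no link TRBs here */
}

static void td_cleanup(struct td_model *td, int status)
{
    printf("td %p done, status %d\n", (void *)td, status);
}

int main(void)
{
    struct ring ring = { 0 };
    ring.dequeue = &ring.trbs[1];

    struct td_model td = { .last_trb = &ring.trbs[4] };

    /* Drain up to the TD's final TRB... */
    while (ring.dequeue != td.last_trb)
        inc_deq(&ring);
    /* ...then step past it so the next TD starts clean. */
    inc_deq(&ring);

    td_cleanup(&td, 0);
    return 0;
}
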
999 struct cdnsp_ring *ep_ring;
1004 ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1023 td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
1027 cdnsp_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state,
1050 struct cdnsp_ring *ep_ring;
1054 ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1093 td_length = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb);
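
cdnsp_sum_trb_lengths() (used at 1093 and 1163) totals the lengths of the TRBs already consumed in the current TD, from the ring's dequeue up to, but not including, the TRB the completion event points at. A flat-ring sketch of that summation; the len field below is a simplification of the real TRB length bitfield:

#include <stdio.h>

#define RING_LEN 8

struct trb { unsigned int len; };

struct ring {
    struct trb trbs[RING_LEN];
    struct trb *dequeue;
};

/* Sum lengths from ring->dequeue up to (not including) stop_trb,
 * in the spirit of cdnsp_sum_trb_lengths(). */
static unsigned int sum_trb_lengths(const struct ring *ring,
                                    const struct trb *stop_trb)
{
    const struct trb *trb = ring->dequeue;
    unsigned int sum = 0;

    while (trb != stop_trb) {
        sum += trb->len;
        if (++trb == &ring->trbs[RING_LEN])
            trb = ring->trbs;           /* wrap */
    }
    return sum;
}

int main(void)
{
    struct ring ring = { .trbs = { {512}, {512}, {256}, {64} } };
    ring.dequeue = &ring.trbs[0];

    printf("consumed %u bytes\n",
           sum_trb_lengths(&ring, &ring.trbs[3]));  /* 512+512+256 */
    return 0;
}
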
1110 struct cdnsp_ring *ep_ring;
1112 ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1117 while (ep_ring->dequeue != td->last_trb)
1118 cdnsp_inc_deq(pdev, ep_ring);
1120 cdnsp_inc_deq(pdev, ep_ring);
1122 cdnsp_td_cleanup(pdev, td, ep_ring, &status);
1136 struct cdnsp_ring *ep_ring;
1139 ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1163 ep_trb_len = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb) +
1177 struct cdnsp_ring *ep_ring;
1198 ep_ring = pep->stream_info.stream_rings[cur_stream];
1199 ep_ring->stream_active = 1;
1200 ep_ring->stream_rejected = 0;
1208 ep_ring = pep->stream_info.stream_rings[dev_sid];
1209 ep_ring->stream_active = 0;
1210 ep_ring->stream_rejected = 1;
1212 list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
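
Lines 1198-1212 show both sides of stream arbitration: rings for streams the device accepted are marked active, while a rejected stream's ring is flagged and its queued TDs are flushed. A compact sketch of that bookkeeping with hypothetical names; pending_tds stands in for the real td_list walk:

#include <stdio.h>

#define MAX_STREAMS 4

struct ring {
    int stream_active;
    int stream_rejected;
    int pending_tds;                    /* stands in for the td_list */
};

struct stream_info { struct ring rings[MAX_STREAMS]; };

static void accept_streams(struct stream_info *info, int nstreams)
{
    for (int sid = 1; sid < nstreams; sid++) {  /* stream 0 is reserved */
        info->rings[sid].stream_active = 1;
        info->rings[sid].stream_rejected = 0;
    }
}

static void reject_stream(struct stream_info *info, int dev_sid)
{
    struct ring *ring = &info->rings[dev_sid];

    ring->stream_active = 0;
    ring->stream_rejected = 1;

    /* Flush everything that was queued on the rejected stream. */
    printf("flushing %d TDs on stream %d\n", ring->pending_tds, dev_sid);
    ring->pending_tds = 0;
}

int main(void)
{
    struct stream_info info = { 0 };

    info.rings[2].pending_tds = 3;
    accept_streams(&info, MAX_STREAMS);
    reject_stream(&info, 2);
    return 0;
}
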
1231 struct cdnsp_ring *ep_ring;
1247 ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1263 if (!ep_ring) {
1294 * Set the skip flag of the ep_ring; complete the missed TDs as
1295 * short transfers when the ep_ring is processed next time.
1306 if (list_empty(&ep_ring->td_list)) {
1316 ep_ring->last_td_was_short))
1317 trace_cdnsp_trb_without_td(ep_ring,
1328 td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
1332 ep_seg = cdnsp_trb_in_td(pdev, ep_ring->deq_seg,
1333 ep_ring->dequeue, td->last_trb,
1342 trace_cdnsp_handle_transfer(ep_ring,
1352 * of FSE is not in the current TD pointed to by ep_ring->dequeue
1380 ep_ring->last_td_was_short = true;
1382 ep_ring->last_td_was_short = false;
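
The transfer-event handler (1306-1382) first screens events that arrive with no TD queued, then asks cdnsp_trb_in_td() whether the completed TRB's DMA address lies inside the first TD on the ring, spanning from the current dequeue to the TD's last TRB, and finally records whether the TD ended short. A single-segment sketch of the DMA containment test; the real function walks a segment list and copes with wrap-around:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct trb { uint32_t field[4]; };

struct seg {
    struct trb trbs[8];
    uint64_t dma;                       /* DMA address of trbs[0] */
};

static uint64_t trb_dma(const struct seg *seg, const struct trb *trb)
{
    return seg->dma + (uint64_t)(trb - seg->trbs) * sizeof(struct trb);
}

/* Does event_dma land on a TRB between start and end (inclusive)?
 * Single-segment simplification of cdnsp_trb_in_td(). */
static bool trb_in_td(const struct seg *seg,
                      const struct trb *start, const struct trb *end,
                      uint64_t event_dma)
{
    return event_dma >= trb_dma(seg, start) &&
           event_dma <= trb_dma(seg, end);
}

int main(void)
{
    struct seg seg = { .dma = 0x1000 };
    const struct trb *deq = &seg.trbs[2];   /* ring dequeue */
    const struct trb *last = &seg.trbs[5];  /* td->last_trb */

    printf("%d\n", trb_in_td(&seg, deq, last, trb_dma(&seg, &seg.trbs[4])));
    printf("%d\n", trb_in_td(&seg, deq, last, trb_dma(&seg, &seg.trbs[7])));
    return 0;
}
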
1624 struct cdnsp_ring *ep_ring,
1643 if (cdnsp_room_on_ring(pdev, ep_ring, num_trbs))
1648 num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
1649 if (cdnsp_ring_expansion(pdev, ep_ring, num_trbs_needed,
1656 while (cdnsp_trb_is_link(ep_ring->enqueue)) {
1657 ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN);
1660 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
1663 if (cdnsp_link_trb_toggles_cycle(ep_ring->enqueue))
1664 ep_ring->cycle_state ^= 1;
1665 ep_ring->enq_seg = ep_ring->enq_seg->next;
1666 ep_ring->enqueue = ep_ring->enq_seg->trbs;
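
cdnsp_prepare_ring() (1643-1666) first guarantees space, expanding the ring when num_trbs_free falls short, then walks the enqueue pointer off any link TRBs: each link TRB is chained into the TD and handed to the consumer by flipping its cycle bit, and the producer's cycle_state toggles whenever the link TRB carries the toggle flag. A sketch of that enqueue fix-up, with hypothetical flag values in place of the real little-endian TRB_CHAIN/TRB_CYCLE fields:

#include <stdio.h>
#include <stdbool.h>

#define TRB_CYCLE   (1u << 0)           /* hypothetical bit positions */
#define LINK_TOGGLE (1u << 1)
#define TRB_CHAIN   (1u << 4)
#define TRB_LINK    (1u << 5)

#define TRBS_PER_SEG 4

struct trb { unsigned int control; };

struct seg { struct trb trbs[TRBS_PER_SEG]; struct seg *next; };

struct ring {
    struct seg *enq_seg;
    struct trb *enqueue;
    unsigned int cycle_state;
};

static bool is_link(const struct trb *trb) { return trb->control & TRB_LINK; }

static void prepare_enqueue(struct ring *ring)
{
    while (is_link(ring->enqueue)) {
        /* Chain the link TRB into the TD and give it to the consumer
         * by flipping its cycle bit (lines 1657-1660). */
        ring->enqueue->control |= TRB_CHAIN;
        ring->enqueue->control ^= TRB_CYCLE;

        /* A toggling link TRB flips the producer cycle (1663-1664). */
        if (ring->enqueue->control & LINK_TOGGLE)
            ring->cycle_state ^= 1;

        ring->enq_seg = ring->enq_seg->next;
        ring->enqueue = ring->enq_seg->trbs;
    }
}

int main(void)
{
    struct seg s1 = { 0 }, s2 = { 0 };
    s1.next = &s2;
    s2.next = &s1;
    s1.trbs[TRBS_PER_SEG - 1].control = TRB_LINK | LINK_TOGGLE;

    struct ring ring = {
        .enq_seg = &s1,
        .enqueue = &s1.trbs[TRBS_PER_SEG - 1],  /* parked on the link */
        .cycle_state = 1,
    };

    prepare_enqueue(&ring);
    printf("cycle_state now %u\n", ring.cycle_state);  /* toggled to 0 */
    return 0;
}
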
1675 struct cdnsp_ring *ep_ring;
1678 ep_ring = cdnsp_get_transfer_ring(pdev, preq->pep,
1680 if (!ep_ring)
1683 ret = cdnsp_prepare_ring(pdev, ep_ring,
1693 list_add_tail(&preq->td.td_list, &ep_ring->td_list);
1694 ep_ring->num_tds++;
1697 preq->td.start_seg = ep_ring->enq_seg;
1698 preq->td.first_trb = ep_ring->enqueue;
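
The tail of cdnsp_prepare_transfer() (1693-1698) enrolls the TD before any transfer TRB is written: append it to the ring's td_list, bump num_tds, and record the segment and TRB where the TD will start so a later cancellation can find and no-op it. A bookkeeping-only sketch with hypothetical simplified types; the list insertion itself is elided:

#include <stdio.h>

struct trb { int data; };
struct seg { struct trb trbs[8]; };

struct td_model {
    struct seg *start_seg;
    struct trb *first_trb;
};

struct ring {
    struct seg *enq_seg;
    struct trb *enqueue;
    unsigned int num_tds;
};

/* Enroll a TD on the ring before any TRBs are written, as the tail
 * of cdnsp_prepare_transfer() does. */
static void enroll_td(struct ring *ring, struct td_model *td)
{
    ring->num_tds++;                    /* td_list add is elided here */
    td->start_seg = ring->enq_seg;      /* cancellation needs these */
    td->first_trb = ring->enqueue;
}

int main(void)
{
    struct seg seg = { 0 };
    struct ring ring = { .enq_seg = &seg, .enqueue = &seg.trbs[3] };
    struct td_model td = { 0 };

    enroll_td(&ring, &td);
    printf("num_tds=%u first_trb idx=%ld\n", ring.num_tds,
           (long)(td.first_trb - ring.enq_seg->trbs));
    return 0;
}
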
2016 struct cdnsp_ring *ep_ring;
2021 ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
2022 if (!ep_ring)
2055 cdnsp_queue_trb(pdev, ep_ring, true,
2058 field | ep_ring->cycle_state |
2068 cdnsp_queue_trb(pdev, ep_ring, true,
2071 field | ep_ring->cycle_state |
2080 preq->td.last_trb = ep_ring->enqueue;
2084 field = ep_ring->cycle_state;
2086 field = (ep_ring->cycle_state ^ 1);
2098 cdnsp_queue_trb(pdev, ep_ring, false, 0, 0, TRB_INTR_TARGET(0),
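
In the control path (2021-2098) the cycle bit of the final status TRB is taken either as-is or inverted (2084-2086). Writing a TRB with the inverted cycle bit keeps it invisible to the hardware, which is the standard trick for publishing a multi-TRB TD atomically: queue the first TRB with the wrong cycle, queue the rest normally, then flip the first TRB's bit last (cf. cdnsp_giveback_first_trb() at 1027). A sketch of that publication protocol with hypothetical bit encodings:

#include <stdio.h>

#define TRB_CYCLE (1u << 0)             /* hypothetical bit position */
#define RING_LEN  8

struct trb { unsigned int control; };

struct ring {
    struct trb trbs[RING_LEN];
    struct trb *enqueue;
    unsigned int cycle_state;
};

/* Write one TRB; the first TRB of a TD gets the inverted cycle bit
 * so the consumer ignores it until the whole TD is published. */
static struct trb *queue_trb(struct ring *ring, int first, unsigned int flags)
{
    struct trb *trb = ring->enqueue++;
    unsigned int cycle = first ? ring->cycle_state ^ 1 : ring->cycle_state;

    trb->control = flags | (cycle & TRB_CYCLE);
    return trb;
}

/* Flip the first TRB's cycle bit last: the consumer now sees a fully
 * written TD (cf. cdnsp_giveback_first_trb()). */
static void giveback_first_trb(struct trb *start_trb)
{
    start_trb->control ^= TRB_CYCLE;
}

int main(void)
{
    struct ring ring = { .cycle_state = 1 };
    ring.enqueue = ring.trbs;

    struct trb *start = queue_trb(&ring, 1, 0x100);  /* data stage */
    queue_trb(&ring, 0, 0x200);                      /* status stage */
    giveback_first_trb(start);

    printf("first ctrl=%#x second ctrl=%#x\n",
           ring.trbs[0].control, ring.trbs[1].control);
    return 0;
}
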
2209 struct cdnsp_ring *ep_ring;
2218 ep_ring = preq->pep->ring;
2238 start_trb = &ep_ring->enqueue->generic;
2239 start_cycle = ep_ring->cycle_state;
2285 field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state;
2301 preq->td.last_trb = ep_ring->enqueue;
2305 cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
2344 ep_ring->num_tds--;
2353 preq->td.last_trb = ep_ring->enqueue;
2355 cdnsp_td_to_noop(pdev, ep_ring, &preq->td, true);
2358 ep_ring->enqueue = preq->td.first_trb;
2359 ep_ring->enq_seg = preq->td.start_seg;
2360 ep_ring->cycle_state = start_cycle;
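
The bulk path (2218-2360) snapshots start_trb and start_cycle before writing anything; if queueing fails mid-TD it drops the TD from the ring's accounting (2344), turns the partially written TRBs into no-ops with cdnsp_td_to_noop() (2355), and rewinds enqueue, enq_seg and cycle_state to the snapshot (2358-2360). A single-segment sketch of that rollback with hypothetical TRB type encodings:

#include <stdio.h>

#define TRB_TYPE_NORMAL 1u              /* hypothetical encodings */
#define TRB_TYPE_NOOP   8u
#define RING_LEN        8

struct trb { unsigned int type; };

struct ring {
    struct trb trbs[RING_LEN];
    struct trb *enqueue;
    unsigned int cycle_state;
    unsigned int num_tds;
};

struct td_model { struct trb *first_trb, *last_trb; };

static void queue_normal(struct ring *ring)
{
    ring->enqueue->type = TRB_TYPE_NORMAL;
    ring->enqueue++;
}

/* Failure path: no-op what we wrote and rewind the producer state,
 * mirroring lines 2344-2360. */
static void rollback_td(struct ring *ring, struct td_model *td,
                        struct trb *start_trb, unsigned int start_cycle)
{
    ring->num_tds--;
    td->last_trb = ring->enqueue;

    for (struct trb *t = td->first_trb; t != td->last_trb; t++)
        t->type = TRB_TYPE_NOOP;        /* cf. cdnsp_td_to_noop() */

    ring->enqueue = start_trb;
    ring->cycle_state = start_cycle;
}

int main(void)
{
    struct ring ring = { .cycle_state = 1, .num_tds = 1 };
    ring.enqueue = ring.trbs;

    struct td_model td = { .first_trb = ring.enqueue };
    unsigned int start_cycle = ring.cycle_state;
    struct trb *start_trb = ring.enqueue;

    queue_normal(&ring);
    queue_normal(&ring);                /* pretend this one failed */
    rollback_td(&ring, &td, start_trb, start_cycle);

    printf("enqueue rewound: %d, num_tds=%u\n",
           ring.enqueue == start_trb, ring.num_tds);
    return 0;
}
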