Lines matching defs:trb in drivers/usb/host/xhci-ring.c (the leading number on each line is the line number in that file):
67 union xhci_trb *trb)
71 if (!seg || !trb || trb < seg->trbs)
74 segment_offset = trb - seg->trbs;
77 return seg->dma + (segment_offset * sizeof(*trb));
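The matches at 67-77 come from xhci_trb_virt_to_dma(), which converts a TRB's kernel virtual address into the bus address the controller sees by offsetting into the owning segment. A sketch of the full helper, reconstructed around the matched lines (the upper bounds check is an assumption about the unmatched lines):

dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
				union xhci_trb *trb)
{
	unsigned long segment_offset;

	/* reject TRBs that cannot belong to this segment */
	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs from the start of the segment */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)	/* assumed bound */
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}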
80 static bool trb_is_noop(union xhci_trb *trb)
82 return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
85 static bool trb_is_link(union xhci_trb *trb)
87 return TRB_TYPE_LINK_LE32(trb->link.control);
90 static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
92 return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
96 struct xhci_segment *seg, union xhci_trb *trb)
98 return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
101 static bool link_trb_toggles_cycle(union xhci_trb *trb)
103 return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
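The one-line predicates at 80-103 all test the little-endian control word (generic.field[3] / link.control) in place, without byte-swapping the whole TRB. The type-test macros they use plausibly look like the following; the names come from the matches, but the bodies here are an assumption about xhci.h:

/* assumed macro bodies; only the names are taken from the matches above */
#define TRB_TYPE_BITMASK	(0xfc00)
#define TRB_TYPE(p)		((p) << 10)
#define TRB_TYPE_LINK_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
				 cpu_to_le32(TRB_TYPE(TRB_LINK)))
#define TRB_TYPE_NOOP_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
				 cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))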
120 static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
122 if (trb_is_link(trb)) {
124 trb->link.control &= cpu_to_le32(~TRB_CHAIN);
126 trb->generic.field[0] = 0;
127 trb->generic.field[1] = 0;
128 trb->generic.field[2] = 0;
130 trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
131 trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
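Matches 120-131 form the body of trb_to_noop(): a link TRB keeps its type and only loses its chain bit, while any other TRB is wiped except for its cycle bit and retyped as a no-op. Reconstructed as a whole (the else framing fills the gaps between matched lines):

static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
	if (trb_is_link(trb)) {
		/* unchain a chained link TRB; keep it as a link */
		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
	} else {
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		/* preserve only the cycle bit, then set the no-op type */
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
	}
}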
135 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
142 union xhci_trb **trb)
144 if (trb_is_link(*trb)) {
146 *trb = ((*seg)->trbs);
148 (*trb)++;
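Matches 135-148 belong to next_trb(), which steps a (segment, TRB) pair forward and hops to the next segment when it lands on a link TRB. A sketch, assuming the elided line between 144 and 146 advances *seg:

static void next_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		     struct xhci_segment **seg, union xhci_trb **trb)
{
	if (trb_is_link(*trb)) {
		/* follow the link TRB into the next segment (assumed) */
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}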
158 /* event ring doesn't have link trbs, check for last trb */
590 * A cancelled TD can complete with a stall if HW cached the trb.
638 * We want to find the pointer, segment and cycle state of the new trb
694 union xhci_trb *trb = td->first_trb;
697 trb_to_noop(trb, TRB_TR_NOOP);
700 if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
701 trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
703 if (trb == td->last_trb)
706 next_trb(xhci, ep_ring, &seg, &trb);
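Matches 694-706 are the cancellation loop of td_to_noop(): every TRB of the TD is rewritten as a no-op, and when flip_cycle is set the interior TRBs also get their cycle bit inverted, skipping the first and last TRB of the TD. Reconstructed (the while framing is an assumption):

struct xhci_segment *seg = td->start_seg;
union xhci_trb *trb = td->first_trb;

while (1) {
	trb_to_noop(trb, TRB_TR_NOOP);

	/* flip cycle only on interior TRBs, never the first or last */
	if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
		trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

	if (trb == td->last_trb)
		break;

	next_trb(xhci, ep_ring, &seg, &trb);
}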
934 union xhci_trb *trb, struct xhci_event_cmd *event)
945 if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
954 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1227 union xhci_trb *trb, u32 cmd_comp_code)
1237 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1238 stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
1324 union xhci_trb *trb, u32 cmd_comp_code)
1331 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1369 if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
1970 "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
2108 /* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
2113 union xhci_trb *trb = ring->dequeue;
2116 for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
2117 if (!trb_is_noop(trb) && !trb_is_link(trb))
2118 sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
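The comment and loop at 2108-2118 are sum_trb_lengths(), which walks the ring from the dequeue pointer up to (but excluding) stop_trb and totals the transfer lengths, skipping no-op and link TRBs since they carry no payload. A sketch of the whole helper (the signature and the deq_seg initialization are assumptions):

static u32 sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
			   union xhci_trb *stop_trb)
{
	u32 sum;
	union xhci_trb *trb = ring->dequeue;
	struct xhci_segment *seg = ring->deq_seg;	/* assumed field */

	for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
		if (!trb_is_noop(trb) && !trb_is_link(trb))
			sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
	}
	return sum;
}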
2388 /* stopped on ep trb with invalid length, exclude it */
2416 xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
2470 /* Some transfer events don't always point to a trb, see xhci 4.17.4 */
3058 struct xhci_generic_trb *trb;
3060 trb = &ring->enqueue->generic;
3061 trb->field[0] = cpu_to_le32(field1);
3062 trb->field[1] = cpu_to_le32(field2);
3063 trb->field[2] = cpu_to_le32(field3);
3066 trb->field[3] = cpu_to_le32(field4);
3068 trace_xhci_queue_trb(ring, trb);
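Matches 3058-3068 are queue_trb(). The gap between source lines 3063 and 3066 is telling: field[3] carries the cycle bit that hands the TRB to the controller, so it must be written last, and the unmatched lines in between plausibly hold a write barrier. A sketch (the more_trbs_coming parameter, the wmb(), and the inc_enq() call are assumptions filling the gaps):

static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		      bool more_trbs_coming,
		      u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);

	/* assumed: barrier so the body is visible before the cycle bit flips */
	wmb();
	trb->field[3] = cpu_to_le32(field4);

	trace_xhci_queue_trb(ring, trb);

	inc_enq(xhci, ring, more_trbs_coming);	/* assumed enqueue advance */
}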
3407 /* create a max max_pkt sized bounce buffer pointed to by last trb */
3484 /* Deal with URB_ZERO_PACKET - need one more td/trb */
4139 * Round up to the next frame and consider the time before trb really