Lines matching refs:trb (from the Linux kernel's Tegra XUSB device-mode controller driver, drivers/usb/gadget/udc/tegra-xudc.c)

336 static inline u32 trb_read_##name(struct tegra_xudc_trb *trb)		\
338 return (le32_to_cpu(trb->member) >> (shift)) & (mask); \
341 trb_write_##name(struct tegra_xudc_trb *trb, u32 val) \
345 tmp = le32_to_cpu(trb->member) & ~((mask) << (shift)); \
347 trb->member = cpu_to_le32(tmp); \
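
The fragments above are the body of the driver's BUILD_TRB_RW_FUNCS macro, which stamps out a shift-and-mask read accessor and a read-modify-write write accessor for each TRB bit-field. A minimal userspace sketch of the same pattern, assuming plain uint32_t words in place of the kernel's __le32/le32_to_cpu/cpu_to_le32 handling; the "cycle" field at bit 0 of the control word is chosen for illustration:

#include <stdint.h>
#include <stdio.h>

struct trb {
	uint32_t control;
};

/* Generate trb_read_NAME()/trb_write_NAME() for one bit-field of a TRB word. */
#define BUILD_TRB_RW(name, member, shift, mask)				\
static inline uint32_t trb_read_##name(struct trb *trb)		\
{									\
	return (trb->member >> (shift)) & (mask);			\
}									\
static inline void trb_write_##name(struct trb *trb, uint32_t val)	\
{									\
	uint32_t tmp = trb->member & ~((mask) << (shift));		\
									\
	tmp |= (val & (mask)) << (shift);				\
	trb->member = tmp;						\
}

BUILD_TRB_RW(cycle, control, 0, 0x1)

int main(void)
{
	struct trb t = { .control = 0 };

	trb_write_cycle(&t, 1);
	printf("cycle = %u\n", (unsigned)trb_read_cycle(&t)); /* prints "cycle = 1" */
	return 0;
}

The write side clears the field with an inverted mask before OR-ing in the new value, so neighboring bits in the same 32-bit word are preserved.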
369 static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
371 return ((u64)trb_read_data_hi(trb) << 32) |
372 trb_read_data_lo(trb);
375 static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
377 trb_write_data_lo(trb, lower_32_bits(addr));
378 trb_write_data_hi(trb, upper_32_bits(addr));
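
trb_read_data_ptr()/trb_write_data_ptr() split a 64-bit DMA address across the TRB's two 32-bit words. The only subtlety is the cast before the shift on the read path. A standalone sketch, with uint32_t fields standing in for the driver's little-endian words and lower_32_bits()/upper_32_bits() open-coded:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

struct trb {
	uint32_t data_lo;
	uint32_t data_hi;
};

static void trb_write_data_ptr(struct trb *trb, uint64_t addr)
{
	trb->data_lo = (uint32_t)addr;		/* lower_32_bits(addr) */
	trb->data_hi = (uint32_t)(addr >> 32);	/* upper_32_bits(addr) */
}

static uint64_t trb_read_data_ptr(struct trb *trb)
{
	/* Cast before shifting; a bare 32-bit << 32 would be undefined. */
	return ((uint64_t)trb->data_hi << 32) | trb->data_lo;
}

int main(void)
{
	struct trb t;

	trb_write_data_ptr(&t, 0x123456789abcdef0ULL);
	printf("%" PRIx64 "\n", trb_read_data_ptr(&t)); /* 123456789abcdef0 */
	return 0;
}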
618 struct tegra_xudc_trb *trb)
622 type, trb, trb->data_lo, trb->data_hi, trb->status,
623 trb->control);
896 struct tegra_xudc_trb *trb)
900 index = trb - ep->transfer_ring;
905 return (ep->transfer_ring_phys + index * sizeof(*trb));
911 struct tegra_xudc_trb *trb;
914 index = (addr - ep->transfer_ring_phys) / sizeof(*trb);
919 trb = &ep->transfer_ring[index];
921 return trb;
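
trb_virt_to_phys() and trb_phys_to_virt() convert between a TRB's position in the transfer ring array and its bus address with plain index arithmetic; this works because the ring is one contiguous DMA allocation, so offset = index * sizeof(trb) holds in both directions. A sketch under that assumption — RING_SIZE is a placeholder for the driver's ring length, and the assert() bounds checks are additions of this sketch:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define RING_SIZE 64 /* placeholder for the driver's ring length */

struct trb {
	uint32_t data_lo, data_hi, status, control;
};

struct ep {
	struct trb ring[RING_SIZE];	/* ep->transfer_ring */
	uint64_t ring_phys;		/* ep->transfer_ring_phys */
};

static uint64_t trb_virt_to_phys(struct ep *ep, struct trb *trb)
{
	size_t index = trb - ep->ring;	/* pointer difference = array index */

	assert(index < RING_SIZE);
	return ep->ring_phys + index * sizeof(*trb);
}

static struct trb *trb_phys_to_virt(struct ep *ep, uint64_t addr)
{
	size_t index = (addr - ep->ring_phys) / sizeof(struct trb);

	assert(index < RING_SIZE);
	return &ep->ring[index];
}

int main(void)
{
	struct ep ep = { .ring_phys = 0x80000000ULL };
	struct trb *trb = &ep.ring[5];

	/* Round-tripping an address must give back the same TRB. */
	assert(trb_phys_to_virt(&ep, trb_virt_to_phys(&ep, trb)) == trb);
	return 0;
}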
1081 struct tegra_xudc_trb *trb,
1095 trb_write_data_ptr(trb, buf_addr);
1097 trb_write_transfer_len(trb, len);
1098 trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);
1102 trb_write_chain(trb, 0);
1104 trb_write_chain(trb, 1);
1106 trb_write_ioc(trb, ioc);
1111 trb_write_isp(trb, 1);
1113 trb_write_isp(trb, 0);
1118 trb_write_type(trb, TRB_TYPE_DATA_STAGE);
1120 trb_write_type(trb, TRB_TYPE_STATUS_STAGE);
1124 trb_write_data_stage_dir(trb, 1);
1126 trb_write_data_stage_dir(trb, 0);
1128 trb_write_type(trb, TRB_TYPE_ISOCH);
1129 trb_write_sia(trb, 1);
1130 trb_write_frame_id(trb, 0);
1131 trb_write_tlbpc(trb, 0);
1133 trb_write_type(trb, TRB_TYPE_STREAM);
1134 trb_write_stream_id(trb, req->usb_req.stream_id);
1136 trb_write_type(trb, TRB_TYPE_NORMAL);
1137 trb_write_stream_id(trb, 0);
1140 trb_write_cycle(trb, ep->pcs);
1145 dump_trb(xudc, "TRANSFER", trb);
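
tegra_xudc_queue_one_trb() fills in the buffer pointer, transfer length, remaining-TD size, and the chain/IOC/ISP flags, then a type-specific block (data/status-stage direction for control transfers; SIA, frame ID, and TLBPC for isochronous; stream ID for stream endpoints), and only then writes the cycle bit. That ordering matters: a TRB whose cycle bit matches the producer cycle state belongs to the controller, so the cycle bit must be the last thing to change. A compressed sketch of the hand-off — the bit positions and type value are illustrative, not the hardware's, and unlike the driver's per-field accessors this version composes the control word locally so a single store publishes the TRB:

#include <stdbool.h>
#include <stdint.h>

struct trb {
	uint32_t data_lo, data_hi, status, control;
};

/* Illustrative layout only; the real field positions live in the driver. */
#define TRB_CYCLE		(1u << 0)
#define TRB_IOC			(1u << 5)
#define TRB_TYPE_NORMAL		1u
#define TRB_TYPE_SHIFT		10

static void queue_one_trb(struct trb *trb, uint64_t buf, uint32_t len,
			  bool ioc, bool pcs)
{
	uint32_t control = (TRB_TYPE_NORMAL << TRB_TYPE_SHIFT) |
			   (ioc ? TRB_IOC : 0);

	/* Everything the controller will read goes in first... */
	trb->data_lo = (uint32_t)buf;
	trb->data_hi = (uint32_t)(buf >> 32);
	trb->status = len;

	/*
	 * ...then one final store sets the cycle bit to the producer cycle
	 * state, handing the completed TRB to the hardware. In kernel code a
	 * write barrier (wmb()) would precede this store.
	 */
	trb->control = control | (pcs ? TRB_CYCLE : 0);
}

int main(void)
{
	struct trb t = { 0 };

	queue_one_trb(&t, 0x1000, 512, true, true);
	return 0;
}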
1185 struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
1191 tegra_xudc_queue_one_trb(ep, req, trb, ioc);
1192 req->last_trb = trb;
1196 trb = &ep->transfer_ring[ep->enq_ptr];
1197 trb_write_cycle(trb, ep->pcs);
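
In the enqueue loop, each queued TRB is recorded as the request's last_trb and the enqueue pointer advances. The two fragments at lines 1196-1197 are the wrap case: when enq_ptr lands on the ring's final slot, which holds the link TRB, the driver gives the link TRB the current cycle bit so the controller will follow it, toggles the producer cycle state, and restarts at slot 0. A self-contained sketch of that advance step, assuming the same illustrative layout as above (cycle bit at bit 0 of control, RING_SIZE as a placeholder):

#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 64 /* placeholder; the last slot holds the link TRB */

struct trb {
	uint32_t data_lo, data_hi, status, control;
};

struct ep {
	struct trb ring[RING_SIZE];
	unsigned int enq_ptr;
	bool pcs;	/* producer cycle state */
};

static void trb_write_cycle(struct trb *trb, bool cycle)
{
	trb->control = (trb->control & ~1u) | (cycle ? 1u : 0u);
}

/*
 * Advance the enqueue pointer. On reaching the link TRB in the last slot,
 * hand the link TRB to the controller by giving it the current cycle bit,
 * then toggle pcs (TRBs after the wrap use the opposite bit) and restart
 * at slot 0.
 */
static void advance_enq_ptr(struct ep *ep)
{
	ep->enq_ptr++;
	if (ep->enq_ptr == RING_SIZE - 1) {
		trb_write_cycle(&ep->ring[ep->enq_ptr], ep->pcs);
		ep->pcs = !ep->pcs;
		ep->enq_ptr = 0;
	}
}

int main(void)
{
	struct ep ep = { .enq_ptr = RING_SIZE - 2, .pcs = true };

	advance_enq_ptr(&ep);	/* crosses the link TRB and wraps */
	return (int)ep.enq_ptr;	/* 0 */
}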
1338 struct tegra_xudc_trb *trb = req->first_trb;
1339 bool pcs_enq = trb_read_cycle(trb);
1344 * and must correct trb cycle bit to the last un-enqueued state.
1346 while (trb != &ep->transfer_ring[ep->enq_ptr]) {
1347 pcs = trb_read_cycle(trb);
1348 memset(trb, 0, sizeof(*trb));
1349 trb_write_cycle(trb, !pcs);
1350 trb++;
1352 if (trb_read_type(trb) == TRB_TYPE_LINK)
1353 trb = ep->transfer_ring;
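
The cancellation path above walks from the cancelled request's first TRB up to the enqueue pointer, zeroing each TRB and writing the inverse of its old cycle bit so the controller sees it as not yet enqueued; when the walk reaches the link TRB it wraps back to the start of the ring. A sketch of that loop, with the same illustrative field positions as the earlier blocks:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define RING_SIZE 64		/* placeholder; last slot is the link TRB */
#define TRB_TYPE_LINK 6u	/* illustrative type value */

struct trb {
	uint32_t data_lo, data_hi, status, control;
};

static bool trb_read_cycle(struct trb *trb) { return trb->control & 1u; }
static void trb_write_cycle(struct trb *trb, bool c)
{
	trb->control = (trb->control & ~1u) | (c ? 1u : 0u);
}
static uint32_t trb_read_type(struct trb *trb)
{
	return (trb->control >> 10) & 0x3fu; /* assumed field position */
}

/*
 * Wipe every TRB from the cancelled request's first TRB up to the enqueue
 * pointer. Writing the inverse of each TRB's old cycle bit returns it to
 * the "not yet enqueued" state, so the controller will stop there.
 */
static void squeeze(struct trb *ring, struct trb *first, unsigned int enq_ptr)
{
	struct trb *trb = first;

	while (trb != &ring[enq_ptr]) {
		bool pcs = trb_read_cycle(trb);

		memset(trb, 0, sizeof(*trb));
		trb_write_cycle(trb, !pcs);
		trb++;

		/* Follow the link TRB back to the start of the ring. */
		if (trb_read_type(trb) == TRB_TYPE_LINK)
			trb = ring;
	}
}

int main(void)
{
	struct trb ring[RING_SIZE] = { 0 };

	ring[RING_SIZE - 1].control = TRB_TYPE_LINK << 10;
	squeeze(ring, &ring[3], 7);
	return 0;
}

After the loop, the driver rewinds enq_ptr to the cancelled request's first TRB and restores pcs from the cycle bit it sampled there (the pcs_enq at line 1339), so later requests re-queue from that slot.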
1359 * Retrieve the correct cycle bit state from the first trb of
1376 * Determine if the given TRB is in the range [first trb, last trb] for the
1381 struct tegra_xudc_trb *trb)
1383 dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
1384 req->first_trb, req->last_trb, trb);
1386 if (trb >= req->first_trb && (trb <= req->last_trb ||
1390 if (trb < req->first_trb && trb <= req->last_trb &&
1403 struct tegra_xudc_trb *trb)
1407 dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
1408 __func__, req->first_trb, req->last_trb, enq_trb, trb);
1410 if (trb < req->first_trb && (enq_trb <= trb ||
1414 if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
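
trb_in_request() decides whether a TRB falls inside a request's [first_trb, last_trb] span by pointer comparison, with a second clause for spans that wrap past the end of the ring (last_trb < first_trb); trb_before_request() makes the analogous comparison against the enqueue pointer. Both reduce to a wrapped-interval membership test. A standalone sketch of that predicate using ring indices rather than pointers (the index form is an assumption; the driver compares TRB pointers directly):

#include <assert.h>
#include <stdbool.h>

/*
 * Is index "i" inside the ring span [first, last]? If last < first the
 * span wraps around the end of the ring, so the inside/outside test flips.
 */
static bool in_span(unsigned int first, unsigned int last, unsigned int i)
{
	if (first <= last)
		return i >= first && i <= last;	/* contiguous span */
	return i >= first || i <= last;		/* span wraps past slot 0 */
}

int main(void)
{
	/* Contiguous span [3, 7] in the ring. */
	assert(in_span(3, 7, 5) && !in_span(3, 7, 9));

	/* Wrapped span [60, 2]: covers 60..end plus 0..2. */
	assert(in_span(60, 2, 61) && in_span(60, 2, 1) && !in_span(60, 2, 30));
	return 0;
}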
1701 struct tegra_xudc_trb *trb)
1703 trb_write_data_ptr(trb, ep->transfer_ring_phys);
1704 trb_write_type(trb, TRB_TYPE_LINK);
1705 trb_write_toggle_cycle(trb, 1);
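
The three writes above initialize the ring's last slot as a link TRB: its data pointer aims back at the ring's own bus address, its type is TRB_TYPE_LINK, and toggle-cycle is set so the controller flips its consumer cycle state each time it traverses the link, mirroring the producer-side pcs toggle in the enqueue path. A sketch of that initialization, again with illustrative field positions:

#include <stdint.h>

struct trb {
	uint32_t data_lo, data_hi, status, control;
};

/* Illustrative positions; the real values belong to the controller. */
#define TRB_TYPE_LINK		6u
#define TRB_TYPE_SHIFT		10
#define TRB_TOGGLE_CYCLE	(1u << 1)

/* Make the ring's last TRB a link back to the start of the ring. */
static void setup_link_trb(struct trb *trb, uint64_t ring_phys)
{
	trb->data_lo = (uint32_t)ring_phys;
	trb->data_hi = (uint32_t)(ring_phys >> 32);
	trb->control = (TRB_TYPE_LINK << TRB_TYPE_SHIFT) | TRB_TOGGLE_CYCLE;
}

int main(void)
{
	struct trb link = { 0 };

	setup_link_trb(&link, 0x80000000ULL);
	return 0;
}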
2635 trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
2643 if (trb_in_request(ep, req, trb))
2655 struct tegra_xudc_trb *trb;
2661 trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
2662 req = trb_to_request(ep, trb);
2668 if (req && (short_packet || (!trb_read_chain(trb) &&
2706 struct tegra_xudc_trb *trb;
2716 trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
2719 ep->deq_ptr = (trb - ep->transfer_ring) + 1;
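
On a transfer event, the driver recovers the completed TRB from the event's data pointer (trb_phys_to_virt() applied to trb_read_data_ptr(event)), scans the endpoint's queued requests with trb_in_request() to find the owner, and moves the dequeue pointer to the slot just past the completed TRB, as at line 2719. A sketch of that last step; the skip over the link-TRB slot is an assumption here, since the fragment above shows only the increment:

#include <assert.h>
#include <stdint.h>

#define RING_SIZE 64 /* placeholder; last slot holds the link TRB */

struct trb {
	uint32_t data_lo, data_hi, status, control;
};

struct ep {
	struct trb ring[RING_SIZE];
	unsigned int deq_ptr;
};

/*
 * After a completion event, the new dequeue slot is the one just past the
 * completed TRB; wrap past the link TRB slot back to 0.
 */
static void complete_trb(struct ep *ep, struct trb *completed)
{
	ep->deq_ptr = (unsigned int)(completed - ep->ring) + 1;
	if (ep->deq_ptr == RING_SIZE - 1)
		ep->deq_ptr = 0;
}

int main(void)
{
	struct ep ep = { .deq_ptr = 0 };

	complete_trb(&ep, &ep.ring[RING_SIZE - 2]);
	assert(ep.deq_ptr == 0);	/* wrapped past the link slot */
	return 0;
}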