Lines matching refs:trb (drivers/usb/gadget/udc/tegra-xudc.c)

337 static inline u32 trb_read_##name(struct tegra_xudc_trb *trb)		\
339 return (le32_to_cpu(trb->member) >> (shift)) & (mask); \
342 trb_write_##name(struct tegra_xudc_trb *trb, u32 val) \
346 tmp = le32_to_cpu(trb->member) & ~((mask) << (shift)); \
348 trb->member = cpu_to_le32(tmp); \
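
The accessors above are stamped out per field by a single macro over a four-word, little-endian TRB. A minimal sketch, assuming the xHCI-style word layout and an illustrative macro name (BUILD_TRB_FIELD_RW); only the function bodies are taken from the listing:

struct tegra_xudc_trb {
	__le32 data_lo;
	__le32 data_hi;
	__le32 status;
	__le32 control;
};

/* Generate trb_read_<name>()/trb_write_<name>() for one bit field of
 * one TRB word; the macro name here is illustrative. */
#define BUILD_TRB_FIELD_RW(name, member, shift, mask)			\
static inline u32 trb_read_##name(struct tegra_xudc_trb *trb)		\
{									\
	return (le32_to_cpu(trb->member) >> (shift)) & (mask);		\
}									\
static inline void							\
trb_write_##name(struct tegra_xudc_trb *trb, u32 val)			\
{									\
	u32 tmp;							\
									\
	tmp = le32_to_cpu(trb->member) & ~((mask) << (shift));		\
	tmp |= (val & (mask)) << (shift);				\
	trb->member = cpu_to_le32(tmp);					\
}

/* Example instantiations; positions follow the standard xHCI TRB layout. */
BUILD_TRB_FIELD_RW(cycle, control, 0, 0x1)
BUILD_TRB_FIELD_RW(type, control, 10, 0x3f)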
370 static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
372 return ((u64)trb_read_data_hi(trb) << 32) |
373 trb_read_data_lo(trb);
376 static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
378 trb_write_data_lo(trb, lower_32_bits(addr));
379 trb_write_data_hi(trb, upper_32_bits(addr));
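
These helpers split and rejoin a 64-bit bus address across the two 32-bit data words. A round-trip usage sketch (the address source is illustrative):

	dma_addr_t buf = req->usb_req.dma;	/* illustrative source */

	trb_write_data_ptr(trb, buf);
	/* Reassembles the same 64-bit value from the hi/lo words. */
	WARN_ON(trb_read_data_ptr(trb) != buf);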
619 struct tegra_xudc_trb *trb)
623 type, trb, trb->data_lo, trb->data_hi, trb->status,
624 trb->control);
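
A sketch of the complete dump helper built around the argument list visible above; the exact format string is an assumption:

static void dump_trb(struct tegra_xudc *xudc, const char *type,
		     struct tegra_xudc_trb *trb)
{
	/* Format string is illustrative; the argument order matches the
	 * fragment above. */
	dev_dbg(xudc->dev,
		"%s TRB %p: data_lo %#x data_hi %#x status %#x control %#x\n",
		type, trb, trb->data_lo, trb->data_hi, trb->status,
		trb->control);
}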
886 struct tegra_xudc_trb *trb)
890 index = trb - ep->transfer_ring;
895 return (ep->transfer_ring_phys + index * sizeof(*trb));
901 struct tegra_xudc_trb *trb;
904 index = (addr - ep->transfer_ring_phys) / sizeof(*trb);
909 trb = &ep->transfer_ring[index];
911 return trb;
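
Both conversions are plain pointer arithmetic against the ring base; the sketches below add the bounds check one would expect, with the ring-size constant (XUDC_TRANSFER_RING_SIZE) assumed:

static inline dma_addr_t trb_virt_to_phys(struct tegra_xudc_ep *ep,
					  struct tegra_xudc_trb *trb)
{
	unsigned int index = trb - ep->transfer_ring;

	if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))	/* bound assumed */
		return 0;

	return (ep->transfer_ring_phys + index * sizeof(*trb));
}

static inline struct tegra_xudc_trb *trb_phys_to_virt(struct tegra_xudc_ep *ep,
						       dma_addr_t addr)
{
	struct tegra_xudc_trb *trb;
	unsigned int index;

	index = (addr - ep->transfer_ring_phys) / sizeof(*trb);

	if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))	/* bound assumed */
		return NULL;

	trb = &ep->transfer_ring[index];

	return trb;
}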
1071 struct tegra_xudc_trb *trb,
1085 trb_write_data_ptr(trb, buf_addr);
1087 trb_write_transfer_len(trb, len);
1088 trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);
1092 trb_write_chain(trb, 0);
1094 trb_write_chain(trb, 1);
1096 trb_write_ioc(trb, ioc);
1101 trb_write_isp(trb, 1);
1103 trb_write_isp(trb, 0);
1108 trb_write_type(trb, TRB_TYPE_DATA_STAGE);
1110 trb_write_type(trb, TRB_TYPE_STATUS_STAGE);
1114 trb_write_data_stage_dir(trb, 1);
1116 trb_write_data_stage_dir(trb, 0);
1118 trb_write_type(trb, TRB_TYPE_ISOCH);
1119 trb_write_sia(trb, 1);
1120 trb_write_frame_id(trb, 0);
1121 trb_write_tlbpc(trb, 0);
1123 trb_write_type(trb, TRB_TYPE_STREAM);
1124 trb_write_stream_id(trb, req->usb_req.stream_id);
1126 trb_write_type(trb, TRB_TYPE_NORMAL);
1127 trb_write_stream_id(trb, 0);
1130 trb_write_cycle(trb, ep->pcs);
1135 dump_trb(xudc, "TRANSFER", trb);
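
Pulling the 1071-1135 fragments together: each TRB gets its payload pointer, length, TD bookkeeping, chain/interrupt flags, and a type that depends on the endpoint, and the cycle bit is written last so the controller never consumes a half-built TRB. A condensed sketch; the locals and branch conditions marked "assumed" are inferred, not taken from the source:

static void tegra_xudc_queue_one_trb(struct tegra_xudc_ep *ep,
				     struct tegra_xudc_request *req,
				     struct tegra_xudc_trb *trb, bool ioc)
{
	/* All four locals are assumed; the real code derives them from req. */
	u64 buf_addr = req->usb_req.dma + req->buf_queued;
	u32 len = req->usb_req.length - req->buf_queued;
	bool last = (req->trbs_queued == req->trbs_needed - 1);
	bool data_stage_in = false;	/* assumed: control data-stage direction */

	trb_write_data_ptr(trb, buf_addr);
	trb_write_transfer_len(trb, len);
	trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);

	trb_write_chain(trb, last ? 0 : 1);	/* chain all but the final TRB */
	trb_write_ioc(trb, ioc);

	/* Interrupt on short packet for OUT data (condition assumed). */
	trb_write_isp(trb, usb_endpoint_dir_out(ep->desc) ? 1 : 0);

	if (usb_endpoint_xfer_control(ep->desc)) {
		/* Data stage when there is a payload, else status stage. */
		if (req->usb_req.length)
			trb_write_type(trb, TRB_TYPE_DATA_STAGE);
		else
			trb_write_type(trb, TRB_TYPE_STATUS_STAGE);
		trb_write_data_stage_dir(trb, data_stage_in ? 1 : 0);
	} else if (usb_endpoint_xfer_isoc(ep->desc)) {
		trb_write_type(trb, TRB_TYPE_ISOCH);
		trb_write_sia(trb, 1);		/* schedule in any frame */
		trb_write_frame_id(trb, 0);
		trb_write_tlbpc(trb, 0);
	} else if (req->usb_req.stream_id) {
		trb_write_type(trb, TRB_TYPE_STREAM);
		trb_write_stream_id(trb, req->usb_req.stream_id);
	} else {
		trb_write_type(trb, TRB_TYPE_NORMAL);
		trb_write_stream_id(trb, 0);
	}

	/* Ownership handover: the cycle bit is written last. */
	trb_write_cycle(trb, ep->pcs);

	dump_trb(ep->xudc, "TRANSFER", trb);
}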
1175 struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
1181 tegra_xudc_queue_one_trb(ep, req, trb, ioc);
1182 req->last_trb = trb;
1186 trb = &ep->transfer_ring[ep->enq_ptr];
1187 trb_write_cycle(trb, ep->pcs);
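
The surrounding enqueue loop advances enq_ptr after each TRB and, when it reaches the Link-TRB slot at the end of the ring, hands that Link TRB to the hardware (lines 1186-1187), toggles the producer cycle state, and wraps to slot 0. A sketch with the ring-size constant and the ioc policy assumed:

	for (i = 0; i < count; i++) {
		struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
		bool ioc = (i == count - 1);	/* interrupt on last TRB: assumed */

		tegra_xudc_queue_one_trb(ep, req, trb, ioc);
		req->last_trb = trb;

		ep->enq_ptr++;
		/* Last slot holds the Link TRB: arm it and wrap. */
		if (ep->enq_ptr == XUDC_TRANSFER_RING_SIZE - 1) {
			trb = &ep->transfer_ring[ep->enq_ptr];
			trb_write_cycle(trb, ep->pcs);
			ep->pcs = !ep->pcs;
			ep->enq_ptr = 0;
		}
	}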
1328 struct tegra_xudc_trb *trb = req->first_trb;
1329 bool pcs_enq = trb_read_cycle(trb);
1334 * and the trb cycle bits must be corrected back to their un-enqueued state.
1336 while (trb != &ep->transfer_ring[ep->enq_ptr]) {
1337 pcs = trb_read_cycle(trb);
1338 memset(trb, 0, sizeof(*trb));
1339 trb_write_cycle(trb, !pcs);
1340 trb++;
1342 if (trb_read_type(trb) == TRB_TYPE_LINK)
1343 trb = ep->transfer_ring;
1349 * Retrieve the correct cycle bit state from the first trb of
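
The cancellation walk zeroes every TRB from the cancelled request's first TRB up to the enqueue pointer and flips each cycle bit back, so the hardware sees those slots as never enqueued; afterwards the producer state is restored, per the 1349 comment, from the first TRB of the next queued request. A sketch; the rewind and restore at the end are assumptions:

	struct tegra_xudc_trb *trb = req->first_trb;
	bool pcs_enq = trb_read_cycle(trb);
	bool pcs;

	/*
	 * Clear out every TRB of the cancelled request and flip each
	 * cycle bit back to its un-enqueued state.
	 */
	while (trb != &ep->transfer_ring[ep->enq_ptr]) {
		pcs = trb_read_cycle(trb);
		memset(trb, 0, sizeof(*trb));
		trb_write_cycle(trb, !pcs);
		trb++;

		/* Skip over the Link TRB rather than clearing it. */
		if (trb_read_type(trb) == TRB_TYPE_LINK)
			trb = ep->transfer_ring;
	}

	/* Rewind the software enqueue state (details assumed). */
	ep->enq_ptr = req->first_trb - ep->transfer_ring;
	ep->pcs = pcs_enq;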
1366 * Determine if the given TRB is in the range [first trb, last trb] for the
1371 struct tegra_xudc_trb *trb)
1373 dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
1374 req->first_trb, req->last_trb, trb);
1376 if (trb >= req->first_trb && (trb <= req->last_trb ||
1380 if (trb < req->first_trb && trb <= req->last_trb &&
1393 struct tegra_xudc_trb *trb)
1397 dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
1398 __func__, req->first_trb, req->last_trb, enq_trb, trb);
1400 if (trb < req->first_trb && (enq_trb <= trb ||
1404 if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
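
Both predicates come in a straight-line and a wrapped variant, because a request's TRB span may cross the end of the ring. Reconstructed sketches; the continuation of each truncated condition (lines 1376, 1380, and 1400) is inferred and marked:

/* Is trb within [req->first_trb, req->last_trb], allowing for wrap? */
static bool trb_in_request(struct tegra_xudc_ep *ep,
			   struct tegra_xudc_request *req,
			   struct tegra_xudc_trb *trb)
{
	if (trb >= req->first_trb && (trb <= req->last_trb ||
				      req->last_trb < req->first_trb))	/* inferred */
		return true;

	if (trb < req->first_trb && trb <= req->last_trb &&
	    req->last_trb < req->first_trb)	/* inferred wrap term */
		return true;

	return false;
}

/* Does trb fall before this request, relative to the enqueue pointer? */
static bool trb_before_request(struct tegra_xudc_ep *ep,
			       struct tegra_xudc_request *req,
			       struct tegra_xudc_trb *trb)
{
	struct tegra_xudc_trb *enq_trb = &ep->transfer_ring[ep->enq_ptr];

	if (trb < req->first_trb && (enq_trb <= trb ||
				     enq_trb > req->first_trb))	/* inferred */
		return true;

	if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
		return true;

	return false;
}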
1689 struct tegra_xudc_trb *trb)
1691 trb_write_data_ptr(trb, ep->transfer_ring_phys);
1692 trb_write_type(trb, TRB_TYPE_LINK);
1693 trb_write_toggle_cycle(trb, 1);
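
Ring initialization plants a Link TRB in the ring's final slot, pointing back at the ring base, with toggle-cycle set so the producer cycle state inverts on every wrap. A sketch, with the last-slot index assumed:

	struct tegra_xudc_trb *trb =
		&ep->transfer_ring[XUDC_TRANSFER_RING_SIZE - 1];	/* assumed */

	trb_write_data_ptr(trb, ep->transfer_ring_phys);	/* back to base */
	trb_write_type(trb, TRB_TYPE_LINK);
	trb_write_toggle_cycle(trb, 1);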
2624 trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
2632 if (trb_in_request(ep, req, trb))
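
The lookup walks the endpoint's pending requests in order and returns the first one whose on-ring TRB span contains the target. A sketch, assuming the requests hang off ep->queue via a 'list' member:

static struct tegra_xudc_request *
trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
{
	struct tegra_xudc_request *req;

	list_for_each_entry(req, &ep->queue, list) {	/* names assumed */
		if (!req->trbs_queued)
			break;	/* later requests have nothing on the ring */

		if (trb_in_request(ep, req, trb))
			return req;
	}

	return NULL;
}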
2644 struct tegra_xudc_trb *trb;
2650 trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
2651 req = trb_to_request(ep, trb);
2657 if (req && (short_packet || (!trb_read_chain(trb) &&
2695 struct tegra_xudc_trb *trb;
2705 trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
2708 ep->deq_ptr = (trb - ep->transfer_ring) + 1;
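
On a transfer event, the completed TRB's bus address is read back out of the event TRB, mapped to its ring slot, and matched to a request; a TD counts as finished on a short packet or when its final, unchained TRB completes, at which point the software dequeue pointer is advanced past it. A sketch tying the two fragment groups together; the completion-code helper, the continuation of the truncated 2657 condition, the wrap handling, and the completion call are all assumptions:

	struct tegra_xudc_trb *trb;
	struct tegra_xudc_request *req;
	bool short_packet;

	/* Assumed helper: completion code from the event's status word. */
	short_packet = (trb_read_cmpl_code(event) ==
			TRB_CMPL_CODE_SHORT_PACKET);

	trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
	req = trb_to_request(ep, trb);

	if (req && (short_packet || (!trb_read_chain(trb) &&
				     req->trbs_queued == req->trbs_needed))) {
		/* Advance past the completed TRB, skipping the Link slot. */
		ep->deq_ptr = (trb - ep->transfer_ring) + 1;
		if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)	/* assumed */
			ep->deq_ptr = 0;

		tegra_xudc_req_done(ep, req, 0);	/* assumed call */
	}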