Lines Matching refs: qh
72 * @qh: QH containing periodic bandwidth required
80 struct dwc2_qh *qh)
87 if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
92 max_claimed_usecs = 100 - qh->host_us;
98 max_claimed_usecs = 900 - qh->host_us;
104 __func__, hsotg->periodic_usecs, qh->host_us);
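
The two constants in the checks above are the spec-mandated periodic budgets: at most 100 of the 125 us in a high-speed microframe (80%) and at most 900 of the 1000 us in a full/low-speed frame (90%) may be reserved. A minimal standalone sketch of the same check, with illustrative names rather than the driver's fields:

#include <stdbool.h>
#include <stdio.h>

#define HS_BUDGET_US 100 /* 80% of a 125 us microframe */
#define FS_BUDGET_US 900 /* 90% of a 1000 us frame */

/* Return true if claiming 'want_us' more stays within the budget. */
static bool periodic_bw_fits(int claimed_us, int want_us, bool high_speed)
{
    int budget = high_speed ? HS_BUDGET_US : FS_BUDGET_US;

    return claimed_us <= budget - want_us;
}

int main(void)
{
    /* With 60 us claimed, a 50 us HS transfer busts the 100 us cap. */
    printf("%d\n", periodic_bw_fits(60, 50, true));  /* 0 */
    printf("%d\n", periodic_bw_fits(60, 50, false)); /* 1 */
    return 0;
}
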
332 * dwc2_get_ls_map() - Get the map used for the given qh
335 * @qh: QH for the periodic transfer.
340 * add logic here to get a map out of "hsotg" if !qh->do_split.
345 struct dwc2_qh *qh)
350 if (WARN_ON(!qh->dwc_tt))
354 map = qh->dwc_tt->periodic_bitmaps;
355 if (qh->dwc_tt->usb_tt->multi)
356 map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
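
A multi-TT hub has an independent transaction translator per downstream port, so each port gets its own low-speed bitmap and the lookup offsets into the per-TT array by (ttport - 1); a single-TT hub shares one bitmap. The same pointer arithmetic in isolation (the two-longs-per-bitmap sizing is an assumption of the sketch, not DWC2_ELEMENTS_PER_LS_BITMAP's actual value):

#include <stdio.h>

#define ELEMENTS_PER_LS_BITMAP 2 /* assumed longs per per-port bitmap */

/* Select the per-port map when the hub has one TT per port. */
static unsigned long *get_ls_map(unsigned long *bitmaps, int multi_tt,
                                 int ttport)
{
    unsigned long *map = bitmaps;

    if (multi_tt)
        map += ELEMENTS_PER_LS_BITMAP * (ttport - 1);
    return map;
}

int main(void)
{
    unsigned long bitmaps[4 * ELEMENTS_PER_LS_BITMAP];

    /* Port 3 of a multi-TT hub lands on the third bitmap. */
    printf("%td\n", get_ls_map(bitmaps, 1, 3) - bitmaps); /* 4 */
    return 0;
}
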
463 struct dwc2_qh *qh;
476 dwc2_sch_dbg(print_data->hsotg, "QH=%p ...%s\n", print_data->qh, str);
483 * @qh: QH to print.
486 struct dwc2_qh *qh)
488 struct dwc2_qh_print_data print_data = { hsotg, qh };
497 if (qh->schedule_low_speed) {
498 unsigned long *map = dwc2_get_ls_map(hsotg, qh);
501 qh, qh->device_us,
502 DWC2_ROUND_US_TO_SLICE(qh->device_us),
503 DWC2_US_PER_SLICE * qh->ls_start_schedule_slice);
508 qh, map);
515 for (i = 0; i < qh->num_hs_transfers; i++) {
516 struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + i;
524 qh, i, trans_time->duration_us, uframe, rel_us);
526 if (qh->num_hs_transfers) {
527 dwc2_sch_dbg(hsotg, "QH=%p Whole high speed map now:\n", qh);
536 struct dwc2_qh *qh) {}
543 * @qh: QH for the periodic transfer.
554 static int dwc2_ls_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
557 int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
558 unsigned long *map = dwc2_get_ls_map(hsotg, qh);
578 qh->device_interval, search_slice, false);
583 qh->ls_start_schedule_slice = slice;
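
The low-speed schedule is bookkept in fixed-width slices, so the transfer's device time is first rounded up to whole slices with DIV_ROUND_UP() before the bitmap is searched. Assuming a 25 us slice (an assumption of this sketch, suggested by the surrounding uses of DWC2_US_PER_SLICE):

#include <stdio.h>

#define US_PER_SLICE 25 /* assumed slice width in microseconds */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    int device_us = 117;

    /* 117 us rounds up to 5 slices (125 us) of schedule time. */
    printf("%d slices\n", DIV_ROUND_UP(device_us, US_PER_SLICE));
    return 0;
}
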
591 * @qh: QH for the periodic transfer.
594 struct dwc2_qh *qh)
596 int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
597 unsigned long *map = dwc2_get_ls_map(hsotg, qh);
604 DWC2_LS_SCHEDULE_FRAMES, slices, qh->device_interval,
605 qh->ls_start_schedule_slice);
613 * We'll start looking in qh->hs_transfers[index].start_schedule_us. We'll
618 * @qh: QH for the periodic transfer.
623 * @index: The index into qh->hs_transfers that we're working with.
628 static int dwc2_hs_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
631 struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
637 qh->host_interval, trans_time->start_schedule_us,
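
Both the low- and high-speed paths delegate to the same packed-bitmap scheduler: find a free run of the required length at or after the requested start, then mark it busy in every repetition of the interval across the map. A toy first-fit version over a plain byte map (the real scheduler packs bits into unsigned longs and handles far more cases; this only shows the shape of the search):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define SCHED_LEN 64 /* assumed map length; a multiple of the interval */

/* First-fit: claim 'len' free units in every 'interval'-sized window. */
static int toy_pmap_schedule(bool map[SCHED_LEN], int len, int interval,
                             int start)
{
    for (int off = start; off + len <= interval; off++) {
        bool fits = true;

        for (int w = 0; w < SCHED_LEN; w += interval)
            for (int i = 0; i < len; i++)
                if (map[w + off + i])
                    fits = false;
        if (!fits)
            continue;
        for (int w = 0; w < SCHED_LEN; w += interval)
            memset(&map[w + off], 1, len);
        return off; /* offset within the interval */
    }
    return -1; /* no room */
}

int main(void)
{
    bool map[SCHED_LEN] = { false };

    printf("%d\n", toy_pmap_schedule(map, 3, 16, 0)); /* 0 */
    printf("%d\n", toy_pmap_schedule(map, 3, 16, 0)); /* 3 */
    return 0;
}
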
651 * @qh: QH for the periodic transfer.
655 struct dwc2_qh *qh, int index)
657 struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
662 qh->host_interval, trans_time->start_schedule_us);
676 * @qh: QH for the periodic transfer.
679 struct dwc2_qh *qh)
681 int bytecount = qh->maxp_mult * qh->maxp;
690 host_interval_in_sched = gcd(qh->host_interval,
718 if (qh->schedule_low_speed) {
719 err = dwc2_ls_pmap_schedule(hsotg, qh, ls_search_slice);
738 start_s_uframe = qh->ls_start_schedule_slice /
753 if (qh->schedule_low_speed)
754 dwc2_ls_pmap_unschedule(hsotg, qh);
756 (qh->ls_start_schedule_slice /
796 if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in)
803 DIV_ROUND_UP(188 * (qh->ls_start_schedule_slice %
825 if (!qh->ep_is_in &&
829 qh, first_data_bytes, bytecount);
830 if (qh->schedule_low_speed)
831 dwc2_ls_pmap_unschedule(hsotg, qh);
838 qh->num_hs_transfers = 1 + DIV_ROUND_UP(other_data_bytes, 188);
845 if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
847 qh->num_hs_transfers += 2;
849 qh->num_hs_transfers += 3;
851 if (qh->ep_is_in) {
870 if (qh->ep_is_in) {
874 qh->num_hs_transfers++;
877 last = rel_uframe + qh->num_hs_transfers + 1;
881 qh->num_hs_transfers += 2;
883 qh->num_hs_transfers += 1;
887 qh->num_hs_transfers--;
903 qh->hs_transfers[0].duration_us = HS_USECS_ISO(first_count);
904 for (i = 1; i < qh->num_hs_transfers - 1; i++)
905 qh->hs_transfers[i].duration_us =
907 if (qh->num_hs_transfers > 1)
908 qh->hs_transfers[qh->num_hs_transfers - 1].duration_us =
916 qh->hs_transfers[0].start_schedule_us =
918 for (i = 1; i < qh->num_hs_transfers; i++)
919 qh->hs_transfers[i].start_schedule_us =
925 for (i = 0; i < qh->num_hs_transfers; i++) {
926 err = dwc2_hs_pmap_schedule(hsotg, qh, true, i);
932 if (i == qh->num_hs_transfers)
936 dwc2_hs_pmap_unschedule(hsotg, qh, i);
938 if (qh->schedule_low_speed)
939 dwc2_ls_pmap_unschedule(hsotg, qh);
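
The heart of the split scheduler is the 188-byte rule: a transaction translator can move at most 188 bytes of full-speed data per microframe, so a split transfer needs one microframe for its first chunk plus one per further 188 bytes, and interrupt endpoints then pad with extra microframes for the complete-splits. A sketch of just that count (the +2 padding mirrors one of the cases above and is not the whole story):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * A TT drains at most 188 bytes of full-speed data per microframe:
 * one microframe for the first chunk, one per extra 188 bytes, plus
 * assumed CSPLIT padding for interrupt endpoints.
 */
static int toy_num_hs_transfers(int bytecount, int first_data_bytes,
                                int is_int)
{
    int other = bytecount - first_data_bytes;
    int n = 1 + DIV_ROUND_UP(other, 188);

    if (is_int)
        n += 2; /* mirrors the "+= 2" case above; "+= 3" also exists */
    return n;
}

int main(void)
{
    /* A 600-byte transfer with 188 bytes in the first microframe. */
    printf("%d\n", toy_num_hs_transfers(600, 188, 0)); /* 4 */
    printf("%d\n", toy_num_hs_transfers(600, 188, 1)); /* 6 */
    return 0;
}
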
958 * @qh: QH for the periodic transfer.
960 static int dwc2_uframe_schedule_hs(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
963 WARN_ON(qh->host_us != qh->device_us);
964 WARN_ON(qh->host_interval != qh->device_interval);
965 WARN_ON(qh->num_hs_transfers != 1);
968 qh->hs_transfers[0].start_schedule_us = 0;
969 qh->hs_transfers[0].duration_us = qh->host_us;
971 return dwc2_hs_pmap_schedule(hsotg, qh, false, 0);
981 * @qh: QH for the periodic transfer.
983 static int dwc2_uframe_schedule_ls(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
986 WARN_ON(qh->host_us != qh->device_us);
987 WARN_ON(qh->host_interval != qh->device_interval);
988 WARN_ON(!qh->schedule_low_speed);
991 return dwc2_ls_pmap_schedule(hsotg, qh, 0);
1001 * @qh: QH for the periodic transfer.
1003 static int dwc2_uframe_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1007 if (qh->dev_speed == USB_SPEED_HIGH)
1008 ret = dwc2_uframe_schedule_hs(hsotg, qh);
1009 else if (!qh->do_split)
1010 ret = dwc2_uframe_schedule_ls(hsotg, qh);
1012 ret = dwc2_uframe_schedule_split(hsotg, qh);
1015 dwc2_sch_dbg(hsotg, "QH=%p Failed to schedule %d\n", qh, ret);
1017 dwc2_qh_schedule_print(hsotg, qh);
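
Read together, the dispatch above is a three-way decision: high-speed devices take the single-transfer path, full/low-speed devices that need no split (the root port itself is not running at high speed) take the low-speed map path, and devices behind a TT take the split scheduler. As a sketch with stand-in scheduler bodies:

#include <stdio.h>

enum dev_speed { SPEED_HIGH, SPEED_FULL, SPEED_LOW };

static int schedule_hs(void)    { return 0; } /* stand-ins for the */
static int schedule_ls(void)    { return 0; } /* three real paths  */
static int schedule_split(void) { return 0; }

static int uframe_schedule(enum dev_speed speed, int do_split)
{
    if (speed == SPEED_HIGH)
        return schedule_hs();
    else if (!do_split)
        return schedule_ls();
    else
        return schedule_split();
}

int main(void)
{
    /* A low-speed device behind a high-speed hub needs splits. */
    printf("%d\n", uframe_schedule(SPEED_LOW, 1));
    return 0;
}
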
1026 * @qh: QH for the periodic transfer.
1028 static void dwc2_uframe_unschedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1032 for (i = 0; i < qh->num_hs_transfers; i++)
1033 dwc2_hs_pmap_unschedule(hsotg, qh, i);
1035 if (qh->schedule_low_speed)
1036 dwc2_ls_pmap_unschedule(hsotg, qh);
1038 dwc2_sch_dbg(hsotg, "QH=%p Unscheduled\n", qh);
1042 * dwc2_pick_first_frame() - Choose 1st frame for qh that's already scheduled
1044 * Takes a qh that has already been scheduled (which means we know we have the
1048 * This is expected to be called on qh's that weren't previously actively
1053 * @qh: QH for a periodic endpoint
1056 static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1083 if (qh->do_split)
1089 if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
1096 WARN_ON(qh->num_hs_transfers < 1);
1098 relative_frame = qh->hs_transfers[0].start_schedule_us /
1102 interval = gcd(qh->host_interval, DWC2_HS_SCHEDULE_UFRAMES);
1112 relative_frame = qh->ls_start_schedule_slice /
1114 interval = gcd(qh->host_interval, DWC2_LS_SCHEDULE_FRAMES);
1153 qh->next_active_frame = next_active_frame;
1154 qh->start_active_frame = next_active_frame;
1157 qh, frame_number, qh->next_active_frame);
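
The gcd() calls above deal with intervals that do not divide the schedule window: once the map wraps, a reservation effectively repeats every gcd(interval, window) units, so that reduced interval is what dwc2_pick_first_frame() steps by when converting the scheduled offset into a first active frame. The arithmetic, assuming the 8-uframe window the constant name suggests:

#include <stdio.h>

#define HS_SCHEDULE_UFRAMES 8 /* assumed high-speed window length */

static unsigned gcd(unsigned a, unsigned b)
{
    while (b) {
        unsigned t = a % b;

        a = b;
        b = t;
    }
    return a;
}

int main(void)
{
    /*
     * An interval of 12 uframes only lines up with an 8-uframe map
     * every gcd(12, 8) = 4 uframes once the map wraps, so 4 is the
     * effective interval used to pick the first frame.
     */
    printf("effective interval: %u\n", gcd(12, HS_SCHEDULE_UFRAMES));
    return 0;
}
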
1167 * @qh: QH for the periodic transfer.
1171 static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1176 status = dwc2_uframe_schedule(hsotg, qh);
1186 status = dwc2_check_periodic_bandwidth(hsotg, qh);
1201 hsotg->periodic_usecs += qh->host_us;
1203 dwc2_pick_first_frame(hsotg, qh);
1212 * by the given qh.
1215 * @qh: QH for the periodic transfer.
1217 static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1221 WARN_ON(!qh->unreserve_pending);
1224 qh->unreserve_pending = false;
1226 if (WARN_ON(!list_empty(&qh->qh_list_entry)))
1227 list_del_init(&qh->qh_list_entry);
1230 hsotg->periodic_usecs -= qh->host_us;
1233 dwc2_uframe_unschedule(hsotg, qh);
1250 * @t: Address of the qh's unreserve_timer.
1254 struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
1255 struct dwc2_hsotg *hsotg = qh->hsotg;
1268 if (timer_pending(&qh->unreserve_timer))
1282 if (qh->unreserve_pending)
1283 dwc2_do_unreserve(hsotg, qh);
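
The reserve/unreserve machinery implements a grace period: when an endpoint goes idle its bandwidth is not released immediately; instead unreserve_timer is armed and the callback only unreserves if unreserve_pending is still set, so a transfer re-queued quickly keeps its slot (dwc2_schedule_periodic() deletes the timer and clears the flag). The pattern with locking and real timers stripped out, as a sketch:

#include <stdbool.h>
#include <stdio.h>

struct toy_qh {
    bool unreserve_pending;
    int reserved_us;
};

static int total_claimed_us;

static void do_unreserve(struct toy_qh *qh)
{
    qh->unreserve_pending = false;
    total_claimed_us -= qh->reserved_us;
}

/* Endpoint goes idle: defer the release instead of dropping it. */
static void release_qh(struct toy_qh *qh)
{
    qh->unreserve_pending = true; /* the real code also arms a timer */
}

/* Timer callback: only unreserve if nobody re-claimed meanwhile. */
static void unreserve_timer_fn(struct toy_qh *qh)
{
    if (qh->unreserve_pending)
        do_unreserve(qh);
}

/* Re-queued before the timer fired: keep the reservation. */
static void reuse_qh(struct toy_qh *qh)
{
    qh->unreserve_pending = false;
}

int main(void)
{
    struct toy_qh qh = { .reserved_us = 50 };

    total_claimed_us = 50;
    release_qh(&qh);
    reuse_qh(&qh);           /* transfer re-queued in time */
    unreserve_timer_fn(&qh); /* fires, but does nothing now */
    printf("claimed: %d us\n", total_claimed_us); /* still 50 */
    return 0;
}
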
1294 * @qh: QH for a periodic endpoint
1299 struct dwc2_qh *qh)
1305 max_xfer_size = qh->maxp * qh->maxp_mult;
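
The limit being checked is the endpoint's worst-case data per microframe: wMaxPacketSize (maxp) times the additional-transaction multiplier (maxp_mult, up to 3 for high-bandwidth periodic endpoints). For example:

#include <stdio.h>

/* Worst case bytes per microframe for a periodic endpoint. */
static int max_xfer_size(int maxp, int maxp_mult)
{
    return maxp * maxp_mult;
}

int main(void)
{
    /* A high-bandwidth isochronous endpoint: 3 x 1024 bytes. */
    printf("%d\n", max_xfer_size(1024, 3)); /* 3072 */
    return 0;
}
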
1323 * @qh: QH for the periodic transfer. The QH should already contain the
1328 static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1332 status = dwc2_check_max_xfer_size(hsotg, qh);
1341 if (del_timer(&qh->unreserve_timer))
1342 WARN_ON(!qh->unreserve_pending);
1351 if (!qh->unreserve_pending) {
1352 status = dwc2_do_reserve(hsotg, qh);
1362 if (dwc2_frame_num_le(qh->next_active_frame,
1364 dwc2_pick_first_frame(hsotg, qh);
1367 qh->unreserve_pending = 0;
1371 list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
1374 list_add_tail(&qh->qh_list_entry,
1385 * @qh: QH for the periodic transfer
1388 struct dwc2_qh *qh)
1409 did_modify = mod_timer(&qh->unreserve_timer,
1412 qh->unreserve_pending = 1;
1414 list_del_init(&qh->qh_list_entry);
1434 * qh back to the "inactive" list, then queues transactions.
1436 * @t: Pointer to wait_timer in a qh.
1442 struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
1443 struct dwc2_hsotg *hsotg = qh->hsotg;
1452 if (!qh->wait_timer_cancel) {
1455 qh->want_wait = false;
1457 list_move(&qh->qh_list_entry,
1473 * @qh: The QH to init
1478 static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
1496 qh->hsotg = hsotg;
1497 timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
1498 hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1499 qh->wait_timer.function = &dwc2_wait_timer_fn;
1500 qh->ep_type = ep_type;
1501 qh->ep_is_in = ep_is_in;
1503 qh->data_toggle = DWC2_HC_PID_DATA0;
1504 qh->maxp = maxp;
1505 qh->maxp_mult = maxp_mult;
1506 INIT_LIST_HEAD(&qh->qtd_list);
1507 INIT_LIST_HEAD(&qh->qh_list_entry);
1509 qh->do_split = do_split;
1510 qh->dev_speed = dev_speed;
1517 &qh->ttport);
1520 qh->dwc_tt = dwc_tt;
1522 qh->host_us = NS_TO_US(usb_calc_bus_time(host_speed, ep_is_in,
1529 qh->device_us = NS_TO_US(device_ns);
1531 qh->device_interval = urb->interval;
1532 qh->host_interval = urb->interval * (do_split ? 8 : 1);
1539 qh->schedule_low_speed = prtspd != HPRT0_SPD_HIGH_SPEED ||
1544 qh->num_hs_transfers = -1;
1546 qh->num_hs_transfers = 1;
1548 qh->num_hs_transfers = 0;
1569 switch (qh->ep_type) {
1587 dwc2_sch_dbg(hsotg, "QH=%p Init %s, %s speed, %d bytes:\n", qh, type,
1589 dwc2_sch_dbg(hsotg, "QH=%p ...addr=%d, ep=%d, %s\n", qh,
1596 qh, qh->host_us, qh->device_us);
1598 qh, qh->host_interval, qh->device_interval);
1599 if (qh->schedule_low_speed)
1601 qh, dwc2_get_ls_map(hsotg, qh));
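
One detail of dwc2_qh_init() worth calling out: a full/low-speed device expresses its interval in 1 ms frames, but for a split transfer the host side schedules in 125 us microframes, hence host_interval = urb->interval * 8 in the do_split case. As a one-liner:

#include <stdio.h>

/* FS/LS device intervals count frames; the HS host side counts
 * microframes (8 per frame), so split transfers scale by 8. */
static int host_interval(int urb_interval, int do_split)
{
    return urb_interval * (do_split ? 8 : 1);
}

int main(void)
{
    printf("%d\n", host_interval(4, 1)); /* 32 microframes */
    printf("%d\n", host_interval(4, 0)); /* 4 */
    return 0;
}
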
1619 struct dwc2_qh *qh;
1625 qh = kzalloc(sizeof(*qh), mem_flags);
1626 if (!qh)
1629 dwc2_qh_init(hsotg, qh, urb, mem_flags);
1632 dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
1633 dwc2_hcd_qh_free(hsotg, qh);
1637 return qh;
1644 * @qh: The QH to free
1651 void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1654 if (del_timer_sync(&qh->unreserve_timer)) {
1658 dwc2_do_unreserve(hsotg, qh);
1669 hrtimer_cancel(&qh->wait_timer);
1671 dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
1673 if (qh->desc_list)
1674 dwc2_hcd_qh_free_ddma(hsotg, qh);
1675 else if (hsotg->unaligned_cache && qh->dw_align_buf)
1676 kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
1678 kfree(qh);
1687 * @qh: The QH to add
1691 int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1697 if (dbg_qh(qh))
1700 if (!list_empty(&qh->qh_list_entry))
1705 if (dwc2_qh_is_non_per(qh)) {
1707 qh->start_active_frame = hsotg->frame_number;
1708 qh->next_active_frame = qh->start_active_frame;
1710 if (qh->want_wait) {
1711 list_add_tail(&qh->qh_list_entry,
1713 qh->wait_timer_cancel = false;
1715 hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL);
1717 list_add_tail(&qh->qh_list_entry,
1723 status = dwc2_schedule_periodic(hsotg, qh);
1741 * @qh: QH to remove from schedule
1743 void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1750 qh->wait_timer_cancel = true;
1752 if (list_empty(&qh->qh_list_entry))
1756 if (dwc2_qh_is_non_per(qh)) {
1757 if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
1760 list_del_init(&qh->qh_list_entry);
1764 dwc2_deschedule_periodic(hsotg, qh);
1788 * @qh: QH for the periodic transfer.
1794 struct dwc2_qh *qh, u16 frame_number)
1796 u16 old_frame = qh->next_active_frame;
1807 if (old_frame == qh->start_active_frame &&
1808 !(qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in))
1813 qh->next_active_frame = dwc2_frame_num_inc(old_frame, incr);
1823 if (dwc2_frame_num_gt(prev_frame_number, qh->next_active_frame)) {
1829 qh->next_active_frame);
1830 qh->next_active_frame = frame_number;
1851 * @qh: QH for the periodic transfer.
1857 struct dwc2_qh *qh, u16 frame_number)
1860 u16 interval = qh->host_interval;
1863 qh->start_active_frame = dwc2_frame_num_inc(qh->start_active_frame,
1903 if (qh->start_active_frame == qh->next_active_frame ||
1904 dwc2_frame_num_gt(prev_frame_number, qh->start_active_frame)) {
1905 u16 ideal_start = qh->start_active_frame;
1912 if (qh->do_split || qh->dev_speed == USB_SPEED_HIGH)
1919 qh->start_active_frame = dwc2_frame_num_inc(
1920 qh->start_active_frame, interval);
1922 qh->start_active_frame));
1924 missed = dwc2_frame_num_dec(qh->start_active_frame,
1929 qh->next_active_frame = qh->start_active_frame;
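
All of the frame bookkeeping here (dwc2_frame_num_inc/dec/gt/le) is modular arithmetic on the controller's frame counter, so every comparison has to tolerate wraparound; "greater than" means "ahead by less than half the counter range". A sketch of those helpers, assuming the usual 14-bit (0x3fff) frame counter:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FRAME_MASK 0x3fff /* assumed 14-bit frame counter */

static uint16_t frame_num_inc(uint16_t frame, uint16_t inc)
{
    return (frame + inc) & FRAME_MASK;
}

static uint16_t frame_num_dec(uint16_t frame, uint16_t dec)
{
    return (frame - dec) & FRAME_MASK;
}

/* True if 'a' is ahead of 'b' by less than half the counter range. */
static bool frame_num_gt(uint16_t a, uint16_t b)
{
    return a != b && (((a - b) & FRAME_MASK) < (FRAME_MASK >> 1));
}

int main(void)
{
    printf("%u\n", frame_num_inc(0x3ffe, 4)); /* wraps to 2 */
    printf("%d\n", frame_num_gt(2, 0x3ffe));  /* 1: 2 is "after" */
    printf("%u\n", frame_num_dec(2, 4));      /* 16382 (0x3ffe) */
    return 0;
}
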
1947 void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
1950 u16 old_frame = qh->next_active_frame;
1954 if (dbg_qh(qh))
1957 if (dwc2_qh_is_non_per(qh)) {
1958 dwc2_hcd_qh_unlink(hsotg, qh);
1959 if (!list_empty(&qh->qtd_list))
1961 dwc2_hcd_qh_add(hsotg, qh);
1974 missed = dwc2_next_for_periodic_split(hsotg, qh, frame_number);
1976 missed = dwc2_next_periodic_start(hsotg, qh, frame_number);
1980 qh, sched_next_periodic_split, frame_number, old_frame,
1981 qh->next_active_frame,
1982 dwc2_frame_num_dec(qh->next_active_frame, old_frame),
1985 if (list_empty(&qh->qtd_list)) {
1986 dwc2_hcd_qh_unlink(hsotg, qh);
1997 if (dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number))
1998 list_move_tail(&qh->qh_list_entry,
2001 list_move_tail(&qh->qh_list_entry,
2041 * @qh: Queue head to add qtd to
2049 struct dwc2_qh *qh)
2053 if (unlikely(!qh)) {
2059 retval = dwc2_hcd_qh_add(hsotg, qh);
2063 qtd->qh = qh;
2064 list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);