Lines Matching refs:qh
102 * @qh: QH whose periodic bandwidth requirement is being checked
110 struct dwc2_qh *qh)
117 if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
122 max_claimed_usecs = 100 - qh->host_us;
128 max_claimed_usecs = 900 - qh->host_us;
134 __func__, hsotg->periodic_usecs, qh->host_us);
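
The check above enforces the USB 2.0 periodic budget: 80% of a 125 us high-speed microframe (100 us) and 90% of a 1 ms full/low-speed frame (900 us), as the constants at lines 122 and 128 show. A minimal standalone sketch of the same arithmetic (names are local to the example, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the budget check: high speed gets 100 us of each 125 us
 * microframe for periodic traffic, full/low speed gets 900 us per frame. */
static int check_periodic_bandwidth(int claimed_us, int host_us, bool high_speed)
{
	int max_claimed_us = (high_speed ? 100 : 900) - host_us;

	return claimed_us > max_claimed_us ? -1 /* stands in for -ENOSPC */ : 0;
}

int main(void)
{
	/* 60 us already claimed; the new QH wants 50 us each period */
	printf("%d\n", check_periodic_bandwidth(60, 50, true));  /* -1: over HS budget */
	printf("%d\n", check_periodic_bandwidth(60, 50, false)); /*  0: fits FS budget */
	return 0;
}
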
362 * dwc2_get_ls_map() - Get the map used for the given qh
365 * @qh: QH for the periodic transfer.
370 * add logic here to get a map out of "hsotg" if !qh->do_split.
375 struct dwc2_qh *qh)
380 if (WARN_ON(!qh->dwc_tt))
384 map = qh->dwc_tt->periodic_bitmaps;
385 if (qh->dwc_tt->usb_tt->multi)
386 map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
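
Line 386 is the multi-TT case: a hub with one transaction translator per port gets its own low-speed bitmap per downstream port, while a single-TT hub shares one bitmap across all ports. A standalone model of the pointer arithmetic (the bitmap size is an assumed placeholder, not the driver's real DWC2_ELEMENTS_PER_LS_BITMAP value):

#include <stdio.h>

#define ELEMENTS_PER_LS_BITMAP 4	/* assumed size, for illustration only */

static unsigned long *get_ls_map(unsigned long *bitmaps, int multi_tt, int ttport)
{
	unsigned long *map = bitmaps;

	if (multi_tt)
		map += ELEMENTS_PER_LS_BITMAP * (ttport - 1);	/* ports are 1-based */
	return map;
}

int main(void)
{
	unsigned long bitmaps[4 * ELEMENTS_PER_LS_BITMAP] = { 0 };

	printf("port 3, multi TT:  offset %td\n", get_ls_map(bitmaps, 1, 3) - bitmaps);
	printf("port 3, single TT: offset %td\n", get_ls_map(bitmaps, 0, 3) - bitmaps);
	return 0;
}
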
493 struct dwc2_qh *qh;
506 dwc2_sch_dbg(print_data->hsotg, "QH=%p ...%s\n", print_data->qh, str);
513 * @qh: QH to print.
516 struct dwc2_qh *qh)
518 struct dwc2_qh_print_data print_data = { hsotg, qh };
527 if (qh->schedule_low_speed) {
528 unsigned long *map = dwc2_get_ls_map(hsotg, qh);
531 qh, qh->device_us,
532 DWC2_ROUND_US_TO_SLICE(qh->device_us),
533 DWC2_US_PER_SLICE * qh->ls_start_schedule_slice);
538 qh, map);
545 for (i = 0; i < qh->num_hs_transfers; i++) {
546 struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + i;
554 qh, i, trans_time->duration_us, uframe, rel_us);
556 if (qh->num_hs_transfers) {
557 dwc2_sch_dbg(hsotg, "QH=%p Whole high speed map now:\n", qh);
566 struct dwc2_qh *qh) {}
573 * @qh: QH for the periodic transfer.
584 static int dwc2_ls_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
587 int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
588 unsigned long *map = dwc2_get_ls_map(hsotg, qh);
608 qh->device_interval, search_slice, false);
613 qh->ls_start_schedule_slice = slice;
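
The low-speed map is tracked in fixed-size "slices" rather than raw microseconds; the DIV_ROUND_UP at line 587 means a transfer always reserves whole slices. A sketch of that rounding, assuming a 25 us slice for DWC2_US_PER_SLICE (the actual value is defined elsewhere in the driver):

#include <stdio.h>

#define US_PER_SLICE 25				/* assumed DWC2_US_PER_SLICE */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int device_us = 117;	/* time the transfer occupies on the LS/FS bus */
	int slices = DIV_ROUND_UP(device_us, US_PER_SLICE);

	/* 117 us rounds up to 5 slices, i.e. 125 us of reserved map space */
	printf("%d us -> %d slices (%d us reserved)\n",
	       device_us, slices, slices * US_PER_SLICE);
	return 0;
}
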
621 * @qh: QH for the periodic transfer.
624 struct dwc2_qh *qh)
626 int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
627 unsigned long *map = dwc2_get_ls_map(hsotg, qh);
634 DWC2_LS_SCHEDULE_FRAMES, slices, qh->device_interval,
635 qh->ls_start_schedule_slice);
643 * We'll start looking in qh->hs_transfers[index].start_schedule_us. We'll
648 * @qh: QH for the periodic transfer.
653 * @index: The index into qh->hs_transfers that we're working with.
658 static int dwc2_hs_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
661 struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
667 qh->host_interval, trans_time->start_schedule_us,
681 * @qh: QH for the periodic transfer.
685 struct dwc2_qh *qh, int index)
687 struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
692 qh->host_interval, trans_time->start_schedule_us);
706 * @qh: QH for the periodic transfer.
709 struct dwc2_qh *qh)
711 int bytecount = qh->maxp_mult * qh->maxp;
720 host_interval_in_sched = gcd(qh->host_interval,
748 if (qh->schedule_low_speed) {
749 err = dwc2_ls_pmap_schedule(hsotg, qh, ls_search_slice);
768 start_s_uframe = qh->ls_start_schedule_slice /
783 if (qh->schedule_low_speed)
784 dwc2_ls_pmap_unschedule(hsotg, qh);
786 (qh->ls_start_schedule_slice /
826 if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in)
833 DIV_ROUND_UP(188 * (qh->ls_start_schedule_slice %
855 if (!qh->ep_is_in &&
859 qh, first_data_bytes, bytecount);
860 if (qh->schedule_low_speed)
861 dwc2_ls_pmap_unschedule(hsotg, qh);
868 qh->num_hs_transfers = 1 + DIV_ROUND_UP(other_data_bytes, 188);
875 if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
877 qh->num_hs_transfers += 2;
879 qh->num_hs_transfers += 3;
881 if (qh->ep_is_in) {
900 if (qh->ep_is_in) {
904 qh->num_hs_transfers++;
907 last = rel_uframe + qh->num_hs_transfers + 1;
911 qh->num_hs_transfers += 2;
913 qh->num_hs_transfers += 1;
917 qh->num_hs_transfers--;
933 qh->hs_transfers[0].duration_us = HS_USECS_ISO(first_count);
934 for (i = 1; i < qh->num_hs_transfers - 1; i++)
935 qh->hs_transfers[i].duration_us =
937 if (qh->num_hs_transfers > 1)
938 qh->hs_transfers[qh->num_hs_transfers - 1].duration_us =
946 qh->hs_transfers[0].start_schedule_us =
948 for (i = 1; i < qh->num_hs_transfers; i++)
949 qh->hs_transfers[i].start_schedule_us =
955 for (i = 0; i < qh->num_hs_transfers; i++) {
956 err = dwc2_hs_pmap_schedule(hsotg, qh, true, i);
962 if (i == qh->num_hs_transfers)
966 dwc2_hs_pmap_unschedule(hsotg, qh, i);
968 if (qh->schedule_low_speed)
969 dwc2_ls_pmap_unschedule(hsotg, qh);
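
The constant 188 at lines 833 and 868 is the most full-speed data one 125 us microframe window can move (12 Mbit/s is 187.5 bytes per microframe, rounded up). So once the first SSPLIT carries first_data_bytes, the remainder costs one more high-speed transfer per 188 bytes, before the extra bookkeeping transfers for interrupt and IN endpoints added at lines 875-917. A standalone rendering of the core arithmetic (input values are made up for the example):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int bytecount = 600;		/* qh->maxp_mult * qh->maxp */
	int first_data_bytes = 188;	/* what the first microframe can carry */
	int other_data_bytes = bytecount - first_data_bytes;
	int num_hs_transfers = 1 + DIV_ROUND_UP(other_data_bytes, 188);

	/* 600 bytes -> 4 high-speed transfers (188 + 188 + 188 + 36) */
	printf("%d bytes -> %d transfers\n", bytecount, num_hs_transfers);
	return 0;
}
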
988 * @qh: QH for the periodic transfer.
990 static int dwc2_uframe_schedule_hs(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
993 WARN_ON(qh->host_us != qh->device_us);
994 WARN_ON(qh->host_interval != qh->device_interval);
995 WARN_ON(qh->num_hs_transfers != 1);
998 qh->hs_transfers[0].start_schedule_us = 0;
999 qh->hs_transfers[0].duration_us = qh->host_us;
1001 return dwc2_hs_pmap_schedule(hsotg, qh, false, 0);
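
The high-speed non-split case is the degenerate one: no TT sits between host and device, so the host and device views of timing must already agree (the WARN_ONs at 993-995) and a single transfer searched from the start of the map suffices. A trivial standalone model of that setup:

#include <assert.h>
#include <stdio.h>

struct transfer { int start_schedule_us; int duration_us; };

int main(void)
{
	int host_us = 42, device_us = 42;		/* must match: no TT */
	int host_interval = 8, device_interval = 8;	/* likewise */
	struct transfer t;

	assert(host_us == device_us && host_interval == device_interval);
	t.start_schedule_us = 0;	/* search from the start of the map */
	t.duration_us = host_us;
	printf("one transfer, %d us per period\n", t.duration_us);
	return 0;
}
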
1011 * @qh: QH for the periodic transfer.
1013 static int dwc2_uframe_schedule_ls(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1016 WARN_ON(qh->host_us != qh->device_us);
1017 WARN_ON(qh->host_interval != qh->device_interval);
1018 WARN_ON(!qh->schedule_low_speed);
1021 return dwc2_ls_pmap_schedule(hsotg, qh, 0);
1031 * @qh: QH for the periodic transfer.
1033 static int dwc2_uframe_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1037 if (qh->dev_speed == USB_SPEED_HIGH)
1038 ret = dwc2_uframe_schedule_hs(hsotg, qh);
1039 else if (!qh->do_split)
1040 ret = dwc2_uframe_schedule_ls(hsotg, qh);
1042 ret = dwc2_uframe_schedule_split(hsotg, qh);
1045 dwc2_sch_dbg(hsotg, "QH=%p Failed to schedule %d\n", qh, ret);
1047 dwc2_qh_schedule_print(hsotg, qh);
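
dwc2_uframe_schedule() at 1033-1042 is a three-way dispatch on device speed and split-ness; a compact model of the decision (the enum names are invented for the sketch):

#include <stdio.h>

enum sched_path { SCHED_HS, SCHED_LS, SCHED_SPLIT };

static enum sched_path pick_path(int dev_is_high_speed, int do_split)
{
	if (dev_is_high_speed)
		return SCHED_HS;	/* one transfer, host == device view */
	if (!do_split)
		return SCHED_LS;	/* root port itself runs at LS/FS */
	return SCHED_SPLIT;		/* LS/FS device behind a HS hub's TT */
}

int main(void)
{
	printf("%d %d %d\n", pick_path(1, 0), pick_path(0, 0), pick_path(0, 1));
	return 0;
}
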
1056 * @qh: QH for the periodic transfer.
1058 static void dwc2_uframe_unschedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1062 for (i = 0; i < qh->num_hs_transfers; i++)
1063 dwc2_hs_pmap_unschedule(hsotg, qh, i);
1065 if (qh->schedule_low_speed)
1066 dwc2_ls_pmap_unschedule(hsotg, qh);
1068 dwc2_sch_dbg(hsotg, "QH=%p Unscheduled\n", qh);
1072 * dwc2_pick_first_frame() - Choose 1st frame for qh that's already scheduled
1074 * Takes a qh that has already been scheduled (which means we know we have the
1078 * This is expected to be called on qh's that weren't previously actively
1083 * @qh: QH for a periodic endpoint
1086 static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1113 if (qh->do_split)
1119 if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
1126 WARN_ON(qh->num_hs_transfers < 1);
1128 relative_frame = qh->hs_transfers[0].start_schedule_us /
1132 interval = gcd(qh->host_interval, DWC2_HS_SCHEDULE_UFRAMES);
1142 relative_frame = qh->ls_start_schedule_slice /
1144 interval = gcd(qh->host_interval, DWC2_LS_SCHEDULE_FRAMES);
1183 qh->next_active_frame = next_active_frame;
1184 qh->start_active_frame = next_active_frame;
1187 qh, frame_number, qh->next_active_frame);
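
The gcd() calls at 1132 and 1144 exist because the reservation bitmaps only cover a short repeating window (8 microframes for high speed, 1 frame for low speed): an endpoint whose interval exceeds the window lands on map positions that repeat with period gcd(interval, window length). A self-contained demonstration:

#include <stdio.h>

static unsigned gcd(unsigned a, unsigned b)
{
	while (b) {
		unsigned t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	unsigned schedule_uframes = 8;	/* high-speed map length */
	unsigned host_interval = 12;

	/* uframes 0, 12, 24, ... fall on map positions 0, 4, 0, 4, ... */
	printf("effective interval: %u\n", gcd(host_interval, schedule_uframes));
	return 0;
}
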
1197 * @qh: QH for the periodic transfer.
1201 static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1206 status = dwc2_uframe_schedule(hsotg, qh);
1216 status = dwc2_check_periodic_bandwidth(hsotg, qh);
1231 hsotg->periodic_usecs += qh->host_us;
1233 dwc2_pick_first_frame(hsotg, qh);
1242 * by the given qh.
1245 * @qh: QH for the periodic transfer.
1247 static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1251 WARN_ON(!qh->unreserve_pending);
1254 qh->unreserve_pending = false;
1256 if (WARN_ON(!list_empty(&qh->qh_list_entry)))
1257 list_del_init(&qh->qh_list_entry);
1260 hsotg->periodic_usecs -= qh->host_us;
1263 dwc2_uframe_unschedule(hsotg, qh);
1280 * @t: Pointer to the qh's unreserve_timer.
1284 struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
1285 struct dwc2_hsotg *hsotg = qh->hsotg;
1298 if (timer_pending(&qh->unreserve_timer))
1312 if (qh->unreserve_pending)
1313 dwc2_do_unreserve(hsotg, qh);
1324 * @qh: QH for a periodic endpoint
1329 struct dwc2_qh *qh)
1335 max_xfer_size = qh->maxp * qh->maxp_mult;
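
maxp_mult at line 1335 is the USB 2.0 high-bandwidth multiplier: for high-speed periodic endpoints, wMaxPacketSize bits 12:11 encode up to two additional transactions per microframe, so the worst case moved per period is maxp * maxp_mult. A standalone decode (the descriptor value is an example):

#include <stdio.h>

int main(void)
{
	unsigned int wMaxPacketSize = 0x1400;	/* 1024 bytes, 3 per uframe */
	unsigned int maxp = wMaxPacketSize & 0x7ff;
	unsigned int maxp_mult = ((wMaxPacketSize >> 11) & 0x3) + 1;

	printf("max_xfer_size = %u\n", maxp * maxp_mult);	/* 3072 */
	return 0;
}
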
1353 * @qh: QH for the periodic transfer. The QH should already contain the
1358 static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1362 status = dwc2_check_max_xfer_size(hsotg, qh);
1371 if (del_timer(&qh->unreserve_timer))
1372 WARN_ON(!qh->unreserve_pending);
1381 if (!qh->unreserve_pending) {
1382 status = dwc2_do_reserve(hsotg, qh);
1392 if (dwc2_frame_num_le(qh->next_active_frame,
1394 dwc2_pick_first_frame(hsotg, qh);
1397 qh->unreserve_pending = 0;
1401 list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
1404 list_add_tail(&qh->qh_list_entry,
1415 * @qh: QH for the periodic transfer
1418 struct dwc2_qh *qh)
1439 did_modify = mod_timer(&qh->unreserve_timer,
1442 qh->unreserve_pending = 1;
1444 list_del_init(&qh->qh_list_entry);
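
The mod_timer at 1439 is the heart of the delayed-unreserve scheme: descheduling arms a short timer instead of releasing bandwidth immediately, so a driver that resubmits right away (per the usb_submit_urb() reserved-bandwidth contract) keeps its slot. A toy model of the lifecycle, with the locking and jiffies machinery deliberately omitted:

#include <stdbool.h>
#include <stdio.h>

struct resv { bool reserved; bool unreserve_pending; };

static void deschedule(struct resv *r)
{
	r->unreserve_pending = true;		/* arm the (modelled) timer */
}

static void reschedule(struct resv *r)
{
	if (r->unreserve_pending)
		r->unreserve_pending = false;	/* timer cancelled: reuse slot */
	else
		r->reserved = true;		/* fresh reservation needed */
}

static void timer_fires(struct resv *r)
{
	if (r->unreserve_pending) {		/* nobody resubmitted in time */
		r->reserved = false;
		r->unreserve_pending = false;
	}
}

int main(void)
{
	struct resv r = { .reserved = true, .unreserve_pending = false };

	deschedule(&r);
	reschedule(&r);				/* quick resubmit */
	printf("after resubmit: reserved=%d\n", r.reserved);	/* 1 */

	deschedule(&r);
	timer_fires(&r);			/* no resubmit */
	printf("after timeout:  reserved=%d\n", r.reserved);	/* 0 */
	return 0;
}
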
1464 * qh back to the "inactive" list, then queues transactions.
1466 * @t: Pointer to wait_timer in a qh.
1472 struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
1473 struct dwc2_hsotg *hsotg = qh->hsotg;
1482 if (!qh->wait_timer_cancel) {
1485 qh->want_wait = false;
1487 list_move(&qh->qh_list_entry,
1503 * @qh: The QH to init
1508 static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
1526 qh->hsotg = hsotg;
1527 timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
1528 hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1529 qh->wait_timer.function = &dwc2_wait_timer_fn;
1530 qh->ep_type = ep_type;
1531 qh->ep_is_in = ep_is_in;
1533 qh->data_toggle = DWC2_HC_PID_DATA0;
1534 qh->maxp = maxp;
1535 qh->maxp_mult = maxp_mult;
1536 INIT_LIST_HEAD(&qh->qtd_list);
1537 INIT_LIST_HEAD(&qh->qh_list_entry);
1539 qh->do_split = do_split;
1540 qh->dev_speed = dev_speed;
1547 &qh->ttport);
1550 qh->dwc_tt = dwc_tt;
1552 qh->host_us = NS_TO_US(usb_calc_bus_time(host_speed, ep_is_in,
1559 qh->device_us = NS_TO_US(device_ns);
1561 qh->device_interval = urb->interval;
1562 qh->host_interval = urb->interval * (do_split ? 8 : 1);
1569 qh->schedule_low_speed = prtspd != HPRT0_SPD_HIGH_SPEED ||
1574 qh->num_hs_transfers = -1;
1576 qh->num_hs_transfers = 1;
1578 qh->num_hs_transfers = 0;
1599 switch (qh->ep_type) {
1617 dwc2_sch_dbg(hsotg, "QH=%p Init %s, %s speed, %d bytes:\n", qh, type,
1619 dwc2_sch_dbg(hsotg, "QH=%p ...addr=%d, ep=%d, %s\n", qh,
1626 qh, qh->host_us, qh->device_us);
1628 qh, qh->host_interval, qh->device_interval);
1629 if (qh->schedule_low_speed)
1631 qh, dwc2_get_ls_map(hsotg, qh));
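
Line 1562 is the frames-versus-microframes conversion: an LS/FS device behind a TT expresses its interval in 1 ms frames, but the host side of a split runs on the 125 us microframe schedule, hence the factor of 8. Standalone:

#include <stdio.h>

int main(void)
{
	int urb_interval = 4;	/* from the URB: device-side frames */
	int do_split = 1;	/* LS/FS device behind a high-speed hub */

	int device_interval = urb_interval;
	int host_interval = urb_interval * (do_split ? 8 : 1);

	/* 4 device frames == 32 host microframes */
	printf("device: %d frames, host: %d uframes\n",
	       device_interval, host_interval);
	return 0;
}
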
1649 struct dwc2_qh *qh;
1655 qh = kzalloc(sizeof(*qh), mem_flags);
1656 if (!qh)
1659 dwc2_qh_init(hsotg, qh, urb, mem_flags);
1662 dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
1663 dwc2_hcd_qh_free(hsotg, qh);
1667 return qh;
1674 * @qh: The QH to free
1681 void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1684 if (del_timer_sync(&qh->unreserve_timer)) {
1688 dwc2_do_unreserve(hsotg, qh);
1699 hrtimer_cancel(&qh->wait_timer);
1701 dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
1703 if (qh->desc_list)
1704 dwc2_hcd_qh_free_ddma(hsotg, qh);
1705 else if (hsotg->unaligned_cache && qh->dw_align_buf)
1706 kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
1708 kfree(qh);
1717 * @qh: The QH to add
1721 int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1727 if (dbg_qh(qh))
1730 if (!list_empty(&qh->qh_list_entry))
1735 if (dwc2_qh_is_non_per(qh)) {
1737 qh->start_active_frame = hsotg->frame_number;
1738 qh->next_active_frame = qh->start_active_frame;
1740 if (qh->want_wait) {
1741 list_add_tail(&qh->qh_list_entry,
1743 qh->wait_timer_cancel = false;
1745 hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL);
1747 list_add_tail(&qh->qh_list_entry,
1753 status = dwc2_schedule_periodic(hsotg, qh);
1771 * @qh: QH to remove from schedule
1773 void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1780 qh->wait_timer_cancel = true;
1782 if (list_empty(&qh->qh_list_entry))
1786 if (dwc2_qh_is_non_per(qh)) {
1787 if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
1790 list_del_init(&qh->qh_list_entry);
1794 dwc2_deschedule_periodic(hsotg, qh);
1818 * @qh: QH for the periodic transfer.
1824 struct dwc2_qh *qh, u16 frame_number)
1826 u16 old_frame = qh->next_active_frame;
1837 if (old_frame == qh->start_active_frame &&
1838 !(qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in))
1843 qh->next_active_frame = dwc2_frame_num_inc(old_frame, incr);
1853 if (dwc2_frame_num_gt(prev_frame_number, qh->next_active_frame)) {
1859 qh->next_active_frame);
1860 qh->next_active_frame = frame_number;
1881 * @qh: QH for the periodic transfer.
1887 struct dwc2_qh *qh, u16 frame_number)
1890 u16 interval = qh->host_interval;
1893 qh->start_active_frame = dwc2_frame_num_inc(qh->start_active_frame,
1933 if (qh->start_active_frame == qh->next_active_frame ||
1934 dwc2_frame_num_gt(prev_frame_number, qh->start_active_frame)) {
1935 u16 ideal_start = qh->start_active_frame;
1942 if (qh->do_split || qh->dev_speed == USB_SPEED_HIGH)
1949 qh->start_active_frame = dwc2_frame_num_inc(
1950 qh->start_active_frame, interval);
1952 qh->start_active_frame));
1954 missed = dwc2_frame_num_dec(qh->start_active_frame,
1959 qh->next_active_frame = qh->start_active_frame;
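
The do/while at 1949-1952 walks start_active_frame forward by whole intervals until it passes the frame that was missed. Frame numbers come from HFNUM and wrap at 14 bits, which is why the driver compares through dwc2_frame_num_gt() rather than plain `>`. A hedged model of those helpers (the mask follows the HFNUM_MAX_FRNUM convention; the real definitions live in the driver headers):

#include <stdint.h>
#include <stdio.h>

#define FRNUM_MASK 0x3fff	/* 14-bit frame counter */

static uint16_t frame_num_inc(uint16_t frame, uint16_t inc)
{
	return (frame + inc) & FRNUM_MASK;
}

static int frame_num_gt(uint16_t a, uint16_t b)
{
	/* modular "greater than": a leads b by less than half the range */
	return a != b && (((a - b) & FRNUM_MASK) < (FRNUM_MASK >> 1));
}

int main(void)
{
	uint16_t start = 0x3ffe, now = 0x0005, interval = 8;

	while (!frame_num_gt(start, now))
		start = frame_num_inc(start, interval);
	printf("next start: 0x%04x\n", start);	/* 0x0006: wrapped cleanly */
	return 0;
}
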
1977 void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
1980 u16 old_frame = qh->next_active_frame;
1984 if (dbg_qh(qh))
1987 if (dwc2_qh_is_non_per(qh)) {
1988 dwc2_hcd_qh_unlink(hsotg, qh);
1989 if (!list_empty(&qh->qtd_list))
1991 dwc2_hcd_qh_add(hsotg, qh);
2004 missed = dwc2_next_for_periodic_split(hsotg, qh, frame_number);
2006 missed = dwc2_next_periodic_start(hsotg, qh, frame_number);
2010 qh, sched_next_periodic_split, frame_number, old_frame,
2011 qh->next_active_frame,
2012 dwc2_frame_num_dec(qh->next_active_frame, old_frame),
2015 if (list_empty(&qh->qtd_list)) {
2016 dwc2_hcd_qh_unlink(hsotg, qh);
2027 if (dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number))
2028 list_move_tail(&qh->qh_list_entry,
2031 list_move_tail(&qh->qh_list_entry,
2071 * @qh: Queue head to add qtd to
2079 struct dwc2_qh *qh)
2083 if (unlikely(!qh)) {
2089 retval = dwc2_hcd_qh_add(hsotg, qh);
2093 qtd->qh = qh;
2094 list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);