Lines Matching refs:qh

176 static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
179 ep->in_qh = qh;
181 ep->out_qh = qh;
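
The three matches above come from the helper that records which qh currently owns each direction of a hardware endpoint; the mirror lookups show up later as musb_ep_get_qh() calls (e.g. lines 299, 685, 2402). A minimal model of that pairing, assuming only the in_qh/out_qh fields visible here and ignoring any shared-FIFO handling the real driver may do:

struct musb_qh;

struct demo_hw_ep {
        struct musb_qh *in_qh;          /* qh currently owning the RX side */
        struct musb_qh *out_qh;         /* qh currently owning the TX side */
};

static void demo_ep_set_qh(struct demo_hw_ep *ep, int is_in, struct musb_qh *qh)
{
        if (is_in)
                ep->in_qh = qh;
        else
                ep->out_qh = qh;
}

static struct musb_qh *demo_ep_get_qh(struct demo_hw_ep *ep, int is_in)
{
        return is_in ? ep->in_qh : ep->out_qh;
}
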
196 musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
200 struct urb *urb = next_urb(qh);
203 struct musb_hw_ep *hw_ep = qh->hw_ep;
206 /* initialize software qh state */
207 qh->offset = 0;
208 qh->segsize = 0;
211 switch (qh->type) {
220 qh->iso_idx = 0;
221 qh->frame = 0;
234 musb_ep_set_qh(hw_ep, is_in, qh);
242 switch (qh->type) {
253 qh->frame = 0;
256 qh->frame = urb->start_frame;
291 * advancing to either the next URB queued to that qh, or else invalidating
292 * that qh and advancing to the next qh scheduled after the current one.
299 struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
300 struct musb_hw_ep *ep = qh->hw_ep;
301 int ready = qh->is_ready;
308 switch (qh->type) {
311 toggle = musb->io.get_toggle(qh, !is_in);
312 usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
320 qh->is_ready = 0;
322 qh->is_ready = ready;
325 * musb->lock had been unlocked in musb_giveback, so qh may
328 qh = musb_ep_get_qh(hw_ep, is_in);
331 * invalidate qh as soon as list_empty(&hep->urb_list)
333 if (qh && list_empty(&qh->hep->urb_list)) {
351 /* Clobber old pointers to this qh */
353 qh->hep->hcpriv = NULL;
355 switch (qh->type) {
360 * should rotate a qh to the end (for fairness).
362 if (qh->mux == 1) {
363 head = qh->ring.prev;
364 list_del(&qh->ring);
365 kfree(qh);
366 qh = first_qh(head);
377 kfree(qh);
378 qh = NULL;
383 if (qh != NULL && qh->is_ready) {
385 hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
386 musb_start_urb(musb, is_in, qh);
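
Lines 291-386 are musb_advance_schedule(): after giving an URB back, the driver either starts the next URB on the same qh or, for bulk qhs multiplexed on one endpoint (qh->mux == 1, line 362), unlinks the finished qh from its ring and promotes the following one for fairness. A condensed sketch of that rotation, using placeholder demo_* names and assuming the finished qh sits at the front of the shared list as lines 363-366 imply:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_qh {
        struct list_head ring;          /* links qhs multiplexed on one endpoint */
};

/* Front of the shared-endpoint list, or NULL if it emptied. */
static struct demo_qh *demo_first_qh(struct list_head *head)
{
        if (list_empty(head))
                return NULL;
        return list_first_entry(head, struct demo_qh, ring);
}

/*
 * The finished qh is the first entry of the shared bulk list, so its
 * ring.prev is the list head itself: unlink and free the finished qh,
 * then whatever is now at the front gets the endpoint next.
 */
static struct demo_qh *demo_advance_bulk_ring(struct demo_qh *done_qh)
{
        struct list_head *head = done_qh->ring.prev;

        list_del(&done_qh->ring);
        kfree(done_qh);
        return demo_first_qh(head);
}
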
423 struct musb_qh *qh = hw_ep->in_qh;
430 urb->transfer_buffer, qh->offset,
443 d = urb->iso_frame_desc + qh->iso_idx;
461 done = (++qh->iso_idx >= urb->number_of_packets);
464 buf = buffer + qh->offset;
465 length = urb->transfer_buffer_length - qh->offset;
474 qh->offset += length;
478 || (rx_count < qh->maxpacket)
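
Lines 423-478 are the PIO receive path: data is copied into transfer_buffer at qh->offset and the transfer counts as done once the buffer is full or a short packet (rx_count < qh->maxpacket) arrives. Restated as a standalone predicate with illustrative parameter names (the driver additionally stops early when the URB already carries an error status):

#include <linux/types.h>

static bool demo_rx_done(size_t offset, size_t buf_len,
                         size_t rx_count, u16 maxpacket)
{
        return offset >= buf_len                /* whole buffer filled */
                || rx_count < maxpacket;        /* short packet terminates */
}
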
514 musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
553 musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
554 musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
555 musb_write_rxhubport(musb, epnum, qh->h_port_reg);
557 musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
560 musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
561 musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
567 qh->maxpacket | ((qh->hb_mult - 1) << 11));
573 struct musb_hw_ep *hw_ep, struct musb_qh *qh,
579 u16 pkt_size = qh->maxpacket;
599 if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
600 can_bulk_split(hw_ep->musb, qh->type)))
613 struct musb_qh *qh,
631 struct musb_hw_ep *hw_ep, struct musb_qh *qh,
635 u16 pkt_size = qh->maxpacket;
639 musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
642 musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
647 qh->segsize = length;
685 struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
686 u16 packet_sz = qh->maxpacket;
694 qh->addr_reg, qh->epnum, is_out ? "out" : "in",
695 qh->h_addr_reg, qh->h_port_reg,
764 csr |= musb->io.set_toggle(qh, is_out, urb);
778 musb_write_txfunaddr(musb, epnum, qh->addr_reg);
779 musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
780 musb_write_txhubport(musb, epnum, qh->h_port_reg);
783 musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
787 musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
788 if (can_bulk_split(musb, qh->type)) {
789 qh->hb_mult = hw_ep->max_packet_sz_tx
792 | ((qh->hb_mult) - 1) << 11);
795 qh->maxpacket |
796 ((qh->hb_mult - 1) << 11));
798 musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
800 musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
803 qh->type_reg);
806 if (can_bulk_split(musb, qh->type))
813 hw_ep, qh, urb, offset, len))
818 qh->segsize = load_count;
820 sg_miter_start(&qh->sg_miter, urb->sg, 1,
823 if (!sg_miter_next(&qh->sg_miter)) {
827 sg_miter_stop(&qh->sg_miter);
830 buf = qh->sg_miter.addr + urb->sg->offset +
833 qh->sg_miter.length);
835 qh->sg_miter.consumed = load_count;
836 sg_miter_stop(&qh->sg_miter);
849 musb_rx_reinit(musb, qh, epnum);
850 csr |= musb->io.set_toggle(qh, is_out, urb);
852 if (qh->type == USB_ENDPOINT_XFER_INT)
873 qh->segsize = len;
887 qh->segsize);
902 /* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
959 /* get the next qh from musb->in_bulk */
962 /* set rx_reinit and schedule the next qh */
968 /* get the next qh from musb->out_bulk */
971 /* set tx_reinit and schedule the next qh */
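
The comments at lines 902-971 describe the bulk NAK-timeout path: the qh that keeps NAKing is moved to the tail of musb->in_bulk/out_bulk, the endpoint is flagged for reinit, and whichever qh is now at the head is started. A sketch of that rotation with placeholder names; the rx_reinit/tx_reinit flag is reduced to a bool here:

#include <linux/list.h>
#include <linux/types.h>

struct demo_qh {
        struct list_head ring;          /* links qhs sharing the bulk endpoint */
};

static struct demo_qh *demo_rotate_on_nak_timeout(struct list_head *bulk_list,
                                                  bool *need_reinit)
{
        struct demo_qh *cur_qh;

        if (list_empty(bulk_list))
                return NULL;

        cur_qh = list_first_entry(bulk_list, struct demo_qh, ring);
        list_move_tail(&cur_qh->ring, bulk_list);       /* demote the NAKed qh */
        *need_reinit = true;            /* endpoint must be reprogrammed */
        return list_first_entry(bulk_list, struct demo_qh, ring);
}
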
990 struct musb_qh *qh = hw_ep->in_qh;
1004 if (len < qh->maxpacket) {
1030 fifo_count = min_t(size_t, qh->maxpacket,
1068 struct musb_qh *qh = hw_ep->in_qh;
1073 urb = next_urb(qh);
1081 musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
1082 csr, qh, len, urb, musb->ep0_stage);
1107 * if (qh->ring.next != &musb->control), then
1210 struct musb_qh *qh = hw_ep->out_qh;
1211 struct urb *urb = next_urb(qh);
1247 if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
1258 * if (bulk && qh->ring.next != &musb->out_bulk), then
1368 length = qh->segsize;
1369 qh->offset += length;
1374 d = urb->iso_frame_desc + qh->iso_idx;
1377 if (++qh->iso_idx >= urb->number_of_packets) {
1384 } else if (dma && urb->transfer_buffer_length == qh->offset) {
1388 if (qh->segsize < qh->maxpacket)
1390 else if (qh->offset == urb->transfer_buffer_length
1395 offset = qh->offset;
1414 urb->actual_length = qh->offset;
1418 if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1436 if (length > qh->maxpacket)
1437 length = qh->maxpacket;
1447 if (!sg_miter_next(&qh->sg_miter)) {
1449 sg_miter_stop(&qh->sg_miter);
1453 length = min_t(u32, length, qh->sg_miter.length);
1454 musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
1455 qh->sg_miter.consumed = length;
1456 sg_miter_stop(&qh->sg_miter);
1461 qh->segsize = length;
1472 struct musb_qh *qh,
1482 buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
1485 length = urb->iso_frame_desc[qh->iso_idx].length;
1491 return dma->channel_program(channel, qh->maxpacket, 0,
1497 struct musb_qh *qh,
1543 struct musb_qh *qh,
1558 d = urb->iso_frame_desc + qh->iso_idx;
1567 if (++qh->iso_idx >= urb->number_of_packets) {
1572 done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
1581 || channel->actual_len < qh->maxpacket
1613 struct musb_qh *qh,
1632 d = urb->iso_frame_desc + qh->iso_idx;
1663 > qh->maxpacket)
1684 if (qh->hb_mult == 1)
1693 done = dma->channel_program(channel, qh->maxpacket,
1713 struct musb_qh *qh,
1722 struct musb_qh *qh,
1741 struct musb_qh *qh = hw_ep->in_qh;
1753 urb = next_urb(qh);
1799 if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1811 && qh->mux == 1
1893 done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
1927 qh->offset,
1930 if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
1948 qh->use_sg = true;
1949 sg_miter_start(&qh->sg_miter, urb->sg, 1,
1953 if (qh->use_sg) {
1954 if (!sg_miter_next(&qh->sg_miter)) {
1956 sg_miter_stop(&qh->sg_miter);
1961 urb->transfer_buffer = qh->sg_miter.addr;
1963 qh->offset = 0x0;
1969 qh->sg_miter.consumed = received_len;
1970 sg_miter_stop(&qh->sg_miter);
1981 qh->offset += xfer_len;
1983 if (qh->use_sg) {
1984 qh->use_sg = false;
2001 struct musb_qh *qh,
2011 struct urb *urb = next_urb(qh);
2014 if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
2023 * We know this qh hasn't been scheduled, so all we need to do
2047 diff -= (qh->maxpacket * qh->hb_mult);
2064 toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
2067 if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
2076 if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
2090 if (qh->dev)
2091 qh->intv_reg =
2092 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
2097 musb_ep_xfertype_string(qh->type),
2098 qh->hb_mult, qh->maxpacket);
2103 qh->mux = 0;
2105 musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
2109 list_add_tail(&qh->ring, head);
2110 qh->mux = 1;
2112 qh->hw_ep = hw_ep;
2113 qh->hep->hcpriv = qh;
2115 musb_start_urb(musb, is_in, qh);
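
Lines 2001-2115 are musb_schedule(): for a new qh it searches the unclaimed hardware endpoints for the FIFO that fits qh->maxpacket * qh->hb_mult with the least waste (the subtraction at line 2047), falling back to the shared bulk endpoint when nothing fits. A best-fit loop in that spirit, with assumed parameters standing in for the walk over musb->endpoints[] and the in/out claim checks:

#include <linux/types.h>

static int demo_pick_best_endpoint(const u16 *fifo_sz, int nr_eps,
                                   u16 maxpacket, u8 hb_mult)
{
        int best_end = -1;
        int best_diff = 0x7fffffff;     /* "no fit yet" */
        int epnum;

        for (epnum = 1; epnum < nr_eps; epnum++) {      /* ep0 is reserved for control */
                int diff = fifo_sz[epnum] - maxpacket * hb_mult;

                if (diff < 0)                   /* qh does not fit this FIFO */
                        continue;
                if (diff < best_diff) {         /* tightest fit so far */
                        best_diff = diff;
                        best_end = epnum;
                }
        }
        return best_end;        /* -1: caller falls back to bulk scheduling */
}
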
2127 struct musb_qh *qh;
2141 qh = ret ? NULL : hep->hcpriv;
2142 if (qh)
2143 urb->hcpriv = qh;
2148 * scheduled onto a live qh.
2151 * disabled, testing for empty qh->ring and avoiding qh setup costs
2154 if (qh || ret)
2157 /* Allocate and initialize qh, minimizing the work done each time
2160 * REVISIT consider a dedicated qh kmem_cache, so it's harder
2163 qh = kzalloc(sizeof *qh, mem_flags);
2164 if (!qh) {
2171 qh->hep = hep;
2172 qh->dev = urb->dev;
2173 INIT_LIST_HEAD(&qh->ring);
2174 qh->is_ready = 1;
2176 qh->maxpacket = usb_endpoint_maxp(epd);
2177 qh->type = usb_endpoint_type(epd);
2183 qh->hb_mult = usb_endpoint_maxp_mult(epd);
2184 if (qh->hb_mult > 1) {
2185 int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
2193 musb_ep_xfertype_string(qh->type),
2194 qh->hb_mult, qh->maxpacket & 0x7ff);
2198 qh->maxpacket &= 0x7ff;
2201 qh->epnum = usb_endpoint_num(epd);
2204 qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
2207 type_reg = (qh->type << 4) | qh->epnum;
2218 qh->type_reg = type_reg;
2221 switch (qh->type) {
2238 * transfer scheduling logic to try some other qh, e.g. try
2253 qh->intv_reg = interval;
2260 qh->h_addr_reg = (u8) parent->devnum;
2264 qh->h_port_reg = (u8) urb->dev->ttport;
2266 qh->h_addr_reg =
2269 qh->h_addr_reg |= 0x80;
2274 /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
2279 if (hep->hcpriv || !next_urb(qh)) {
2283 kfree(qh);
2284 qh = NULL;
2287 ret = musb_schedule(musb, qh,
2291 urb->hcpriv = qh;
2303 kfree(qh);
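
Lines 2127-2303 are musb_urb_enqueue(), which allocates the qh and caches per-endpoint values (maxpacket, type, epnum, and the address/type/interval register images) so later hardware programming is cheap. A hypothetical helper collecting those assignments in one place; struct musb_qh is the driver's own type, the interval computation is reduced to a parameter, and the hub address/port handling (lines 2260-2269) plus the speed bits folded into type_reg between lines 2207 and 2218 are omitted because those details mostly fall outside this listing:

#include <linux/list.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>

static void demo_fill_qh(struct musb_qh *qh, struct urb *urb,
                         struct usb_endpoint_descriptor *epd, u8 interval)
{
        qh->hep = urb->ep;
        qh->dev = urb->dev;
        INIT_LIST_HEAD(&qh->ring);
        qh->is_ready = 1;

        qh->maxpacket = usb_endpoint_maxp(epd);         /* wMaxPacketSize */
        qh->type = usb_endpoint_type(epd);              /* ctrl/iso/bulk/int */
        qh->hb_mult = usb_endpoint_maxp_mult(epd);      /* high-bandwidth mult */
        qh->maxpacket &= 0x7ff;                         /* strip the mult bits */

        qh->epnum = usb_endpoint_num(epd);
        qh->addr_reg = (u8) usb_pipedevice(urb->pipe);  /* target device address */
        qh->type_reg = (qh->type << 4) | qh->epnum;     /* cached TXTYPE/RXTYPE image */
        qh->intv_reg = interval;                        /* polling interval / NAK limit */
}
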
2314 static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
2316 struct musb_hw_ep *ep = qh->hw_ep;
2372 struct musb_qh *qh;
2384 qh = urb->hcpriv;
2385 if (!qh)
2398 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
2400 if (!qh->is_ready
2401 || urb->urb_list.prev != &qh->hep->urb_list
2402 || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2403 int ready = qh->is_ready;
2405 qh->is_ready = 0;
2407 qh->is_ready = ready;
2410 * and its URB list has emptied, recycle this qh.
2412 if (ready && list_empty(&qh->hep->urb_list)) {
2413 musb_ep_set_qh(qh->hw_ep, is_in, NULL);
2414 qh->hep->hcpriv = NULL;
2415 list_del(&qh->ring);
2416 kfree(qh);
2419 ret = musb_cleanup_urb(urb, qh);
2432 struct musb_qh *qh;
2437 qh = hep->hcpriv;
2438 if (qh == NULL)
2441 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2444 qh->is_ready = 0;
2445 if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2446 urb = next_urb(qh);
2453 musb_cleanup_urb(urb, qh);
2459 urb = next_urb(qh);
2461 musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2465 * other transfers, and since !qh->is_ready nothing
2469 musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2472 list_del(&qh->ring);
2473 kfree(qh);