Lines matching defs:hwep (uses of the struct ci_hw_ep *hwep endpoint pointer in the chipidea UDC gadget code; the number at the start of each match is the source line)

354 static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
365 node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC, &node->dma);
374 if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
375 u32 mul = hwreq->req.length / hwep->ep.maxpacket;
378 || hwreq->req.length % hwep->ep.maxpacket)
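
The ISO-TX lines above (374-378) derive the high-bandwidth multiplier from the request length before storing it in the TD token. A minimal standalone sketch of that arithmetic, with the helper name chosen for illustration rather than taken from the driver:

static unsigned int iso_mult_for_length(unsigned int length,
					unsigned int maxpacket)
{
	unsigned int mul = length / maxpacket;

	/* A zero-length or short final packet still needs one transaction. */
	if (length == 0 || length % maxpacket)
		mul++;

	return mul;	/* e.g. length 3000, maxpacket 1024 -> mul 3 */
}
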
423 static int prepare_td_for_non_sg(struct ci_hw_ep *hwep,
431 ret = add_td_to_list(hwep, hwreq, 0, NULL);
447 ret = add_td_to_list(hwep, hwreq, count, NULL);
454 if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
455 && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
456 ret = add_td_to_list(hwep, hwreq, 0, NULL);
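
Lines 454-456 decide whether an extra zero-length TD gets queued after the data TDs. A hedged restatement of that condition as a standalone predicate (needs_zlp_td is an illustrative name, not a driver symbol):

#include <stdbool.h>

static bool needs_zlp_td(bool req_zero, unsigned int length,
			 bool dir_in, unsigned int maxpacket)
{
	/* ZLP only for IN transfers whose last packet is exactly full. */
	return req_zero && length != 0 && dir_in &&
	       (length % maxpacket == 0);
}
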
464 static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
475 ret = add_td_to_list(hwep, hwreq, count, s);
504 static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
512 dev_err(hwep->ci->dev, "not supported operation for sg\n");
518 dev_err(hwep->ci->dev, "not page aligned sg buffer\n");
526 ret = prepare_td_per_sg(hwep, hwreq, s);
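
The error paths at 512 and 518 suggest the scatter-gather path only accepts segments that sit on page boundaries, so they can be chained directly into TDs. A sketch of that kind of check, with the page size and predicate name assumed purely for illustration:

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u	/* assumed; the driver uses PAGE_SIZE */

static bool sg_segment_usable(uint64_t dma_addr)
{
	/* A segment must start on a page boundary to be chained into TDs. */
	return (dma_addr % SKETCH_PAGE_SIZE) == 0;
}
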
542 * @hwep: endpoint
547 static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
549 struct ci_hdrc *ci = hwep->ci;
560 &hwreq->req, hwep->dir);
565 ret = prepare_td_for_sg(hwep, hwreq);
567 ret = prepare_td_for_non_sg(hwep, hwreq);
583 if (!list_empty(&hwep->qh.queue)) {
585 int n = hw_ep_bit(hwep->num, hwep->dir);
590 hwreqprev = list_entry(hwep->qh.queue.prev,
609 hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
610 hwep->qh.ptr->td.token &=
613 if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) {
614 u32 mul = hwreq->req.length / hwep->ep.maxpacket;
617 || hwreq->req.length % hwep->ep.maxpacket)
619 hwep->qh.ptr->cap |= cpu_to_le32(mul << __ffs(QH_MULT));
622 ret = hw_ep_prime(ci, hwep->num, hwep->dir,
623 hwep->type == USB_ENDPOINT_XFER_CONTROL);
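
Lines 609-623 show the re-arm sequence for an idle queue: point the queue head's TD overlay at the first new descriptor, clear the halted/active status bits, then prime the endpoint. A standalone sketch of the overlay update; the bit positions follow the EHCI-style token layout (halted in bit 6, active in bit 7) and are stated here as assumptions:

#include <stdint.h>

#define SKETCH_TD_STATUS_HALTED	(1u << 6)
#define SKETCH_TD_STATUS_ACTIVE	(1u << 7)

struct sketch_qh_overlay {
	uint32_t td_next;	/* little-endian in the real hardware */
	uint32_t td_token;
};

static void sketch_rearm_qh(struct sketch_qh_overlay *qh,
			    uint32_t first_td_dma)
{
	/* Hand the controller the new TD chain and clear stale status. */
	qh->td_next = first_td_dma;
	qh->td_token &= ~(SKETCH_TD_STATUS_HALTED | SKETCH_TD_STATUS_ACTIVE);
}
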
630 * @hwep: endpoint
632 static void free_pending_td(struct ci_hw_ep *hwep)
634 struct td_node *pending = hwep->pending_td;
636 dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
637 hwep->pending_td = NULL;
641 static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
644 hwep->qh.ptr->td.next = cpu_to_le32(node->dma);
645 hwep->qh.ptr->td.token &=
648 return hw_ep_prime(ci, hwep->num, hwep->dir,
649 hwep->type == USB_ENDPOINT_XFER_CONTROL);
654 * @hwep: endpoint
659 static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
665 struct ci_hdrc *ci = hwep->ci;
675 int n = hw_ep_bit(hwep->num, hwep->dir);
679 reprime_dtd(ci, hwep, node);
701 if (hwep->dir == TX) {
711 if (hwep->pending_td)
712 free_pending_td(hwep);
714 hwep->pending_td = node;
718 usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
719 &hwreq->req, hwep->dir);
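
In the _hardware_dequeue matches above, the completed TD token is the only record of how much data actually moved: the controller decrements a "total bytes" field as it transfers. A sketch of recovering the transferred count, assuming the EHCI-style field in bits 30:16 of the token:

#include <stdint.h>

static uint32_t sketch_bytes_done(uint32_t td_token, uint32_t bytes_queued)
{
	uint32_t remaining = (td_token >> 16) & 0x7fff;	/* assumed field */

	return bytes_queued - remaining;
}
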
731 * @hwep: endpoint
736 static int _ep_nuke(struct ci_hw_ep *hwep)
737 __releases(hwep->lock)
738 __acquires(hwep->lock)
741 if (hwep == NULL)
744 hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
746 while (!list_empty(&hwep->qh.queue)) {
749 struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
753 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
763 spin_unlock(hwep->lock);
764 usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
765 spin_lock(hwep->lock);
769 if (hwep->pending_td)
770 free_pending_td(hwep);
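
Lines 763-765 drop hwep->lock around usb_gadget_giveback_request (the __releases/__acquires annotations at 737-738 document this), because the gadget's completion callback may re-enter the driver, for example to queue another request. A generic sketch of that drop-lock-around-callback shape, with all names illustrative:

struct sketch_lock_ops {
	void (*unlock)(void *lock);
	void (*lock)(void *lock);
};

static void sketch_giveback(const struct sketch_lock_ops *ops, void *lock,
			    void (*complete)(void *req), void *req)
{
	/* The completion handler may requeue, so run it unlocked. */
	ops->unlock(lock);
	complete(req);
	ops->lock(lock);
}
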
777 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
781 if (ep == NULL || hwep->ep.desc == NULL)
784 if (usb_endpoint_xfer_isoc(hwep->ep.desc))
787 spin_lock_irqsave(hwep->lock, flags);
789 if (value && hwep->dir == TX && check_transfer &&
790 !list_empty(&hwep->qh.queue) &&
791 !usb_endpoint_xfer_control(hwep->ep.desc)) {
792 spin_unlock_irqrestore(hwep->lock, flags);
796 direction = hwep->dir;
798 retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
801 hwep->wedge = 0;
803 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
804 hwep->dir = (hwep->dir == TX) ? RX : TX;
806 } while (hwep->dir != direction);
808 spin_unlock_irqrestore(hwep->lock, flags);
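
The do/while at 796-806 (and again in ep_disable further down, 1429-1437) applies the operation to both directions of a control endpoint by flipping hwep->dir until it comes back to the starting value; a non-control endpoint runs the body exactly once. A standalone sketch of that loop shape, names illustrative:

enum sketch_dir { SKETCH_RX, SKETCH_TX };

static void sketch_for_each_ep_dir(int is_control, enum sketch_dir dir,
				   void (*op)(enum sketch_dir))
{
	enum sketch_dir start = dir;

	do {
		op(dir);
		/* Control endpoints are handled in both directions. */
		if (is_control)
			dir = (dir == SKETCH_TX) ? SKETCH_RX : SKETCH_TX;
	} while (dir != start);
}
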
917 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
919 struct ci_hdrc *ci = hwep->ci;
922 if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
925 if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
927 hwep = (ci->ep0_dir == RX) ?
929 if (!list_empty(&hwep->qh.queue)) {
930 _ep_nuke(hwep);
931 dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
932 _usb_addr(hwep));
936 if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
937 hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) {
938 dev_err(hwep->ci->dev, "request length too big for isochronous\n");
944 dev_err(hwep->ci->dev, "request already in queue\n");
952 retval = _hardware_enqueue(hwep, hwreq);
957 list_add_tail(&hwreq->queue, &hwep->qh.queue);
971 __releases(hwep->lock)
972 __acquires(hwep->lock)
974 struct ci_hw_ep *hwep = ci->ep0in;
979 if (hwep == NULL || setup == NULL)
982 spin_unlock(hwep->lock);
983 req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
984 spin_lock(hwep->lock);
1008 retval = _ep_queue(&hwep->ep, req, gfp_flags);
1017 spin_unlock(hwep->lock);
1018 usb_ep_free_request(&hwep->ep, req);
1019 spin_lock(hwep->lock);
1061 struct ci_hw_ep *hwep;
1072 hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
1076 return _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
1081 * @hwep: endpoint
1086 static int isr_tr_complete_low(struct ci_hw_ep *hwep)
1087 __releases(hwep->lock)
1088 __acquires(hwep->lock)
1091 struct ci_hw_ep *hweptemp = hwep;
1094 list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
1096 retval = _hardware_dequeue(hwep, hwreq);
1101 spin_unlock(hwep->lock);
1102 if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
1104 hweptemp = hwep->ci->ep0in;
1106 spin_lock(hwep->lock);
1133 struct ci_hw_ep *hwep = &ci->ci_hw_ep[0];
1148 memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
1287 if (_ep_set_halt(&hwep->ep, 1, false))
1307 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
1309 if (hwep->ep.desc == NULL)
1313 err = isr_tr_complete_low(hwep);
1314 if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
1319 if (_ep_set_halt(&hwep->ep, 1, false))
1345 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1353 spin_lock_irqsave(hwep->lock, flags);
1357 if (!list_empty(&hwep->qh.queue)) {
1358 dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
1359 spin_unlock_irqrestore(hwep->lock, flags);
1363 hwep->ep.desc = desc;
1365 hwep->dir = usb_endpoint_dir_in(desc) ? TX : RX;
1366 hwep->num = usb_endpoint_num(desc);
1367 hwep->type = usb_endpoint_type(desc);
1369 hwep->ep.maxpacket = usb_endpoint_maxp(desc);
1370 hwep->ep.mult = usb_endpoint_maxp_mult(desc);
1372 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1376 cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1381 if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
1384 hwep->qh.ptr->cap = cpu_to_le32(cap);
1386 hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE); /* needed? */
1388 if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
1389 dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
1397 if (hwep->num)
1398 retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
1399 hwep->type);
1401 spin_unlock_irqrestore(hwep->lock, flags);
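
Lines 1372-1384 assemble the queue-head capabilities word during ep_enable: interrupt-on-setup for control endpoints, the max packet size, and a forced non-zero mult for isochronous TX (IN) endpoints. A sketch of that composition, with the field positions assumed from the EHCI-style queue head layout rather than taken from the driver header:

#include <stdint.h>

#define SKETCH_QH_IOS		(1u << 15)	/* interrupt on setup */
#define SKETCH_QH_MAX_PKT	(0x7ffu << 16)	/* maximum packet length */
#define SKETCH_QH_MULT		(0x3u << 30)	/* high-bandwidth ISO mult */

static uint32_t sketch_qh_cap(int is_control, int is_iso_in,
			      unsigned int maxpacket)
{
	uint32_t cap = 0;

	if (is_control)
		cap |= SKETCH_QH_IOS;

	cap |= ((uint32_t)maxpacket << 16) & SKETCH_QH_MAX_PKT;

	/* ISO-IN needs a non-zero mult or the controller sends nothing. */
	if (is_iso_in)
		cap |= SKETCH_QH_MULT;

	return cap;
}
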
1412 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1418 else if (hwep->ep.desc == NULL)
1421 spin_lock_irqsave(hwep->lock, flags);
1422 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1423 spin_unlock_irqrestore(hwep->lock, flags);
1429 direction = hwep->dir;
1431 retval |= _ep_nuke(hwep);
1432 retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);
1434 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1435 hwep->dir = (hwep->dir == TX) ? RX : TX;
1437 } while (hwep->dir != direction);
1439 hwep->ep.desc = NULL;
1441 spin_unlock_irqrestore(hwep->lock, flags);
1473 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1481 dev_err(hwep->ci->dev, "freeing queued request\n");
1485 spin_lock_irqsave(hwep->lock, flags);
1488 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1496 spin_unlock_irqrestore(hwep->lock, flags);
1507 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1511 if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
1514 spin_lock_irqsave(hwep->lock, flags);
1515 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1516 spin_unlock_irqrestore(hwep->lock, flags);
1520 spin_unlock_irqrestore(hwep->lock, flags);
1531 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1537 hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
1538 list_empty(&hwep->qh.queue))
1541 spin_lock_irqsave(hwep->lock, flags);
1542 if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
1543 hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1546 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1554 usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
1559 spin_unlock(hwep->lock);
1560 usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
1561 spin_lock(hwep->lock);
1564 spin_unlock_irqrestore(hwep->lock, flags);
1585 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1588 if (ep == NULL || hwep->ep.desc == NULL)
1591 spin_lock_irqsave(hwep->lock, flags);
1592 hwep->wedge = 1;
1593 spin_unlock_irqrestore(hwep->lock, flags);
1605 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1609 dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
1613 spin_lock_irqsave(hwep->lock, flags);
1614 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1615 spin_unlock_irqrestore(hwep->lock, flags);
1619 hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1621 spin_unlock_irqrestore(hwep->lock, flags);
1737 struct ci_hw_ep *hwep = ci->ep0in;
1740 spin_lock_irqsave(hwep->lock, flags);
1742 spin_unlock_irqrestore(hwep->lock, flags);
1816 struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];
1818 scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
1821 hwep->ci = ci;
1822 hwep->lock = &ci->lock;
1823 hwep->td_pool = ci->td_pool;
1825 hwep->ep.name = hwep->name;
1826 hwep->ep.ops = &usb_ep_ops;
1829 hwep->ep.caps.type_control = true;
1831 hwep->ep.caps.type_iso = true;
1832 hwep->ep.caps.type_bulk = true;
1833 hwep->ep.caps.type_int = true;
1837 hwep->ep.caps.dir_in = true;
1839 hwep->ep.caps.dir_out = true;
1846 usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0);
1848 INIT_LIST_HEAD(&hwep->qh.queue);
1849 hwep->qh.ptr = dma_pool_zalloc(ci->qh_pool, GFP_KERNEL,
1850 &hwep->qh.dma);
1851 if (hwep->qh.ptr == NULL)
1860 ci->ep0out = hwep;
1862 ci->ep0in = hwep;
1864 usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX);
1868 list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
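
The init loop at 1816-1868 creates one gadget endpoint per hardware endpoint and direction, naming each from its index and direction at 1818. A sketch of that naming step, with the direction encoding (0 = OUT, 1 = IN) assumed for illustration:

#include <stdio.h>

static void sketch_ep_name(char *buf, size_t len, int num, int dir_in)
{
	/* Produces "ep0out", "ep0in", "ep1out", "ep1in", ... */
	snprintf(buf, len, "ep%i%s", num, dir_in ? "in" : "out");
}
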
1879 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
1881 if (hwep->pending_td)
1882 free_pending_td(hwep);
1883 dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);