Lines Matching refs:musb (drivers/usb/musb/musb_gadget.c)
33 struct musb *musb, struct musb_ep *musb_ep)
36 struct dma_controller *dma = musb->dma_controller;
59 musb->controller,
65 ret = dma_mapping_error(musb->controller, dma_addr);
72 dma_sync_single_for_device(musb->controller,
84 struct musb *musb)
92 dev_vdbg(musb->controller,
97 dma_unmap_single(musb->controller,
105 dma_sync_single_for_cpu(musb->controller,
126 __releases(ep->musb->lock)
127 __acquires(ep->musb->lock)
130 struct musb *musb;
138 musb = req->musb;
141 spin_unlock(&musb->lock);
143 if (!dma_mapping_error(&musb->g.dev, request->dma))
144 unmap_dma_buffer(req, musb);
148 spin_lock(&musb->lock);
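The fragments at 126-148 are musb_g_giveback(). A condensed paraphrase of the pattern they show (the sketch function name is ours; unmap_dma_buffer() is the driver-internal helper listed near the top): the lock is dropped around the DMA unmap and the completion callback, because gadget drivers may queue or dequeue requests from inside the callback.

    /* Condensed paraphrase of musb_g_giveback(): complete a request
     * with musb->lock temporarily dropped, since the completion
     * callback may re-enter the UDC. */
    static void musb_g_giveback_sketch(struct musb_ep *ep,
                    struct usb_request *request, int status)
    __releases(ep->musb->lock)
    __acquires(ep->musb->lock)
    {
        struct musb_request *req = to_musb_request(request);
        struct musb *musb = req->musb;

        list_del(&req->list);
        if (req->request.status == -EINPROGRESS)
            req->request.status = status;

        spin_unlock(&musb->lock);
        if (!dma_mapping_error(&musb->g.dev, request->dma))
            unmap_dma_buffer(req, musb);
        usb_gadget_giveback_request(&req->ep->end_point, &req->request);
        spin_lock(&musb->lock);
    }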
160 struct musb *musb = ep->musb;
162 void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
167 struct dma_controller *c = ep->musb->dma_controller;
188 musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
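Lines 160-188 are from nuke(), the endpoint teardown path: any active DMA channel is aborted and released first, then every queued request is handed back with the abort status. Roughly:

    /* Sketch of nuke(): abort in-flight DMA, then fail every
     * request still queued on the endpoint. */
    musb_ep_select(musb->mregs, ep->current_epnum);
    if (is_dma_capable() && ep->dma) {
        struct dma_controller *c = ep->musb->dma_controller;
        int value = c->channel_abort(ep->dma);

        musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
        c->channel_release(ep->dma);
        ep->dma = NULL;
    }
    while (!list_empty(&ep->req_list)) {
        req = list_first_entry(&ep->req_list, struct musb_request, list);
        musb_g_giveback(ep, &req->request, status);
    }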
208 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
210 if (can_bulk_split(musb, ep->type))
223 static void txstate(struct musb *musb, struct musb_request *req)
227 void __iomem *epio = musb->endpoints[epnum].regs;
236 musb_dbg(musb, "ep:%s disabled - ignore request",
243 musb_dbg(musb, "dma pending...");
251 fifo_count = min(max_ep_writesize(musb, musb_ep),
255 musb_dbg(musb, "%s old packet still ready , txcsr %03x",
261 musb_dbg(musb, "%s stalling, txcsr %03x",
266 musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x",
272 struct dma_controller *c = musb->dma_controller;
283 if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
323 can_bulk_split(musb,
333 if (is_cppi_enabled(musb)) {
369 } else if (tusb_dma_omap(musb))
383 unmap_dma_buffer(req, musb);
394 musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d",
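Lines 223-394 walk txstate(). When no DMA engine claims the transfer, the PIO fallback (visible around 251 and 383) copies at most one write-size chunk into the FIFO and arms TXPKTRDY. Condensed:

    /* PIO branch of txstate(), condensed: load up to one packet
     * into the hardware FIFO, then set TXPKTRDY so the controller
     * sends it on the next IN token. */
    fifo_count = min(max_ep_writesize(musb, musb_ep),
             (int)(request->length - request->actual));
    if (is_buffer_mapped(req))
        unmap_dma_buffer(req, musb);    /* fall back to CPU access */
    musb_write_fifo(musb_ep->hw_ep, fifo_count,
            (u8 *)(request->buf + request->actual));
    request->actual += fifo_count;
    csr |= MUSB_TXCSR_TXPKTRDY;
    csr &= ~MUSB_TXCSR_P_UNDERRUN;
    musb_writew(epio, MUSB_TXCSR, csr);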
406 void musb_g_tx(struct musb *musb, u8 epnum)
411 u8 __iomem *mbase = musb->mregs;
412 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
413 void __iomem *epio = musb->endpoints[epnum].regs;
421 musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
441 dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
450 musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name);
466 musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p",
503 musb_dbg(musb, "%s idle now",
509 txstate(musb, req);
518 static void rxstate(struct musb *musb, struct musb_request *req)
523 void __iomem *epio = musb->endpoints[epnum].regs;
527 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
539 musb_dbg(musb, "ep:%s disabled - ignore request",
546 musb_dbg(musb, "DMA pending...");
551 musb_dbg(musb, "%s stalling, RXCSR %04x",
556 if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
557 struct dma_controller *c = musb->dma_controller;
601 if (musb_dma_inventra(musb)) {
607 c = musb->dma_controller;
676 if ((musb_dma_ux500(musb)) &&
683 c = musb->dma_controller;
726 musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d",
733 if (tusb_dma_omap(musb)) {
734 struct dma_controller *c = musb->dma_controller;
753 unmap_dma_buffer(req, musb);
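Lines 518-753 are rxstate(). Its PIO branch mirrors the TX side: read RXCOUNT, drain the packet into the request buffer, then clear RXPKTRDY to free the FIFO for the next OUT packet. Condensed:

    /* PIO branch of rxstate(), condensed. */
    fifo_count = musb_readw(epio, MUSB_RXCOUNT);
    fifo_count = min_t(unsigned, request->length - request->actual,
               fifo_count);
    if (is_buffer_mapped(req))
        unmap_dma_buffer(req, musb);    /* fall back to CPU access */
    musb_read_fifo(musb_ep->hw_ep, fifo_count,
            (u8 *)(request->buf + request->actual));
    request->actual += fifo_count;
    csr &= ~MUSB_RXCSR_RXPKTRDY;        /* ack the read */
    musb_writew(epio, MUSB_RXCSR, csr);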
790 void musb_g_rx(struct musb *musb, u8 epnum)
795 void __iomem *mbase = musb->mregs;
797 void __iomem *epio = musb->endpoints[epnum].regs;
799 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
818 musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name,
833 musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request);
839 musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name);
844 musb_dbg(musb, "%s busy, csr %04x",
903 rxstate(musb, req);
915 struct musb *musb;
928 musb = musb_ep->musb;
929 mbase = musb->mregs;
932 spin_lock_irqsave(&musb->lock, flags);
950 ok = musb->hb_iso_tx;
952 ok = musb->hb_iso_rx;
955 musb_dbg(musb, "no support for high bandwidth ISO");
978 musb_dbg(musb, "packet size beyond hardware FIFO size");
982 musb->intrtxe |= (1 << epnum);
983 musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
991 if (can_bulk_split(musb, musb_ep->type))
1017 musb_dbg(musb, "packet size beyond hardware FIFO size");
1021 musb->intrrxe |= (1 << epnum);
1022 musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);
1054 if (is_dma_capable() && musb->dma_controller) {
1055 struct dma_controller *c = musb->dma_controller;
1074 schedule_delayed_work(&musb->irq_work, 0);
1077 spin_unlock_irqrestore(&musb->lock, flags);
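Lines 915-1077 are musb_gadget_enable(). The driver never reads INTRTXE/INTRRXE back; it keeps shadow masks in musb->intrtxe and musb->intrrxe and rewrites the full register each time. In this sketch, is_in stands in for the descriptor direction check the real function performs:

    /* Interrupt-enable pattern from musb_gadget_enable(): set the
     * endpoint's bit in the cached mask, write the whole mask.
     * musb_gadget_disable() (1087-1121) clears the same bit. */
    if (is_in) {
        musb->intrtxe |= (1 << epnum);
        musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
    } else {
        musb->intrrxe |= (1 << epnum);
        musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);
    }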
1087 struct musb *musb;
1093 musb = musb_ep->musb;
1095 epio = musb->endpoints[epnum].regs;
1097 spin_lock_irqsave(&musb->lock, flags);
1098 musb_ep_select(musb->mregs, epnum);
1102 musb->intrtxe &= ~(1 << epnum);
1103 musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
1106 musb->intrrxe &= ~(1 << epnum);
1107 musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
1117 schedule_delayed_work(&musb->irq_work, 0);
1119 spin_unlock_irqrestore(&(musb->lock), flags);
1121 musb_dbg(musb, "%s", musb_ep->end_point.name);
1171 void musb_ep_restart(struct musb *musb, struct musb_request *req)
1174 musb_ep_select(musb->mregs, req->epnum);
1176 txstate(musb, req);
1178 rxstate(musb, req);
1181 static int musb_ep_restart_resume_work(struct musb *musb, void *data)
1185 musb_ep_restart(musb, req);
1195 struct musb *musb;
1205 musb = musb_ep->musb;
1208 request->musb = musb;
1213 status = pm_runtime_get(musb->controller);
1215 dev_err(musb->controller,
1218 pm_runtime_put_noidle(musb->controller);
1232 map_dma_buffer(request, musb, musb_ep);
1234 spin_lock_irqsave(&musb->lock, lockflags);
1238 musb_dbg(musb, "req %p queued to %s while ep %s",
1241 unmap_dma_buffer(request, musb);
1250 status = musb_queue_resume_work(musb,
1254 dev_err(musb->controller, "%s resume work: %i\n",
1261 spin_unlock_irqrestore(&musb->lock, lockflags);
1262 pm_runtime_mark_last_busy(musb->controller);
1263 pm_runtime_put_autosuspend(musb->controller);
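Lines 1195-1263 are musb_gadget_queue(). Note the runtime-PM bracket around the hardware access; pm_runtime_get() may legitimately return -EINPROGRESS while a resume is still pending. Condensed:

    /* Runtime-PM bracket in musb_gadget_queue(): take a reference
     * before touching the controller, release it with autosuspend
     * once the request is queued. */
    status = pm_runtime_get(musb->controller);
    if ((status != -EINPROGRESS) && status < 0) {
        pm_runtime_put_noidle(musb->controller);
        return status;
    }
    map_dma_buffer(request, musb, musb_ep);
    spin_lock_irqsave(&musb->lock, lockflags);
    /* ... enqueue on ep->req_list, kick the FIFO via resume work ... */
    spin_unlock_irqrestore(&musb->lock, lockflags);
    pm_runtime_mark_last_busy(musb->controller);
    pm_runtime_put_autosuspend(musb->controller);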
1275 struct musb *musb = musb_ep->musb;
1282 spin_lock_irqsave(&musb->lock, flags);
1289 dev_err(musb->controller, "request %p not queued to %s\n",
1301 struct dma_controller *c = musb->dma_controller;
1303 musb_ep_select(musb->mregs, musb_ep->current_epnum);
1318 spin_unlock_irqrestore(&musb->lock, flags);
1332 struct musb *musb = musb_ep->musb;
1333 void __iomem *epio = musb->endpoints[epnum].regs;
1342 mbase = musb->mregs;
1344 spin_lock_irqsave(&musb->lock, flags);
1356 musb_dbg(musb, "request in progress, cannot halt %s",
1365 musb_dbg(musb, "FIFO busy, cannot halt %s",
1375 musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear");
1402 musb_dbg(musb, "restarting the request");
1403 musb_ep_restart(musb, request);
1407 spin_unlock_irqrestore(&musb->lock, flags);
1433 struct musb *musb = musb_ep->musb;
1435 void __iomem *mbase = musb->mregs;
1438 spin_lock_irqsave(&musb->lock, flags);
1444 spin_unlock_irqrestore(&musb->lock, flags);
1452 struct musb *musb = musb_ep->musb;
1454 void __iomem *epio = musb->endpoints[epnum].regs;
1459 mbase = musb->mregs;
1461 spin_lock_irqsave(&musb->lock, flags);
1465 musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));
1489 musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
1490 spin_unlock_irqrestore(&musb->lock, flags);
1510 struct musb *musb = gadget_to_musb(gadget);
1512 return (int)musb_readw(musb->mregs, MUSB_FRAME);
1517 struct musb *musb = gadget_to_musb(gadget);
1518 void __iomem *mregs = musb->mregs;
1524 spin_lock_irqsave(&musb->lock, flags);
1526 switch (musb_get_state(musb)) {
1532 if (musb->may_wakeup && musb->is_suspended)
1538 musb_dbg(musb, "Sending SRP: devctl: %02x", devctl);
1555 if (musb->xceiv) {
1556 spin_unlock_irqrestore(&musb->lock, flags);
1557 otg_start_srp(musb->xceiv->otg);
1558 spin_lock_irqsave(&musb->lock, flags);
1562 musb_platform_try_idle(musb,
1568 musb_dbg(musb, "Unhandled wake: %s",
1569 musb_otg_state_string(musb));
1578 musb_dbg(musb, "issue wakeup");
1587 spin_unlock_irqrestore(&musb->lock, flags);
1598 static void musb_pullup(struct musb *musb, int is_on)
1602 power = musb_readb(musb->mregs, MUSB_POWER);
1610 musb_dbg(musb, "gadget D+ pullup %s",
1612 musb_writeb(musb->mregs, MUSB_POWER, power);
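Lines 1598-1612 are musb_pullup(), the one place the D+ pullup is toggled; musb_gadget_pullup() (1652-1665) only records softconnect and defers to gadget_work (1638-1647), so the POWER register is touched only with the controller runtime-resumed. The register-level part, condensed:

    /* musb_pullup(), condensed: toggle SOFTCONN in the POWER
     * register to connect or disconnect D+. */
    power = musb_readb(musb->mregs, MUSB_POWER);
    if (is_on)
        power |= MUSB_POWER_SOFTCONN;
    else
        power &= ~MUSB_POWER_SOFTCONN;
    musb_writeb(musb->mregs, MUSB_POWER, power);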
1618 musb_dbg(musb, "<= %s =>\n", __func__);
1631 struct musb *musb = gadget_to_musb(gadget);
1633 return usb_phy_set_power(musb->xceiv, mA);
1638 struct musb *musb;
1641 musb = container_of(work, struct musb, gadget_work.work);
1642 pm_runtime_get_sync(musb->controller);
1643 spin_lock_irqsave(&musb->lock, flags);
1644 musb_pullup(musb, musb->softconnect);
1645 spin_unlock_irqrestore(&musb->lock, flags);
1646 pm_runtime_mark_last_busy(musb->controller);
1647 pm_runtime_put_autosuspend(musb->controller);
1652 struct musb *musb = gadget_to_musb(gadget);
1660 spin_lock_irqsave(&musb->lock, flags);
1661 if (is_on != musb->softconnect) {
1662 musb->softconnect = is_on;
1663 schedule_delayed_work(&musb->gadget_work, 0);
1665 spin_unlock_irqrestore(&musb->lock, flags);
1695 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1697 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1702 ep->musb = musb;
1717 musb->g.ep0 = &ep->end_point;
1727 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1743 static inline void musb_g_init_endpoints(struct musb *musb)
1750 INIT_LIST_HEAD(&(musb->g.ep_list));
1752 for (epnum = 0, hw_ep = musb->endpoints;
1753 epnum < musb->nr_endpoints;
1756 init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1760 init_peripheral_ep(musb, &hw_ep->ep_in,
1765 init_peripheral_ep(musb, &hw_ep->ep_out,
1776 int musb_gadget_setup(struct musb *musb)
1781 * musb peripherals at the same time, only the bus lock
1785 musb->g.ops = &musb_gadget_operations;
1786 musb->g.max_speed = USB_SPEED_HIGH;
1787 musb->g.speed = USB_SPEED_UNKNOWN;
1789 MUSB_DEV_MODE(musb);
1790 musb_set_state(musb, OTG_STATE_B_IDLE);
1793 musb->g.name = musb_driver_name;
1795 musb->g.is_otg = 0;
1796 INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
1797 musb_g_init_endpoints(musb);
1799 musb->is_active = 0;
1800 musb_platform_try_idle(musb, 0);
1802 status = usb_add_gadget_udc(musb->controller, &musb->g);
1808 musb->g.dev.parent = NULL;
1809 device_unregister(&musb->g.dev);
1813 void musb_gadget_cleanup(struct musb *musb)
1815 if (musb->port_mode == MUSB_HOST)
1818 cancel_delayed_work_sync(&musb->gadget_work);
1819 usb_del_gadget_udc(&musb->g);
1836 struct musb *musb = gadget_to_musb(g);
1845 pm_runtime_get_sync(musb->controller);
1847 musb->softconnect = 0;
1848 musb->gadget_driver = driver;
1850 spin_lock_irqsave(&musb->lock, flags);
1851 musb->is_active = 1;
1853 if (musb->xceiv)
1854 otg_set_peripheral(musb->xceiv->otg, &musb->g);
1856 phy_set_mode(musb->phy, PHY_MODE_USB_DEVICE);
1858 musb_set_state(musb, OTG_STATE_B_IDLE);
1859 spin_unlock_irqrestore(&musb->lock, flags);
1861 musb_start(musb);
1867 if (musb->xceiv && musb->xceiv->last_event == USB_EVENT_ID)
1868 musb_platform_set_vbus(musb, 1);
1870 pm_runtime_mark_last_busy(musb->controller);
1871 pm_runtime_put_autosuspend(musb->controller);
1887 struct musb *musb = gadget_to_musb(g);
1890 pm_runtime_get_sync(musb->controller);
1897 spin_lock_irqsave(&musb->lock, flags);
1899 musb_hnp_stop(musb);
1901 (void) musb_gadget_vbus_draw(&musb->g, 0);
1903 musb_set_state(musb, OTG_STATE_UNDEFINED);
1904 musb_stop(musb);
1906 if (musb->xceiv)
1907 otg_set_peripheral(musb->xceiv->otg, NULL);
1909 phy_set_mode(musb->phy, PHY_MODE_INVALID);
1911 musb->is_active = 0;
1912 musb->gadget_driver = NULL;
1913 musb_platform_try_idle(musb, 0);
1914 spin_unlock_irqrestore(&musb->lock, flags);
1923 pm_runtime_mark_last_busy(musb->controller);
1924 pm_runtime_put_autosuspend(musb->controller);
1933 void musb_g_resume(struct musb *musb)
1935 musb->is_suspended = 0;
1936 switch (musb_get_state(musb)) {
1941 musb->is_active = 1;
1942 if (musb->gadget_driver && musb->gadget_driver->resume) {
1943 spin_unlock(&musb->lock);
1944 musb->gadget_driver->resume(&musb->g);
1945 spin_lock(&musb->lock);
1950 musb_otg_state_string(musb));
1955 void musb_g_suspend(struct musb *musb)
1959 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1960 musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl);
1962 switch (musb_get_state(musb)) {
1965 musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
1968 musb->is_suspended = 1;
1969 if (musb->gadget_driver && musb->gadget_driver->suspend) {
1970 spin_unlock(&musb->lock);
1971 musb->gadget_driver->suspend(&musb->g);
1972 spin_lock(&musb->lock);
1980 musb_otg_state_string(musb));
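Lines 1933-1980 are musb_g_resume() and musb_g_suspend(). Both forward the event to the gadget driver with musb->lock dropped, the same re-entrancy guard used in musb_g_giveback() above:

    /* Callback pattern shared by musb_g_suspend(), _resume() and
     * _disconnect(): drop the lock around the gadget driver hook. */
    if (musb->gadget_driver && musb->gadget_driver->suspend) {
        spin_unlock(&musb->lock);
        musb->gadget_driver->suspend(&musb->g);
        spin_lock(&musb->lock);
    }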
1985 void musb_g_wakeup(struct musb *musb)
1987 musb_gadget_wakeup(&musb->g);
1991 void musb_g_disconnect(struct musb *musb)
1993 void __iomem *mregs = musb->mregs;
1996 musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl);
2002 (void) musb_gadget_vbus_draw(&musb->g, 0);
2004 musb->g.speed = USB_SPEED_UNKNOWN;
2005 if (musb->gadget_driver && musb->gadget_driver->disconnect) {
2006 spin_unlock(&musb->lock);
2007 musb->gadget_driver->disconnect(&musb->g);
2008 spin_lock(&musb->lock);
2011 switch (musb_get_state(musb)) {
2013 musb_dbg(musb, "Unhandled disconnect %s, setting a_idle",
2014 musb_otg_state_string(musb));
2015 musb_set_state(musb, OTG_STATE_A_IDLE);
2016 MUSB_HST_MODE(musb);
2019 musb_set_state(musb, OTG_STATE_A_WAIT_BCON);
2020 MUSB_HST_MODE(musb);
2026 musb_set_state(musb, OTG_STATE_B_IDLE);
2032 musb->is_active = 0;
2035 void musb_g_reset(struct musb *musb)
2036 __releases(musb->lock)
2037 __acquires(musb->lock)
2039 void __iomem *mbase = musb->mregs;
2043 musb_dbg(musb, "<== %s driver '%s'",
2046 musb->gadget_driver
2047 ? musb->gadget_driver->driver.name
2052 if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) {
2053 spin_unlock(&musb->lock);
2054 usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
2055 spin_lock(&musb->lock);
2065 musb->g.speed = (power & MUSB_POWER_HSMODE)
2069 musb->is_active = 1;
2070 musb->is_suspended = 0;
2071 MUSB_DEV_MODE(musb);
2072 musb->address = 0;
2073 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2075 musb->may_wakeup = 0;
2076 musb->g.b_hnp_enable = 0;
2077 musb->g.a_alt_hnp_support = 0;
2078 musb->g.a_hnp_support = 0;
2079 musb->g.quirk_zlp_not_supp = 1;
2084 if (!musb->g.is_otg) {
2090 musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
2091 musb->g.is_a_peripheral = 0;
2093 musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
2094 musb->g.is_a_peripheral = 0;
2096 musb_set_state(musb, OTG_STATE_A_PERIPHERAL);
2097 musb->g.is_a_peripheral = 1;
2101 (void) musb_gadget_vbus_draw(&musb->g, 8);
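Lines 2035-2101 close the listing with musb_g_reset(). After a bus reset, the negotiated speed is latched from the POWER register and the device state is re-armed for the next SETUP. Condensed:

    /* Tail of musb_g_reset(): latch negotiated speed, reset
     * addressing and ep0 state. */
    power = musb_readb(mbase, MUSB_POWER);
    musb->g.speed = (power & MUSB_POWER_HSMODE)
            ? USB_SPEED_HIGH : USB_SPEED_FULL;
    musb->is_active = 1;
    musb->is_suspended = 0;
    musb->address = 0;
    musb->ep0_state = MUSB_EP0_STAGE_SETUP;
    musb->may_wakeup = 0;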