Lines Matching defs:oxu

334 	struct oxu_hcd		*oxu;
482 #define oxu_dbg(oxu, fmt, args...) \
483 dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
484 #define oxu_err(oxu, fmt, args...) \
485 dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
486 #define oxu_info(oxu, fmt, args...) \
487 dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
493 static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
495 return container_of((void *) oxu, struct usb_hcd, hcd_priv);
513 #define oxu_vdbg(oxu, fmt, args...) /* Nop */
632 #define dbg_status(oxu, label, status) { \
635 oxu_dbg(oxu, "%s\n", _buf); \
638 #define dbg_cmd(oxu, label, command) { \
641 oxu_dbg(oxu, "%s\n", _buf); \
644 #define dbg_port(oxu, label, port, status) { \
647 oxu_dbg(oxu, "%s\n", _buf); \
670 static void ehci_work(struct oxu_hcd *oxu);
690 static inline void timer_action_done(struct oxu_hcd *oxu,
693 clear_bit(action, &oxu->actions);
696 static inline void timer_action(struct oxu_hcd *oxu,
699 if (!test_and_set_bit(action, &oxu->actions)) {
724 && t > oxu->watchdog.expires
725 && timer_pending(&oxu->watchdog))
727 mod_timer(&oxu->watchdog, t);
748 static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
765 static int ehci_halt(struct oxu_hcd *oxu)
767 u32 temp = readl(&oxu->regs->status);
770 writel(0, &oxu->regs->intr_enable);
775 temp = readl(&oxu->regs->command);
777 writel(temp, &oxu->regs->command);
778 return handshake(oxu, &oxu->regs->status,
783 static void tdi_reset(struct oxu_hcd *oxu)
788 reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
795 static int ehci_reset(struct oxu_hcd *oxu)
798 u32 command = readl(&oxu->regs->command);
801 dbg_cmd(oxu, "reset", command);
802 writel(command, &oxu->regs->command);
803 oxu_to_hcd(oxu)->state = HC_STATE_HALT;
804 oxu->next_statechange = jiffies;
805 retval = handshake(oxu, &oxu->regs->command,
811 tdi_reset(oxu);
817 static void ehci_quiesce(struct oxu_hcd *oxu)
822 BUG_ON(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state));
826 temp = readl(&oxu->regs->command) << 10;
828 if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
830 oxu_to_hcd(oxu)->state = HC_STATE_HALT;
835 temp = readl(&oxu->regs->command);
837 writel(temp, &oxu->regs->command);
840 if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
842 oxu_to_hcd(oxu)->state = HC_STATE_HALT;
847 static int check_reset_complete(struct oxu_hcd *oxu, int index,
851 oxu->reset_done[index] = 0;
857 oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
861 oxu_dbg(oxu, "port %d high speed\n", index + 1);
866 static void ehci_hub_descriptor(struct oxu_hcd *oxu,
869 int ports = HCS_N_PORTS(oxu->hcs_params);
873 desc->bPwrOn2PwrGood = 10; /* oxu 1.0, 2.3.9 says 20ms max */
885 if (HCS_PPC(oxu->hcs_params))
907 static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
915 oxu_err(oxu, "buffer too big (%d)\n", len);
919 spin_lock(&oxu->mem_lock);
930 i += max(a_blocks, (int)oxu->db_used[i])) {
934 if (oxu->db_used[i + j])
941 qtd->buffer = (void *) &oxu->mem->db_pool[i];
945 oxu->db_used[i] = a_blocks;
947 spin_unlock(&oxu->mem_lock);
954 spin_unlock(&oxu->mem_lock);
959 static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
963 spin_lock(&oxu->mem_lock);
965 index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
967 oxu->db_used[index] = 0;
972 spin_unlock(&oxu->mem_lock);
985 static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
990 oxu_buf_free(oxu, qtd);
992 spin_lock(&oxu->mem_lock);
994 index = qtd - &oxu->mem->qtd_pool[0];
995 oxu->qtd_used[index] = 0;
997 spin_unlock(&oxu->mem_lock);
1000 static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
1005 spin_lock(&oxu->mem_lock);
1008 if (!oxu->qtd_used[i])
1012 qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
1022 oxu->qtd_used[i] = 1;
1025 spin_unlock(&oxu->mem_lock);
1030 static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
1034 spin_lock(&oxu->mem_lock);
1036 index = qh - &oxu->mem->qh_pool[0];
1037 oxu->qh_used[index] = 0;
1039 spin_unlock(&oxu->mem_lock);
1045 struct oxu_hcd *oxu = qh->oxu;
1049 oxu_dbg(oxu, "unused qh not empty!\n");
1053 oxu_qtd_free(oxu, qh->dummy);
1054 oxu_qh_free(oxu, qh);
1057 static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
1062 spin_lock(&oxu->mem_lock);
1065 if (!oxu->qh_used[i])
1069 qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
1073 qh->oxu = oxu;
1078 qh->dummy = ehci_qtd_alloc(oxu);
1080 oxu_dbg(oxu, "no dummy td\n");
1081 oxu->qh_used[i] = 0;
1086 oxu->qh_used[i] = 1;
1089 spin_unlock(&oxu->mem_lock);
1106 static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
1110 spin_lock(&oxu->mem_lock);
1112 index = murb - &oxu->murb_pool[0];
1113 oxu->murb_used[index] = 0;
1115 spin_unlock(&oxu->mem_lock);
1118 static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
1124 spin_lock(&oxu->mem_lock);
1127 if (!oxu->murb_used[i])
1131 murb = &(oxu->murb_pool)[i];
1133 oxu->murb_used[i] = 1;
1136 spin_unlock(&oxu->mem_lock);
1145 static void ehci_mem_cleanup(struct oxu_hcd *oxu)
1147 kfree(oxu->murb_pool);
1148 oxu->murb_pool = NULL;
1150 if (oxu->async)
1151 qh_put(oxu->async);
1152 oxu->async = NULL;
1154 del_timer(&oxu->urb_timer);
1156 oxu->periodic = NULL;
1159 kfree(oxu->pshadow);
1160 oxu->pshadow = NULL;
1165 static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
1169 for (i = 0; i < oxu->periodic_size; i++)
1170 oxu->mem->frame_list[i] = EHCI_LIST_END;
1172 oxu->qh_used[i] = 0;
1174 oxu->qtd_used[i] = 0;
1176 oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
1177 if (!oxu->murb_pool)
1181 oxu->murb_used[i] = 0;
1183 oxu->async = oxu_qh_alloc(oxu);
1184 if (!oxu->async)
1187 oxu->periodic = (__le32 *) &oxu->mem->frame_list;
1188 oxu->periodic_dma = virt_to_phys(oxu->periodic);
1190 for (i = 0; i < oxu->periodic_size; i++)
1191 oxu->periodic[i] = EHCI_LIST_END;
1194 oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
1195 if (oxu->pshadow != NULL)
1199 oxu_dbg(oxu, "couldn't init memory\n");
1200 ehci_mem_cleanup(oxu);
1244 static inline void qh_update(struct oxu_hcd *oxu,
1278 static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
1293 qh_update(oxu, qh, qtd);
1296 static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
1328 oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
1340 oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
1348 static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
1349 __releases(oxu->lock)
1350 __acquires(oxu->lock)
1359 oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
1380 oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
1389 spin_unlock(&oxu->lock);
1390 usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
1391 spin_lock(&oxu->lock);
1394 static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
1395 static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
1397 static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
1398 static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
1406 static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
1447 ehci_urb_done(oxu, last->urb);
1450 oxu_murb_free(oxu, murb);
1452 ehci_urb_done(oxu, last->urb);
1456 oxu_qtd_free(oxu, last);
1485 HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
1491 if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
1526 qtd_copy_status(oxu, urb->complete ?
1550 ehci_urb_done(oxu, last->urb);
1553 oxu_murb_free(oxu, murb);
1555 ehci_urb_done(oxu, last->urb);
1558 oxu_qtd_free(oxu, last);
1571 qh_refresh(oxu, qh);
1579 intr_deschedule(oxu, qh);
1580 (void) qh_schedule(oxu, qh);
1582 unlink_async(oxu, qh);
1599 static void qtd_list_free(struct oxu_hcd *oxu,
1606 oxu_qtd_free(oxu, qtd);
1612 static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
1628 qtd = ehci_qtd_alloc(oxu);
1645 ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
1657 qtd = ehci_qtd_alloc(oxu);
1673 ret = oxu_buf_alloc(oxu, qtd, len);
1703 qtd->hw_alt_next = oxu->async->hw_alt_next;
1713 qtd = ehci_qtd_alloc(oxu);
1717 ret = oxu_buf_alloc(oxu, qtd, len);
1751 qtd = ehci_qtd_alloc(oxu);
1768 qtd_list_free(oxu, urb, head);
1779 static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
1782 struct ehci_qh *qh = oxu_qh_alloc(oxu);
1824 oxu_dbg(oxu, "intr period %d uframes, NYET!\n",
1896 oxu_dbg(oxu, "bogus dev %p speed %d\n", urb->dev, urb->dev->speed);
1909 qh_refresh(oxu, qh);
1915 static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
1921 head = oxu->async;
1922 timer_action_done(oxu, TIMER_ASYNC_OFF);
1924 u32 cmd = readl(&oxu->regs->command);
1928 (void)handshake(oxu, &oxu->regs->status,
1931 writel(cmd, &oxu->regs->command);
1932 oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
1939 qh_refresh(oxu, qh);
1961 static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
1969 /* can't sleep here, we have oxu->lock... */
1970 qh = qh_make(oxu, urb, GFP_ATOMIC);
2036 static int submit_async(struct oxu_hcd *oxu, struct urb *urb,
2048 oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
2055 spin_lock_irqsave(&oxu->lock, flags);
2056 if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
2061 qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
2071 qh_link_async(oxu, qh_get(qh));
2073 spin_unlock_irqrestore(&oxu->lock, flags);
2075 qtd_list_free(oxu, urb, qtd_list);
2081 static void end_unlink_async(struct oxu_hcd *oxu)
2083 struct ehci_qh *qh = oxu->reclaim;
2086 timer_action_done(oxu, TIMER_IAA_WATCHDOG);
2094 oxu->reclaim = next;
2095 oxu->reclaim_ready = 0;
2098 qh_completions(oxu, qh);
2101 && HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
2102 qh_link_async(oxu, qh);
2109 if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
2110 && oxu->async->qh_next.qh == NULL)
2111 timer_action(oxu, TIMER_ASYNC_OFF);
2115 oxu->reclaim = NULL;
2116 start_unlink_async(oxu, next);
2121 /* caller must own oxu->lock */
2123 static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
2125 int cmd = readl(&oxu->regs->command);
2129 assert_spin_locked(&oxu->lock);
2130 BUG_ON(oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
2135 if (unlikely(qh == oxu->async)) {
2137 if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
2138 && !oxu->reclaim) {
2140 writel(cmd & ~CMD_ASE, &oxu->regs->command);
2143 timer_action_done(oxu, TIMER_ASYNC_OFF);
2149 oxu->reclaim = qh = qh_get(qh);
2151 prev = oxu->async;
2159 if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) {
2163 end_unlink_async(oxu);
2167 oxu->reclaim_ready = 0;
2169 writel(cmd, &oxu->regs->command);
2170 (void) readl(&oxu->regs->command);
2171 timer_action(oxu, TIMER_IAA_WATCHDOG);
2174 static void scan_async(struct oxu_hcd *oxu)
2179 if (!++(oxu->stamp))
2180 oxu->stamp++;
2181 timer_action_done(oxu, TIMER_ASYNC_SHRINK);
2183 qh = oxu->async->qh_next.qh;
2188 && qh->stamp != oxu->stamp) {
2197 qh->stamp = oxu->stamp;
2198 temp = qh_completions(oxu, qh);
2211 if (qh->stamp == oxu->stamp)
2213 else if (!oxu->reclaim
2215 start_unlink_async(oxu, qh);
2222 timer_action(oxu, TIMER_ASYNC_SHRINK);
2240 /* caller must hold oxu->lock */
2241 static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
2243 union ehci_shadow *prev_p = &oxu->pshadow[frame];
2244 __le32 *hw_p = &oxu->periodic[frame];
2265 static unsigned short periodic_usecs(struct oxu_hcd *oxu,
2268 __le32 *hw_p = &oxu->periodic[frame];
2269 union ehci_shadow *q = &oxu->pshadow[frame];
2289 oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
2295 static int enable_periodic(struct oxu_hcd *oxu)
2303 status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
2305 oxu_to_hcd(oxu)->state = HC_STATE_HALT;
2306 usb_hc_died(oxu_to_hcd(oxu));
2310 cmd = readl(&oxu->regs->command) | CMD_PSE;
2311 writel(cmd, &oxu->regs->command);
2313 oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
2316 oxu->next_uframe = readl(&oxu->regs->frame_index)
2317 % (oxu->periodic_size << 3);
2321 static int disable_periodic(struct oxu_hcd *oxu)
2329 status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
2331 oxu_to_hcd(oxu)->state = HC_STATE_HALT;
2332 usb_hc_died(oxu_to_hcd(oxu));
2336 cmd = readl(&oxu->regs->command) & ~CMD_PSE;
2337 writel(cmd, &oxu->regs->command);
2340 oxu->next_uframe = -1;
2348 * no FSTN support (yet; oxu 0.96+)
2350 static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
2364 for (i = qh->start; i < oxu->periodic_size; i += period) {
2365 union ehci_shadow *prev = &oxu->pshadow[i];
2366 __le32 *hw_p = &oxu->periodic[i];
2404 oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
2409 if (!oxu->periodic_sched++)
2410 return enable_periodic(oxu);
2415 static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
2433 for (i = qh->start; i < oxu->periodic_size; i += period)
2434 periodic_unlink(oxu, i, qh);
2437 oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
2453 oxu->periodic_sched--;
2454 if (!oxu->periodic_sched)
2455 (void) disable_periodic(oxu);
2458 static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
2462 qh_unlink_periodic(oxu, qh);
2481 static int check_period(struct oxu_hcd *oxu,
2505 claimed = periodic_usecs(oxu, frame, uframe);
2509 } while ((frame += 1) < oxu->periodic_size);
2514 claimed = periodic_usecs(oxu, frame, uframe);
2517 } while ((frame += period) < oxu->periodic_size);
2523 static int check_intr_schedule(struct oxu_hcd *oxu,
2532 if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
2547 static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
2554 qh_refresh(oxu, qh);
2561 status = check_intr_schedule(oxu, frame, --uframe,
2578 status = check_intr_schedule(oxu,
2589 status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
2602 oxu_dbg(oxu, "reused qh %p schedule\n", qh);
2605 status = qh_link_periodic(oxu, qh);
2610 static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
2622 spin_lock_irqsave(&oxu->lock, flags);
2624 if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
2631 qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
2637 status = qh_schedule(oxu, qh);
2643 qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
2647 oxu_to_hcd(oxu)->self.bandwidth_int_reqs++;
2650 spin_unlock_irqrestore(&oxu->lock, flags);
2652 qtd_list_free(oxu, urb, qtd_list);
2657 static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb,
2660 oxu_dbg(oxu, "iso support is missing!\n");
2664 static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb,
2667 oxu_dbg(oxu, "split iso support is missing!\n");
2671 static void scan_periodic(struct oxu_hcd *oxu)
2676 mod = oxu->periodic_size << 3;
2683 now_uframe = oxu->next_uframe;
2684 if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
2685 clock = readl(&oxu->regs->frame_index);
2703 q_p = &oxu->pshadow[frame];
2704 hw_p = &oxu->periodic[frame];
2718 modified = qh_completions(oxu, temp.qh);
2720 intr_deschedule(oxu, temp.qh);
2724 oxu_dbg(oxu, "corrupt type %d frame %d shadow %p\n",
2748 if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
2750 oxu->next_uframe = now_uframe;
2751 now = readl(&oxu->regs->frame_index) % mod;
2768 static void ehci_turn_off_all_ports(struct oxu_hcd *oxu)
2770 int port = HCS_N_PORTS(oxu->hcs_params);
2773 writel(PORT_RWC_BITS, &oxu->regs->port_status[port]);
2776 static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
2780 if (!HCS_PPC(oxu->hcs_params))
2783 oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
2784 for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; ) {
2786 oxu_hub_control(oxu_to_hcd(oxu), SetPortFeature,
2789 oxu_hub_control(oxu_to_hcd(oxu), ClearPortFeature,
2797 * It calls driver completion functions, after dropping oxu->lock.
2799 static void ehci_work(struct oxu_hcd *oxu)
2801 timer_action_done(oxu, TIMER_IO_WATCHDOG);
2802 if (oxu->reclaim_ready)
2803 end_unlink_async(oxu);
2805 /* another CPU may drop oxu->lock during a schedule scan while
2809 if (oxu->scanning)
2811 oxu->scanning = 1;
2812 scan_async(oxu);
2813 if (oxu->next_uframe != -1)
2814 scan_periodic(oxu);
2815 oxu->scanning = 0;
2821 if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) &&
2822 (oxu->async->qh_next.ptr != NULL ||
2823 oxu->periodic_sched != 0))
2824 timer_action(oxu, TIMER_IO_WATCHDOG);
2827 static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
2831 && oxu->reclaim
2832 && HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) {
2835 for (last = oxu->reclaim;
2843 } else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim)
2844 end_unlink_async(oxu);
2848 start_unlink_async(oxu, qh);
2857 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
2861 spin_lock(&oxu->lock);
2863 status = readl(&oxu->regs->status);
2867 oxu_dbg(oxu, "device removed\n");
2874 spin_unlock(&oxu->lock);
2879 writel(status, &oxu->regs->status);
2880 readl(&oxu->regs->command); /* unblock posted write */
2885 dbg_status(oxu, "irq", status);
2896 oxu->reclaim_ready = 1;
2902 unsigned i = HCS_N_PORTS(oxu->hcs_params);
2906 if (!(readl(&oxu->regs->command) & CMD_RUN))
2910 int pstatus = readl(&oxu->regs->port_status[i]);
2915 || oxu->reset_done[i] != 0)
2922 oxu->reset_done[i] = jiffies +
2924 oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
2925 mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
2932 status = readl(&oxu->regs->status);
2933 dbg_cmd(oxu, "fatal", readl(&oxu->regs->command));
2934 dbg_status(oxu, "fatal", status);
2936 oxu_err(oxu, "fatal error\n");
2938 ehci_reset(oxu);
2939 writel(0, &oxu->regs->configured_flag);
2949 ehci_work(oxu);
2950 spin_unlock(&oxu->lock);
2958 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
2967 if ((oxu->is_otg && (status & OXU_USBOTGI)) ||
2968 (!oxu->is_otg && (status & OXU_USBSPHI)))
2981 struct oxu_hcd *oxu = from_timer(oxu, t, watchdog);
2984 spin_lock_irqsave(&oxu->lock, flags);
2987 if (oxu->reclaim) {
2988 u32 status = readl(&oxu->regs->status);
2990 oxu_vdbg(oxu, "lost IAA\n");
2991 writel(STS_IAA, &oxu->regs->status);
2992 oxu->reclaim_ready = 1;
2997 if (test_bit(TIMER_ASYNC_OFF, &oxu->actions))
2998 start_unlink_async(oxu, oxu->async);
3000 /* oxu could run by timer, without IRQs ... */
3001 ehci_work(oxu);
3003 spin_unlock_irqrestore(&oxu->lock, flags);
3010 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3015 spin_lock_init(&oxu->lock);
3017 timer_setup(&oxu->watchdog, oxu_watchdog, 0);
3023 oxu->periodic_size = DEFAULT_I_TDPS;
3024 retval = ehci_mem_init(oxu, GFP_KERNEL);
3029 hcc_params = readl(&oxu->caps->hcc_params);
3031 oxu->i_thresh = 8;
3033 oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
3035 oxu->reclaim = NULL;
3036 oxu->reclaim_ready = 0;
3037 oxu->next_uframe = -1;
3046 oxu->async->qh_next.qh = NULL;
3047 oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma);
3048 oxu->async->hw_info1 = cpu_to_le32(QH_HEAD);
3049 oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT);
3050 oxu->async->hw_qtd_next = EHCI_LIST_END;
3051 oxu->async->qh_state = QH_STATE_LINKED;
3052 oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma);
3071 oxu_dbg(oxu, "park %d\n", park);
3078 oxu->command = temp;
3087 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3089 spin_lock_init(&oxu->mem_lock);
3090 INIT_LIST_HEAD(&oxu->urb_list);
3091 oxu->urb_len = 0;
3093 if (oxu->is_otg) {
3094 oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET;
3095 oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \
3096 HC_LENGTH(readl(&oxu->caps->hc_capbase));
3098 oxu->mem = hcd->regs + OXU_SPH_MEM;
3100 oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET;
3101 oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET + \
3102 HC_LENGTH(readl(&oxu->caps->hc_capbase));
3104 oxu->mem = hcd->regs + OXU_OTG_MEM;
3107 oxu->hcs_params = readl(&oxu->caps->hcs_params);
3108 oxu->sbrn = 0x20;
3115 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3122 retval = ehci_reset(oxu);
3124 ehci_mem_cleanup(oxu);
3127 writel(oxu->periodic_dma, &oxu->regs->frame_list);
3128 writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
3130 /* hcc_params controls whether oxu->regs->segment must (!!!)
3141 hcc_params = readl(&oxu->caps->hcc_params);
3143 writel(0, &oxu->regs->segment);
3145 oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE |
3147 oxu->command |= CMD_RUN;
3148 writel(oxu->command, &oxu->regs->command);
3149 dbg_cmd(oxu, "init", oxu->command);
3158 writel(FLAG_CF, &oxu->regs->configured_flag);
3159 readl(&oxu->regs->command); /* unblock posted writes */
3161 temp = HC_VERSION(readl(&oxu->caps->hc_capbase));
3162 oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n",
3163 ((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f),
3167 writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */
3174 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3177 ehci_port_power(oxu, 0);
3180 del_timer_sync(&oxu->watchdog);
3182 spin_lock_irq(&oxu->lock);
3184 ehci_quiesce(oxu);
3186 ehci_reset(oxu);
3187 writel(0, &oxu->regs->intr_enable);
3188 spin_unlock_irq(&oxu->lock);
3191 writel(0, &oxu->regs->configured_flag);
3194 spin_lock_irq(&oxu->lock);
3195 if (oxu->async)
3196 ehci_work(oxu);
3197 spin_unlock_irq(&oxu->lock);
3198 ehci_mem_cleanup(oxu);
3200 dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status));
3209 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3211 (void) ehci_halt(oxu);
3212 ehci_turn_off_all_ports(oxu);
3215 writel(0, &oxu->regs->configured_flag);
3218 readl(&oxu->regs->configured_flag);
3235 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3244 if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
3246 return submit_async(oxu, urb, &qtd_list, mem_flags);
3249 if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
3251 return intr_submit(oxu, urb, &qtd_list, mem_flags);
3255 return itd_submit(oxu, urb, mem_flags);
3257 return sitd_submit(oxu, urb, mem_flags);
3267 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3295 murb = (struct urb *) oxu_murb_alloc(oxu);
3326 murb = (struct urb *) oxu_murb_alloc(oxu);
3357 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3361 spin_lock_irqsave(&oxu->lock, flags);
3369 unlink_async(oxu, qh);
3378 intr_deschedule(oxu, qh);
3381 qh_completions(oxu, qh);
3384 oxu_dbg(oxu, "bogus qh %p state %d\n",
3394 status = qh_schedule(oxu, qh);
3395 spin_unlock_irqrestore(&oxu->lock, flags);
3410 spin_unlock_irqrestore(&oxu->lock, flags);
3418 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3426 spin_lock_irqsave(&oxu->lock, flags);
3435 oxu_vdbg(oxu, "iso delay\n");
3443 for (tmp = oxu->async->qh_next.qh;
3450 unlink_async(oxu, qh);
3454 spin_unlock_irqrestore(&oxu->lock, flags);
3468 oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
3475 spin_unlock_irqrestore(&oxu->lock, flags);
3480 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3482 return (readl(&oxu->regs->frame_index) >> 3) %
3483 oxu->periodic_size;
3489 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3500 ports = HCS_N_PORTS(oxu->hcs_params);
3520 spin_lock_irqsave(&oxu->lock, flags);
3522 temp = readl(&oxu->regs->port_status[i]);
3532 oxu->reset_done[i] = 0;
3534 time_after_eq(jiffies, oxu->reset_done[i]))) {
3543 spin_unlock_irqrestore(&oxu->lock, flags);
3548 static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
3566 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3567 int ports = HCS_N_PORTS(oxu->hcs_params);
3568 u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1];
3581 spin_lock_irqsave(&oxu->lock, flags);
3622 oxu->reset_done[wIndex] = jiffies
3630 if (HCS_PPC(oxu->hcs_params))
3646 readl(&oxu->regs->command); /* unblock posted write */
3649 ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *)
3675 if (!oxu->reset_done[wIndex]) {
3677 oxu->reset_done[wIndex] = jiffies
3680 mod_timer(&oxu_to_hcd(oxu)->rh_timer,
3681 oxu->reset_done[wIndex]);
3686 oxu->reset_done[wIndex])) {
3688 oxu->reset_done[wIndex] = 0;
3694 retval = handshake(oxu, status_reg,
3697 oxu_err(oxu,
3709 oxu->reset_done[wIndex])) {
3711 oxu->reset_done[wIndex] = 0;
3719 retval = handshake(oxu, status_reg,
3722 oxu_err(oxu, "port %d reset error %d\n",
3728 temp = check_reset_complete(oxu, wIndex, status_reg,
3734 test_bit(wIndex, &oxu->companion_ports)) {
3738 oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1);
3751 status |= oxu_port_speed(oxu, temp);
3767 dbg_port(oxu, "GetStatus", wIndex + 1, temp);
3801 if (HCS_PPC(oxu->hcs_params))
3811 oxu_vdbg(oxu, "port %d reset\n", wIndex + 1);
3819 oxu->reset_done[wIndex] = jiffies
3833 ehci_quiesce(oxu);
3834 ehci_halt(oxu);
3842 readl(&oxu->regs->command); /* unblock posted writes */
3850 spin_unlock_irqrestore(&oxu->lock, flags);
3858 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3862 oxu_dbg(oxu, "suspend root hub\n");
3864 if (time_before(jiffies, oxu->next_statechange))
3867 port = HCS_N_PORTS(oxu->hcs_params);
3868 spin_lock_irq(&oxu->lock);
3872 ehci_quiesce(oxu);
3875 oxu->command = readl(&oxu->regs->command);
3876 if (oxu->reclaim)
3877 oxu->reclaim_ready = 1;
3878 ehci_work(oxu);
3885 oxu->bus_suspended = 0;
3887 u32 __iomem *reg = &oxu->regs->port_status[port];
3895 set_bit(port, &oxu->bus_suspended);
3905 oxu_vdbg(oxu, "port %d, %08x -> %08x\n",
3911 spin_unlock_irq(&oxu->lock);
3913 del_timer_sync(&oxu->watchdog);
3914 spin_lock_irq(&oxu->lock);
3915 ehci_halt(oxu);
3922 writel(mask, &oxu->regs->intr_enable);
3923 readl(&oxu->regs->intr_enable);
3925 oxu->next_statechange = jiffies + msecs_to_jiffies(10);
3926 spin_unlock_irq(&oxu->lock);
3933 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3937 if (time_before(jiffies, oxu->next_statechange))
3939 spin_lock_irq(&oxu->lock);
3947 temp = readl(&oxu->regs->intr_enable);
3948 oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss");
3953 writel(0, &oxu->regs->intr_enable);
3956 writel(0, &oxu->regs->segment);
3957 writel(oxu->periodic_dma, &oxu->regs->frame_list);
3958 writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
3961 writel(oxu->command, &oxu->regs->command);
3968 i = HCS_N_PORTS(oxu->hcs_params);
3970 temp = readl(&oxu->regs->port_status[i]);
3973 if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
3974 oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
3977 writel(temp, &oxu->regs->port_status[i]);
3979 i = HCS_N_PORTS(oxu->hcs_params);
3982 temp = readl(&oxu->regs->port_status[i]);
3983 if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
3985 writel(temp, &oxu->regs->port_status[i]);
3986 oxu_vdbg(oxu, "resumed port %d\n", i + 1);
3989 (void) readl(&oxu->regs->command);
3993 if (oxu->async->qh_next.qh)
3995 if (oxu->periodic_sched)
3998 oxu->command |= temp;
3999 writel(oxu->command, &oxu->regs->command);
4002 oxu->next_statechange = jiffies + msecs_to_jiffies(5);
4006 writel(INTR_MASK, &oxu->regs->intr_enable);
4008 spin_unlock_irq(&oxu->lock);
4133 struct oxu_hcd *oxu;
4152 oxu = hcd_to_oxu(hcd);
4153 oxu->is_otg = otg;
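
The oxu_to_hcd() helper listed at source line 493 above, and the hcd_to_oxu() conversions used throughout the rest of the listing, rely on one layout convention: the driver's oxu_hcd state is stored in the hcd_priv area at the tail of struct usb_hcd, so either pointer can be recovered from the other. The following is a minimal, self-contained sketch of that pairing, not the driver's verbatim code; the trimmed-down structs and the local container_of() are stand-ins for the kernel definitions, and since hcd_to_oxu() is not among the listed definitions its body here is an assumption (the inverse cast).

/*
 * User-space model of the oxu_to_hcd()/hcd_to_oxu() pairing (cf. source
 * line 493).  The structs are placeholders kept only to make the pointer
 * arithmetic visible; the real layout is set up by the USB core.
 */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct oxu_hcd {
	int state;			/* placeholder for the driver's fields */
};

struct usb_hcd {
	int flags;			/* placeholder for the core's fields */
	unsigned long hcd_priv[];	/* driver-private area; oxu_hcd lives here */
};

/* Private state -> core hcd: walk back from the hcd_priv member. */
static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
{
	return container_of((void *)oxu, struct usb_hcd, hcd_priv);
}

/* Core hcd -> private state: hcd_priv is where the oxu_hcd was placed. */
static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd)
{
	return (struct oxu_hcd *)hcd->hcd_priv;
}

In the kernel this layout comes from usb_create_hcd(), which allocates sizeof(struct usb_hcd) plus the hcd_priv_size declared in the driver's hc_driver; the two helpers then just convert between the outer and inner views of that single allocation.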
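
A large stretch of the listing (source lines 907 through 1136) is the driver's private slot pools: fixed arrays of qtds, qhs, murbs and data buffers paired with *_used bookkeeping arrays, all claimed and released under oxu->mem_lock, with the free path recovering the slot index by pointer subtraction (e.g. source line 994). The fragment below is a compact user-space model of that pattern under stated assumptions, not the driver's code: the pool size constant and the pthread mutex stand in for the driver's pool sizing and its mem_lock spinlock.

/*
 * Slot-pool sketch modelled on ehci_qtd_alloc()/oxu_qtd_free() above:
 * a fixed object array plus a parallel "used" array, both touched only
 * while holding one lock.
 */
#include <pthread.h>
#include <stddef.h>

#define QTD_POOL_SIZE 32		/* stand-in for the driver's pool size */

struct ehci_qtd {
	void *buffer;			/* placeholder payload */
};

static struct ehci_qtd qtd_pool[QTD_POOL_SIZE];
static unsigned char qtd_used[QTD_POOL_SIZE];
static pthread_mutex_t mem_lock = PTHREAD_MUTEX_INITIALIZER;

static struct ehci_qtd *qtd_alloc(void)
{
	struct ehci_qtd *qtd = NULL;
	int i;

	pthread_mutex_lock(&mem_lock);
	for (i = 0; i < QTD_POOL_SIZE; i++) {
		if (!qtd_used[i]) {	/* first free slot wins */
			qtd_used[i] = 1;
			qtd = &qtd_pool[i];
			break;
		}
	}
	pthread_mutex_unlock(&mem_lock);
	return qtd;			/* NULL when the pool is exhausted */
}

static void qtd_free(struct ehci_qtd *qtd)
{
	/* Recover the slot index from the pointer, as the driver does. */
	ptrdiff_t index = qtd - &qtd_pool[0];

	pthread_mutex_lock(&mem_lock);
	qtd_used[index] = 0;
	pthread_mutex_unlock(&mem_lock);
}

In the driver the pools are carved out of the controller's local memory window (oxu->mem, set up at source lines 3098 and 3104 above), so allocation amounts to claiming a slot in that fixed region rather than calling a general-purpose allocator.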