Lines matching defs:imx21 (drivers/usb/host/imx21-hcd.c)

50 #include "imx21-hcd.h"
57 #define DEBUG_LOG_FRAME(imx21, etd, event) \
58 (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
60 #define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
63 static const char hcd_name[] = "imx21-hcd";
65 static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
67 return (struct imx21 *)hcd->hcd_priv;
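
The cast on line 67 works because usbcore over-allocates struct usb_hcd by hcd_priv_size bytes (declared at line 1772) and hands the driver the tail as hcd->hcd_priv. A minimal, runnable user-space sketch of that tail-allocation pattern; the names fake_hcd and fake_priv are hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    struct fake_hcd {
            int core_state;
            unsigned long priv[];   /* flexible tail, like hcd->hcd_priv */
    };

    struct fake_priv {
            int etd_count;
    };

    static struct fake_priv *hcd_to_priv(struct fake_hcd *hcd)
    {
            return (struct fake_priv *)hcd->priv;   /* same cast as line 67 */
    }

    int main(void)
    {
            /* usb_create_hcd() does the equivalent over-allocation. */
            struct fake_hcd *hcd =
                    calloc(1, sizeof(*hcd) + sizeof(struct fake_priv));

            if (!hcd)
                    return 1;
            hcd_to_priv(hcd)->etd_count = 32;
            printf("priv at %p, etd_count=%d\n",
                   (void *)hcd_to_priv(hcd), hcd_to_priv(hcd)->etd_count);
            free(hcd);
            return 0;
    }
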
75 static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
77 void __iomem *reg = imx21->regs + offset;
81 static inline void clear_register_bits(struct imx21 *imx21,
84 void __iomem *reg = imx21->regs + offset;
88 static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
90 void __iomem *reg = imx21->regs + offset;
96 static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
98 void __iomem *reg = imx21->regs + offset;
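
Only the opening line of each helper body survives in the listing. The split between *_register_bits and *_toggle_bit reflects two kinds of hardware register: ordinary ones that want read-modify-write, and "toggle" registers where writing a 1 flips the bit. A hedged reconstruction of one helper of each kind:

    /* Ordinary register: read-modify-write. */
    static inline void set_register_bits(struct imx21 *imx21, u32 offset,
            u32 mask)
    {
            void __iomem *reg = imx21->regs + offset;

            writel(readl(reg) | mask, reg);
    }

    /* Toggle register: writing 1 inverts the bit, so write the mask
     * only when the bit is not already in the desired state. */
    static inline void set_toggle_bit(struct imx21 *imx21, u32 offset,
            u32 mask)
    {
            void __iomem *reg = imx21->regs + offset;

            if (!(readl(reg) & mask))
                    writel(mask, reg);
    }
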
104 static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
106 writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
109 static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
111 return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
127 struct imx21 *imx21 = hcd_to_imx21(hcd);
129 return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
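
USBH_FRMNUB is a hardware frame counter of limited width, so the wrap_frame() call on line 129, and any frame comparison in the scheduler, must use modular arithmetic. A runnable sketch of the usual helpers; the 16-bit mask and the frame_after() name are assumptions, not taken from the listing:

    #include <assert.h>

    #define FRAME_MASK 0xffff               /* assumed counter width */

    static int wrap_frame(int counter)
    {
            return counter & FRAME_MASK;
    }

    /* True if 'frame' is later than 'after', tolerating one wrap. */
    static int frame_after(int frame, int after)
    {
            return wrap_frame(frame - after) < (FRAME_MASK >> 1);
    }

    int main(void)
    {
            assert(frame_after(10, 5));       /* plain ordering */
            assert(frame_after(3, 0xfffe));   /* counter wrapped past 0xffff */
            assert(!frame_after(0xfffe, 3));
            return 0;
    }
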
137 #include "imx21-dbg.c"
140 struct imx21 *imx21, struct etd_priv *etd, int status);
141 static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
142 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
148 static int alloc_etd(struct imx21 *imx21)
151 struct etd_priv *etd = imx21->etd;
155 memset(etd, 0, sizeof(imx21->etd[0]));
157 debug_etd_allocated(imx21);
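
alloc_etd (line 148) is a linear scan of the fixed pool of hardware ETDs for a slot whose alloc flag is clear; free_etd (line 194) validates the index and flag before zeroing the slot. A runnable sketch of the scheme; the pool size and field layout are assumptions from context:

    #include <stdio.h>
    #include <string.h>

    #define USB_NUM_ETD 32                  /* assumed pool size */

    struct etd_priv {
            int alloc;                      /* nonzero while in use */
    };

    static struct etd_priv etd_pool[USB_NUM_ETD];

    static int alloc_etd(void)
    {
            for (int i = 0; i < USB_NUM_ETD; i++) {
                    if (etd_pool[i].alloc == 0) {
                            memset(&etd_pool[i], 0, sizeof(etd_pool[0]));
                            etd_pool[i].alloc = 1;
                            return i;
                    }
            }
            return -1;                      /* exhausted: caller must queue */
    }

    static void free_etd(int num)
    {
            /* Mirrors the "BAD etd" / "already free" checks, lines 200-204. */
            if (num < 0 || num >= USB_NUM_ETD || etd_pool[num].alloc == 0)
                    return;
            memset(&etd_pool[num], 0, sizeof(etd_pool[0]));
    }

    int main(void)
    {
            int a = alloc_etd(), b = alloc_etd();

            printf("allocated %d and %d\n", a, b);
            free_etd(a);
            printf("slot %d reused\n", alloc_etd());
            return 0;
    }
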
164 static void disactivate_etd(struct imx21 *imx21, int num)
167 struct etd_priv *etd = &imx21->etd[num];
169 writel(etd_mask, imx21->regs + USBH_ETDENCLR);
170 clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
171 writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
172 clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
176 DEBUG_LOG_FRAME(imx21, etd, disactivated);
179 static void reset_etd(struct imx21 *imx21, int num)
181 struct etd_priv *etd = imx21->etd + num;
184 disactivate_etd(imx21, num);
187 etd_writel(imx21, num, i, 0);
194 static void free_etd(struct imx21 *imx21, int num)
200 dev_err(imx21->dev, "BAD etd=%d!\n", num);
203 if (imx21->etd[num].alloc == 0) {
204 dev_err(imx21->dev, "ETD %d already free!\n", num);
208 debug_etd_freed(imx21);
209 reset_etd(imx21, num);
210 memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
214 static void setup_etd_dword0(struct imx21 *imx21,
217 etd_writel(imx21, etd_num, 0,
232 struct imx21 *imx21, int dmem_offset, void *src, int count)
234 void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
254 static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
257 struct etd_priv *etd = &imx21->etd[etd_num];
264 copy_to_dmem(imx21,
285 dev_err(imx21->dev, "failed bounce alloc\n");
290 dma_map_single(imx21->dev,
294 if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
295 dev_err(imx21->dev, "failed bounce map\n");
301 clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
302 set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
303 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
304 clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
307 set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
308 clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
309 clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
310 writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
311 set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
315 set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
316 set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
320 DEBUG_LOG_FRAME(imx21, etd, activated);
325 etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
331 etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
336 writel(etd_mask, imx21->regs + USBH_ETDENSET);
343 free_dmem(imx21, etd);
344 nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
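
Lines 285-295 are activate_etd's bounce-buffer fallback: when the URB's own buffer cannot be used for DMA, the driver allocates a kernel buffer, maps it, and verifies the mapping before pointing the ETD's DMA address register at it (line 310). A hedged kernel-style sketch of that idiom; the locals bounce, dir_out, urb_buf and count are illustrative, not the driver's names:

    void *bounce = kmalloc(count, GFP_ATOMIC);  /* atomic: under the lock */
    if (!bounce) {
            dev_err(imx21->dev, "failed bounce alloc\n");
            goto err_bounce;                    /* label name assumed */
    }
    if (dir_out)
            memcpy(bounce, urb_buf, count);     /* stage outgoing data */

    etd->dma_handle = dma_map_single(imx21->dev, bounce, count,
                    dir_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
    if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
            dev_err(imx21->dev, "failed bounce map\n");
            kfree(bounce);
            goto err_bounce;
    }
    /* etd->dma_handle is then written to USB_ETDSMSA(etd_num); on the
     * error path the URB completes with -ENOMEM, lines 343-344. */
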
351 static int alloc_dmem(struct imx21 *imx21, unsigned int size,
361 dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
366 list_for_each_entry(tmp, &imx21->dmem_list, list) {
384 debug_dmem_allocated(imx21, size);
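
alloc_dmem (line 351) hands out pieces of the controller's small on-chip data memory with a first-fit scan of the sorted dmem_list (line 366); when nothing fits, callers park the ETD on queue_for_dmem instead. A runnable first-fit sketch over a sorted table; DMEM_SIZE and the bookkeeping layout are assumptions:

    #include <stdio.h>

    #define DMEM_SIZE 4096                  /* assumed on-chip memory size */
    #define MAX_AREAS 16

    struct dmem_area { unsigned int offset, size; };

    static struct dmem_area areas[MAX_AREAS];   /* kept sorted by offset */
    static int nareas;

    /* First fit: take the first gap between allocated areas that is
     * big enough; -1 stands in for "queue for DMEM". */
    static int alloc_dmem(unsigned int size)
    {
            unsigned int offset = 0;        /* end of the previous area */
            int i, j;

            if (nareas == MAX_AREAS)
                    return -1;

            for (i = 0; i < nareas; i++) {
                    if (areas[i].offset - offset >= size)
                            break;          /* gap before areas[i] fits */
                    offset = areas[i].offset + areas[i].size;
            }
            if (i == nareas && DMEM_SIZE - offset < size)
                    return -1;

            for (j = nareas; j > i; j--)    /* insert at position i */
                    areas[j] = areas[j - 1];
            areas[i].offset = offset;
            areas[i].size = size;
            nareas++;
            return (int)offset;
    }

    int main(void)
    {
            printf("a=%d b=%d c=%d\n",
                   alloc_dmem(512), alloc_dmem(1024), alloc_dmem(4096));
            return 0;
    }
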
392 static void activate_queued_etd(struct imx21 *imx21,
396 int etd_num = etd - &imx21->etd[0];
397 u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
398 u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;
400 dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
402 etd_writel(imx21, etd_num, 1,
407 activate_etd(imx21, etd_num, dir);
410 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
422 list_for_each_entry(area, &imx21->dmem_list, list) {
424 debug_dmem_freed(imx21, area->size);
433 dev_err(imx21->dev,
439 list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
440 offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
443 activate_queued_etd(imx21, etd, (u32)offset);
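
free_dmem does double duty: after removing the freed area from dmem_list it walks queue_for_dmem (lines 439-443) and retries allocation for every ETD that stalled waiting for memory, activating those that now fit. A sketch of that drain loop built around the lines shown; the list_del() and the offset test are reconstructed:

    struct etd_priv *etd, *tmp;
    int offset;

    list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
            offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
            if (offset >= 0) {
                    list_del(&etd->queue);
                    activate_queued_etd(imx21, etd, (u32)offset);
            }
    }
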
448 static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
452 list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
454 dev_err(imx21->dev,
469 static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
479 etd = &imx21->etd[etd_num];
482 free_dmem(imx21, etd); /* for isoc */
484 if (list_empty(&imx21->queue_for_etd)) {
485 free_etd(imx21, etd_num);
489 dev_dbg(imx21->dev,
491 ep_priv = list_first_entry(&imx21->queue_for_etd,
494 reset_etd(imx21, etd_num);
499 dev_err(imx21->dev, "No urb for queued ep!\n");
502 schedule_nonisoc_etd(imx21, list_first_entry(
508 __releases(imx21->lock)
509 __acquires(imx21->lock)
511 struct imx21 *imx21 = hcd_to_imx21(hcd);
515 debug_urb_completed(imx21, urb, status);
516 dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
522 spin_unlock(&imx21->lock);
524 spin_lock(&imx21->lock);
526 ep_idle(imx21, ep_priv);
530 struct imx21 *imx21, struct etd_priv *etd, int status)
534 urb_done(imx21->hcd, etd->urb, status);
541 dev_vdbg(imx21->dev, "next URB %p\n", urb);
542 schedule_nonisoc_etd(imx21, urb);
554 struct imx21 *imx21 = hcd_to_imx21(hcd);
573 etd = &imx21->etd[etd_num];
583 dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
602 debug_isoc_submitted(imx21, cur_frame, td);
605 setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
606 etd_writel(imx21, etd_num, 1, etd->dmem_offset);
607 etd_writel(imx21, etd_num, 2,
610 etd_writel(imx21, etd_num, 3,
614 activate_etd(imx21, etd_num, dir);
620 struct imx21 *imx21 = hcd_to_imx21(hcd);
622 struct etd_priv *etd = imx21->etd + etd_num;
633 disactivate_etd(imx21, etd_num);
635 cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
636 bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;
647 debug_isoc_completed(imx21,
651 dev_dbg(imx21->dev,
659 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
662 imx21->regs + USBOTG_DMEM + etd->dmem_offset,
681 struct imx21 *imx21, struct usb_host_endpoint *ep)
699 static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
707 etd_num = alloc_etd(imx21);
712 imx21->etd[etd_num].ep = ep_priv->ep;
718 dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
720 free_etd(imx21, ep_priv->etd[j]);
730 struct imx21 *imx21 = hcd_to_imx21(hcd);
751 spin_lock_irqsave(&imx21->lock, flags);
754 ep_priv = alloc_isoc_ep(imx21, ep);
763 ret = alloc_isoc_etds(imx21, ep_priv);
780 struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
784 dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
791 etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
793 dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
811 dev_dbg(imx21->dev,
844 dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
847 debug_urb_submitted(imx21, urb);
850 spin_unlock_irqrestore(&imx21->lock, flags);
859 spin_unlock_irqrestore(&imx21->lock, flags);
867 static void dequeue_isoc_urb(struct imx21 *imx21,
877 if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
878 struct etd_priv *etd = imx21->etd + etd_num;
880 reset_etd(imx21, etd_num);
881 free_dmem(imx21, etd);
888 dev_vdbg(imx21->dev, "removing td %p\n", td);
898 static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
916 dev_err(imx21->dev, "No valid ETD\n");
919 if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
920 dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);
922 etd = &imx21->etd[etd_num];
931 usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
948 usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);
976 relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
980 setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
982 etd_writel(imx21, etd_num, 2,
998 etd_writel(imx21, etd_num, 3,
1006 etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
1009 etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
1011 dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
1012 debug_urb_queued_for_dmem(imx21, urb);
1013 list_add_tail(&etd->queue, &imx21->queue_for_dmem);
1017 etd_writel(imx21, etd_num, 1,
1024 dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
1026 activate_etd(imx21, etd_num, dir);
1032 struct imx21 *imx21 = hcd_to_imx21(hcd);
1033 struct etd_priv *etd = &imx21->etd[etd_num];
1042 disactivate_etd(imx21, etd_num);
1044 dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
1045 cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
1046 bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
1051 (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);
1054 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
1055 clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
1059 dma_unmap_single(imx21->dev,
1063 imx21->regs + USBOTG_DMEM + etd->dmem_offset,
1070 free_dmem(imx21, etd);
1078 dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);
1099 dev_err(imx21->dev,
1128 nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
1130 dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
1131 schedule_nonisoc_etd(imx21, urb);
1154 struct imx21 *imx21 = hcd_to_imx21(hcd);
1162 dev_vdbg(imx21->dev,
1177 spin_lock_irqsave(&imx21->lock, flags);
1209 debug_urb_submitted(imx21, urb);
1212 dev_dbg(imx21->dev,
1215 debug_urb_queued_for_etd(imx21, urb);
1218 ep_priv->etd[0] = alloc_etd(imx21);
1220 dev_dbg(imx21->dev,
1222 debug_urb_queued_for_etd(imx21, urb);
1223 list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
1230 etd = &imx21->etd[ep_priv->etd[0]];
1232 DEBUG_LOG_FRAME(imx21, etd, last_req);
1233 schedule_nonisoc_etd(imx21, urb);
1237 spin_unlock_irqrestore(&imx21->lock, flags);
1242 spin_unlock_irqrestore(&imx21->lock, flags);
1250 struct imx21 *imx21 = hcd_to_imx21(hcd);
1257 dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
1260 spin_lock_irqsave(&imx21->lock, flags);
1268 debug_urb_unlinked(imx21, urb);
1271 dequeue_isoc_urb(imx21, urb, ep_priv);
1276 struct etd_priv *etd = &imx21->etd[etd_num];
1278 disactivate_etd(imx21, etd_num);
1279 free_dmem(imx21, etd);
1288 spin_unlock_irqrestore(&imx21->lock, flags);
1292 spin_unlock_irqrestore(&imx21->lock, flags);
1300 static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
1306 spin_lock_irqsave(&imx21->lock, flags);
1310 u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
1311 u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
1312 struct etd_priv *etd = &imx21->etd[etd_num];
1316 DEBUG_LOG_FRAME(imx21, etd, last_int);
1343 cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
1350 dword0 = etd_readl(imx21, etd_num, 0);
1351 dev_dbg(imx21->dev,
1358 dev_dbg(imx21->dev,
1365 readl(imx21->regs + USBH_FRMNUB));
1366 imx21->debug_unblocks++;
1373 dev_dbg(imx21->dev,
1377 disactivate_etd(imx21, etd_num);
1389 set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1391 clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1394 spin_unlock_irqrestore(&imx21->lock, flags);
1399 struct imx21 *imx21 = hcd_to_imx21(hcd);
1400 u32 ints = readl(imx21->regs + USBH_SYSISR);
1403 dev_dbg(imx21->dev, "Scheduling error\n");
1406 dev_dbg(imx21->dev, "Scheduling overrun\n");
1409 process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
1411 writel(ints, imx21->regs + USBH_SYSISR);
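
imx21_irq reads USBH_SYSISR once, dispatches on the status bits, and writes the same value back (line 1411), which implies write-one-to-clear semantics. A hedged sketch of the handler's shape; only USBH_SYSISR_SOFINT appears in the listing, the other bit names are assumptions:

    static irqreturn_t imx21_irq(int irq, void *dev_id)
    {
            struct usb_hcd *hcd = dev_id;
            struct imx21 *imx21 = hcd_to_imx21(hcd);
            u32 ints = readl(imx21->regs + USBH_SYSISR);

            if (ints & SYSISR_SCHED_ERR)        /* assumed bit name */
                    dev_dbg(imx21->dev, "Scheduling error\n");
            if (ints & SYSISR_SCHED_OVERRUN)    /* assumed bit name */
                    dev_dbg(imx21->dev, "Scheduling overrun\n");

            process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);

            /* Write-one-to-clear: ack exactly the bits we sampled. */
            writel(ints, imx21->regs + USBH_SYSISR);
            return IRQ_HANDLED;
    }
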
1418 struct imx21 *imx21 = hcd_to_imx21(hcd);
1426 spin_lock_irqsave(&imx21->lock, flags);
1428 dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
1431 dev_dbg(imx21->dev, "ep's URB list is not empty\n");
1436 dev_dbg(imx21->dev, "free etd %d for disable\n",
1439 free_etd(imx21, ep_priv->etd[i]);
1446 if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
1447 dev_err(imx21->dev,
1449 free_etd(imx21, i);
1452 free_epdmem(imx21, ep);
1453 spin_unlock_irqrestore(&imx21->lock, flags);
1463 struct imx21 *imx21 = hcd_to_imx21(hcd);
1467 desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
1482 struct imx21 *imx21 = hcd_to_imx21(hcd);
1488 spin_lock_irqsave(&imx21->lock, flags);
1489 ports = readl(imx21->regs + USBH_ROOTHUBA)
1493 dev_err(imx21->dev, "ports %d > 7\n", ports);
1496 if (readl(imx21->regs + USBH_PORTSTAT(i)) &
1507 spin_unlock_irqrestore(&imx21->lock, flags);
1510 dev_info(imx21->dev, "Hub status changed\n");
1518 struct imx21 *imx21 = hcd_to_imx21(hcd);
1524 dev_dbg(imx21->dev, "ClearHubFeature\n");
1527 dev_dbg(imx21->dev, " OVER_CURRENT\n");
1530 dev_dbg(imx21->dev, " LOCAL_POWER\n");
1533 dev_dbg(imx21->dev, " unknown\n");
1540 dev_dbg(imx21->dev, "ClearPortFeature\n");
1543 dev_dbg(imx21->dev, " ENABLE\n");
1547 dev_dbg(imx21->dev, " SUSPEND\n");
1551 dev_dbg(imx21->dev, " POWER\n");
1555 dev_dbg(imx21->dev, " C_ENABLE\n");
1559 dev_dbg(imx21->dev, " C_SUSPEND\n");
1563 dev_dbg(imx21->dev, " C_CONNECTION\n");
1567 dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
1571 dev_dbg(imx21->dev, " C_RESET\n");
1575 dev_dbg(imx21->dev, " unknown\n");
1583 dev_dbg(imx21->dev, "GetHubDescriptor\n");
1588 dev_dbg(imx21->dev, " GetHubStatus\n");
1593 dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
1595 *(__le32 *) buf = readl(imx21->regs +
1600 dev_dbg(imx21->dev, "SetHubFeature\n");
1603 dev_dbg(imx21->dev, " OVER_CURRENT\n");
1607 dev_dbg(imx21->dev, " LOCAL_POWER\n");
1610 dev_dbg(imx21->dev, " unknown\n");
1618 dev_dbg(imx21->dev, "SetPortFeature\n");
1621 dev_dbg(imx21->dev, " SUSPEND\n");
1625 dev_dbg(imx21->dev, " POWER\n");
1629 dev_dbg(imx21->dev, " RESET\n");
1633 dev_dbg(imx21->dev, " unknown\n");
1640 dev_dbg(imx21->dev, " unknown\n");
1646 writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
1656 struct imx21 *imx21 = hcd_to_imx21(hcd);
1660 spin_lock_irqsave(&imx21->lock, flags);
1665 imx21->regs + USBOTG_RST_CTRL);
1669 while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
1671 spin_unlock_irqrestore(&imx21->lock, flags);
1672 dev_err(imx21->dev, "timeout waiting for reset\n");
1675 spin_unlock_irq(&imx21->lock);
1677 spin_lock_irq(&imx21->lock);
1679 spin_unlock_irqrestore(&imx21->lock, flags);
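
imx21_hc_reset (line 1656) starts the block reset via USBOTG_RST_CTRL and then polls until the hardware clears the register, sleeping with the lock dropped so the IRQ path stays live, and failing on a deadline (lines 1669-1677). A hedged sketch of the poll; the deadline length and the sleep primitive are assumptions:

    unsigned long timeout = jiffies + HZ;       /* assumed ~1s deadline */

    while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
            if (time_after(jiffies, timeout)) {
                    spin_unlock_irqrestore(&imx21->lock, flags);
                    dev_err(imx21->dev, "timeout waiting for reset\n");
                    return -ETIMEDOUT;
            }
            /* Sleep with the lock released, as lines 1675-1677 show. */
            spin_unlock_irq(&imx21->lock);
            schedule_timeout_uninterruptible(1);
            spin_lock_irq(&imx21->lock);
    }
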
1685 struct imx21 *imx21 = hcd_to_imx21(hcd);
1691 hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
1693 hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
1696 if (imx21->pdata->host1_txenoe)
1699 if (!imx21->pdata->host1_xcverless)
1702 if (imx21->pdata->otg_ext_xcvr)
1706 spin_lock_irqsave(&imx21->lock, flags);
1709 imx21->regs + USBOTG_CLK_CTRL);
1710 writel(hw_mode, imx21->regs + USBOTG_HWMODE);
1711 writel(usb_control, imx21->regs + USBCTRL);
1713 imx21->regs + USB_MISCCONTROL);
1718 etd_writel(imx21, i, j, 0);
1722 imx21->regs + USBH_HOST_CTRL);
1725 if (imx21->pdata->enable_otg_host)
1727 imx21->regs + USBH_PORTSTAT(0));
1729 if (imx21->pdata->enable_host1)
1731 imx21->regs + USBH_PORTSTAT(1));
1733 if (imx21->pdata->enable_host2)
1735 imx21->regs + USBH_PORTSTAT(2));
1741 set_register_bits(imx21, USBH_SYSIEN,
1744 set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1746 spin_unlock_irqrestore(&imx21->lock, flags);
1753 struct imx21 *imx21 = hcd_to_imx21(hcd);
1756 spin_lock_irqsave(&imx21->lock, flags);
1758 writel(0, imx21->regs + USBH_SYSIEN);
1759 clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1760 clear_register_bits(imx21, USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN,
1762 spin_unlock_irqrestore(&imx21->lock, flags);
1772 .hcd_priv_size = sizeof(struct imx21),
1807 struct imx21 *imx21 = hcd_to_imx21(hcd);
1810 remove_debug_files(imx21);
1814 clk_disable_unprepare(imx21->clk);
1815 clk_put(imx21->clk);
1816 iounmap(imx21->regs);
1828 struct imx21 *imx21;
1850 imx21 = hcd_to_imx21(hcd);
1851 imx21->hcd = hcd;
1852 imx21->dev = &pdev->dev;
1853 imx21->pdata = dev_get_platdata(&pdev->dev);
1854 if (!imx21->pdata)
1855 imx21->pdata = &default_pdata;
1857 spin_lock_init(&imx21->lock);
1858 INIT_LIST_HEAD(&imx21->dmem_list);
1859 INIT_LIST_HEAD(&imx21->queue_for_etd);
1860 INIT_LIST_HEAD(&imx21->queue_for_dmem);
1861 create_debug_files(imx21);
1869 imx21->regs = ioremap(res->start, resource_size(res));
1870 if (imx21->regs == NULL) {
1871 dev_err(imx21->dev, "Cannot map registers\n");
1877 imx21->clk = clk_get(imx21->dev, NULL);
1878 if (IS_ERR(imx21->clk)) {
1879 dev_err(imx21->dev, "no clock found\n");
1880 ret = PTR_ERR(imx21->clk);
1884 ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
1887 ret = clk_prepare_enable(imx21->clk);
1891 dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
1892 (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
1896 dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
1904 clk_disable_unprepare(imx21->clk);
1907 clk_put(imx21->clk);
1909 iounmap(imx21->regs);
1913 remove_debug_files(imx21);
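
The probe's clock setup (lines 1877-1887) is the standard clk sequence: get the clock, round the USB-mandated 48 MHz target to a rate the clock tree can actually produce, set it, then prepare and enable; the error path (lines 1904-1907) unwinds with clk_disable_unprepare() and clk_put(). A condensed, hedged sketch; the label name is assumed:

    imx21->clk = clk_get(imx21->dev, NULL);
    if (IS_ERR(imx21->clk))
            return PTR_ERR(imx21->clk);     /* real code jumps to a label */

    /* Round first so clk_set_rate() is asked for an achievable rate. */
    ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
    if (ret)
            goto failed_clock_set;          /* assumed label */
    ret = clk_prepare_enable(imx21->clk);
    if (ret)
            goto failed_clock_set;
    ...
    failed_clock_set:
            clk_put(imx21->clk);
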
1933 MODULE_ALIAS("platform:imx21-hcd");