Lines Matching defs:host (in linux/drivers/mmc/host/au1xmmc.c)

3  * linux/drivers/mmc/host/au1xmmc.c - AU1XX0 MMC driver
43 #include <linux/mmc/host.h>
124 /* Status flags used by the host structure */
164 static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
166 u32 val = __raw_readl(HOST_CONFIG(host));
168 __raw_writel(val, HOST_CONFIG(host));
172 static inline void FLUSH_FIFO(struct au1xmmc_host *host)
174 u32 val = __raw_readl(HOST_CONFIG2(host));
176 __raw_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
183 __raw_writel(val, HOST_CONFIG2(host));
187 static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
189 u32 val = __raw_readl(HOST_CONFIG(host));
191 __raw_writel(val, HOST_CONFIG(host));
195 static inline void SEND_STOP(struct au1xmmc_host *host)
199 WARN_ON(host->status != HOST_S_DATA);
200 host->status = HOST_S_STOP;
202 config2 = __raw_readl(HOST_CONFIG2(host));
203 __raw_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
207 __raw_writel(STOP_CMD, HOST_CMD(host));
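
The IRQ_ON, FLUSH_FIFO, IRQ_OFF and SEND_STOP helpers above are all read-modify-write accessors on the controller's SD_CONFIG/SD_CONFIG2 registers. Since the listing only shows the lines that mention host, the middle of each body is elided; below is a minimal sketch of the likely IRQ_ON/IRQ_OFF pair, assuming the driver's own HOST_CONFIG() macro and SD_CONFIG_* bit masks:

static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
{
	u32 val = __raw_readl(HOST_CONFIG(host));

	val |= mask;				/* unmask the requested interrupt sources */
	__raw_writel(val, HOST_CONFIG(host));
	wmb();					/* __raw_writel() is unordered; drain the write buffer */
}

static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
{
	u32 val = __raw_readl(HOST_CONFIG(host));

	val &= ~mask;				/* mask the requested interrupt sources */
	__raw_writel(val, HOST_CONFIG(host));
	wmb();
}
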
211 static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
213 if (host->platdata && host->platdata->set_power)
214 host->platdata->set_power(host->mmc, state);
219 struct au1xmmc_host *host = mmc_priv(mmc);
221 if (host->platdata && host->platdata->card_inserted)
222 return !!host->platdata->card_inserted(host->mmc);
229 struct au1xmmc_host *host = mmc_priv(mmc);
231 if (host->platdata && host->platdata->card_readonly)
232 return !!host->platdata->card_readonly(mmc);
237 static void au1xmmc_finish_request(struct au1xmmc_host *host)
239 struct mmc_request *mrq = host->mrq;
241 host->mrq = NULL;
242 host->flags &= HOST_F_ACTIVE | HOST_F_DMA;
244 host->dma.len = 0;
245 host->dma.dir = 0;
247 host->pio.index = 0;
248 host->pio.offset = 0;
249 host->pio.len = 0;
251 host->status = HOST_S_IDLE;
253 mmc_request_done(host->mmc, mrq);
258 struct au1xmmc_host *host = from_tasklet(host, t, finish_task);
259 au1xmmc_finish_request(host);
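
The finish tasklet above recovers its au1xmmc_host with from_tasklet(), which is container_of() specialised for tasklets; the matching tasklet_setup() calls appear later in the probe path (source lines 1042 and 1044). A short sketch of that pairing, assuming finish_task is a struct tasklet_struct member of struct au1xmmc_host, as the from_tasklet() arguments imply:

#include <linux/interrupt.h>

/* The callback receives the embedded tasklet_struct and walks back to the
 * structure that contains it. */
static void au1xmmc_tasklet_finish(struct tasklet_struct *t)
{
	struct au1xmmc_host *host = from_tasklet(host, t, finish_task);

	au1xmmc_finish_request(host);
}

/* registered once at probe time:
 *	tasklet_setup(&host->finish_task, au1xmmc_tasklet_finish);
 */
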
262 static int au1xmmc_send_command(struct au1xmmc_host *host,
302 __raw_writel(cmd->arg, HOST_CMDARG(host));
305 __raw_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
309 while (__raw_readl(HOST_CMD(host)) & SD_CMD_GO)
315 static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
317 struct mmc_request *mrq = host->mrq;
321 WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP));
323 if (host->mrq == NULL)
329 status = __raw_readl(HOST_STATUS(host));
332 while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
333 status = __raw_readl(HOST_STATUS(host));
336 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);
340 if (host->flags & HOST_F_XMIT)
347 __raw_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));
352 if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
353 u32 chan = DMA_CHANNEL(host);
360 (data->blocks * data->blksz) - host->pio.len;
363 au1xmmc_finish_request(host);
368 struct au1xmmc_host *host = from_tasklet(host, t, data_task);
370 u32 status = __raw_readl(HOST_STATUS(host));
371 au1xmmc_data_complete(host, status);
376 static void au1xmmc_send_pio(struct au1xmmc_host *host)
384 data = host->mrq->data;
386 if (!(host->flags & HOST_F_XMIT))
390 sg = &data->sg[host->pio.index];
391 sg_ptr = kmap_local_page(sg_page(sg)) + sg->offset + host->pio.offset;
394 sg_len = data->sg[host->pio.index].length - host->pio.offset;
397 max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
402 status = __raw_readl(HOST_STATUS(host));
409 __raw_writel((unsigned long)val, HOST_TXPORT(host));
414 host->pio.len -= count;
415 host->pio.offset += count;
418 host->pio.index++;
419 host->pio.offset = 0;
422 if (host->pio.len == 0) {
423 IRQ_OFF(host, SD_CONFIG_TH);
425 if (host->flags & HOST_F_STOP)
426 SEND_STOP(host);
428 tasklet_schedule(&host->data_task);
432 static void au1xmmc_receive_pio(struct au1xmmc_host *host)
440 data = host->mrq->data;
442 if (!(host->flags & HOST_F_RECV))
445 max = host->pio.len;
447 if (host->pio.index < host->dma.len) {
448 sg = &data->sg[host->pio.index];
449 sg_ptr = kmap_local_page(sg_page(sg)) + sg->offset + host->pio.offset;
452 sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;
463 status = __raw_readl(HOST_STATUS(host));
469 DBG("RX CRC Error [%d + %d].\n", host->pdev->id,
470 host->pio.len, count);
475 DBG("RX Overrun [%d + %d]\n", host->pdev->id,
476 host->pio.len, count);
480 DBG("RX Underrun [%d + %d]\n", host->pdev->id,
481 host->pio.len, count);
485 val = __raw_readl(HOST_RXPORT(host));
493 host->pio.len -= count;
494 host->pio.offset += count;
497 host->pio.index++;
498 host->pio.offset = 0;
501 if (host->pio.len == 0) {
502 /* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */
503 IRQ_OFF(host, SD_CONFIG_NE);
505 if (host->flags & HOST_F_STOP)
506 SEND_STOP(host);
508 tasklet_schedule(&host->data_task);
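
Both PIO paths above follow the same shape: map the current scatterlist segment with kmap_local_page(), move data one byte at a time through the TX/RX FIFO port while the status register reports space/data, advance the pio cursor, and once the transfer is exhausted mask the FIFO interrupt, optionally queue the stop command, and schedule data_task. A condensed sketch of the receive side under those assumptions (the register macros, status bits and pio/dma bookkeeping fields are the driver's own; the CRC/overrun/underrun handling visible at source lines 469-481 is omitted):

static void pio_receive_sketch(struct au1xmmc_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = &data->sg[host->pio.index];
	unsigned char *sg_ptr;
	int count, sg_len, max;

	sg_ptr = kmap_local_page(sg_page(sg)) + sg->offset + host->pio.offset;

	sg_len = sg->length - host->pio.offset;
	max = (sg_len > host->pio.len) ? host->pio.len : sg_len;

	for (count = 0; count < max; count++) {
		u32 status = __raw_readl(HOST_STATUS(host));

		if (!(status & SD_STATUS_NE))		/* RX FIFO empty */
			break;

		*sg_ptr++ = (unsigned char)(__raw_readl(HOST_RXPORT(host)) & 0xFF);
	}
	kunmap_local(sg_ptr);

	host->pio.len -= count;
	host->pio.offset += count;

	if (count == sg_len) {				/* segment finished, move to the next one */
		host->pio.index++;
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {			/* whole transfer drained */
		IRQ_OFF(host, SD_CONFIG_NE);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
	}
}
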
515 static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
517 struct mmc_request *mrq = host->mrq;
522 if (!host->mrq)
530 r[0] = __raw_readl(host->iobase + SD_RESP3);
531 r[1] = __raw_readl(host->iobase + SD_RESP2);
532 r[2] = __raw_readl(host->iobase + SD_RESP1);
533 r[3] = __raw_readl(host->iobase + SD_RESP0);
552 cmd->resp[0] = __raw_readl(host->iobase + SD_RESP0);
560 trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);
563 IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
564 tasklet_schedule(&host->finish_task);
568 host->status = HOST_S_DATA;
570 if ((host->flags & (HOST_F_DMA | HOST_F_DBDMA))) {
571 u32 channel = DMA_CHANNEL(host);
575 if (host->flags & HOST_F_RECV) {
579 status = __raw_readl(HOST_STATUS(host));
586 static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
588 unsigned int pbus = clk_get_rate(host->clk);
592 config = __raw_readl(HOST_CONFIG(host));
597 __raw_writel(config, HOST_CONFIG(host));
601 static int au1xmmc_prepare_data(struct au1xmmc_host *host,
607 host->flags |= HOST_F_RECV;
609 host->flags |= HOST_F_XMIT;
611 if (host->mrq->stop)
612 host->flags |= HOST_F_STOP;
614 host->dma.dir = DMA_BIDIRECTIONAL;
616 host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
617 data->sg_len, host->dma.dir);
619 if (host->dma.len == 0)
622 __raw_writel(data->blksz - 1, HOST_BLKSIZE(host));
624 if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
626 u32 channel = DMA_CHANNEL(host);
630 for (i = 0; i < host->dma.len; i++) {
637 if (i == host->dma.len - 1)
640 if (host->flags & HOST_F_XMIT) {
654 host->pio.index = 0;
655 host->pio.offset = 0;
656 host->pio.len = datalen;
658 if (host->flags & HOST_F_XMIT)
659 IRQ_ON(host, SD_CONFIG_TH);
661 IRQ_ON(host, SD_CONFIG_NE);
662 /* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */
668 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
669 host->dma.dir);
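
au1xmmc_prepare_data() above maps the request's scatterlist with dma_map_sg() before programming the block size and, in the DBDMA case, queueing one descriptor per mapped segment; its error path unmaps with the same device, list, original nents and direction. A minimal sketch of that contract, where the descriptor-queueing helper and the error codes are purely illustrative and the mapping direction mirrors the driver's own DMA_BIDIRECTIONAL choice shown above:

/* queue_descriptors() is a hypothetical stand-in for the DBDMA setup loop. */
static int prepare_data_sketch(struct au1xmmc_host *host, struct mmc_data *data)
{
	host->dma.dir = DMA_BIDIRECTIONAL;

	host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
				   data->sg_len, host->dma.dir);
	if (host->dma.len == 0)			/* nothing could be mapped */
		return -EINVAL;

	__raw_writel(data->blksz - 1, HOST_BLKSIZE(host));

	if (queue_descriptors(host, data) < 0) {
		/* balance the successful map: same dev, sg, nents, direction */
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     host->dma.dir);
		return -EINVAL;
	}

	return 0;
}
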
676 struct au1xmmc_host *host = mmc_priv(mmc);
680 WARN_ON(host->status != HOST_S_IDLE);
682 host->mrq = mrq;
683 host->status = HOST_S_CMD;
688 au1xmmc_finish_request(host);
693 FLUSH_FIFO(host);
694 ret = au1xmmc_prepare_data(host, mrq->data);
698 ret = au1xmmc_send_command(host, mrq->cmd, mrq->data);
702 au1xmmc_finish_request(host);
706 static void au1xmmc_reset_controller(struct au1xmmc_host *host)
709 __raw_writel(SD_ENABLE_CE, HOST_ENABLE(host));
713 __raw_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
717 __raw_writel(~0, HOST_STATUS(host));
720 __raw_writel(0, HOST_BLKSIZE(host));
721 __raw_writel(0x001fffff, HOST_TIMEOUT(host));
724 __raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
727 __raw_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
731 __raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
735 __raw_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
742 struct au1xmmc_host *host = mmc_priv(mmc);
746 au1xmmc_set_power(host, 0);
748 au1xmmc_set_power(host, 1);
751 if (ios->clock && ios->clock != host->clock) {
752 au1xmmc_set_clock(host, ios->clock);
753 host->clock = ios->clock;
756 config2 = __raw_readl(HOST_CONFIG2(host));
769 __raw_writel(config2, HOST_CONFIG2(host));
779 struct au1xmmc_host *host = dev_id;
782 status = __raw_readl(HOST_STATUS(host));
788 mmc_signal_sdio_irq(host->mmc);
790 if (host->mrq && (status & STATUS_TIMEOUT)) {
792 host->mrq->cmd->error = -ETIMEDOUT;
794 host->mrq->data->error = -ETIMEDOUT;
797 IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);
799 /* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */
800 tasklet_schedule(&host->finish_task);
805 if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE))
806 au1xmmc_receive_pio(host);
808 au1xmmc_data_complete(host, status);
809 /* tasklet_schedule(&host->data_task); */
814 if (host->status == HOST_S_CMD)
815 au1xmmc_cmd_complete(host, status);
817 } else if (!(host->flags & HOST_F_DMA)) {
818 if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT))
819 au1xmmc_send_pio(host);
820 else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN))
821 au1xmmc_receive_pio(host);
824 DBG("Unhandled status %8.8x\n", host->pdev->id,
828 __raw_writel(status, HOST_STATUS(host));
848 struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id;
851 if (!host->mrq)
854 if (host->flags & HOST_F_STOP)
855 SEND_STOP(host);
857 tasklet_schedule(&host->data_task);
860 static int au1xmmc_dbdma_init(struct au1xmmc_host *host)
865 res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0);
870 res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1);
878 host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid,
879 au1xmmc_dbdma_callback, (void *)host);
880 if (!host->tx_chan) {
881 dev_err(&host->pdev->dev, "cannot allocate TX DMA\n");
885 host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid,
886 au1xmmc_dbdma_callback, (void *)host);
887 if (!host->rx_chan) {
888 dev_err(&host->pdev->dev, "cannot allocate RX DMA\n");
889 au1xxx_dbdma_chan_free(host->tx_chan);
893 au1xxx_dbdma_set_devwidth(host->tx_chan, 8);
894 au1xxx_dbdma_set_devwidth(host->rx_chan, 8);
896 au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT);
897 au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT);
900 host->flags |= HOST_F_DMA | HOST_F_DBDMA;
905 static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host)
907 if (host->flags & HOST_F_DMA) {
908 host->flags &= ~HOST_F_DMA;
909 au1xxx_dbdma_chan_free(host->tx_chan);
910 au1xxx_dbdma_chan_free(host->rx_chan);
916 struct au1xmmc_host *host = mmc_priv(mmc);
919 IRQ_ON(host, SD_CONFIG_SI);
921 IRQ_OFF(host, SD_CONFIG_SI);
935 struct au1xmmc_host *host;
946 host = mmc_priv(mmc);
947 host->mmc = mmc;
948 host->platdata = pdev->dev.platform_data;
949 host->pdev = pdev;
958 host->ioarea = request_mem_region(r->start, resource_size(r),
960 if (!host->ioarea) {
965 host->iobase = ioremap(r->start, 0x3c);
966 if (!host->iobase) {
971 host->irq = platform_get_irq(pdev, 0);
972 if (host->irq < 0) {
973 ret = host->irq;
1002 if (host->ioarea->start == AU1100_SD0_PHYS_ADDR)
1007 ret = request_irq(host->irq, au1xmmc_irq, iflag, DRIVER_NAME, host);
1013 host->clk = clk_get(&pdev->dev, ALCHEMY_PERIPH_CLK);
1014 if (IS_ERR(host->clk)) {
1016 ret = PTR_ERR(host->clk);
1020 ret = clk_prepare_enable(host->clk);
1026 host->status = HOST_S_IDLE;
1029 if (host->platdata && host->platdata->cd_setup) {
1030 ret = host->platdata->cd_setup(mmc, 1);
1039 if (host->platdata)
1040 mmc->caps &= ~(host->platdata->mask_host_caps);
1042 tasklet_setup(&host->data_task, au1xmmc_tasklet_data);
1044 tasklet_setup(&host->finish_task, au1xmmc_tasklet_finish);
1047 ret = au1xmmc_dbdma_init(host);
1053 if (host->platdata && host->platdata->led) {
1054 struct led_classdev *led = host->platdata->led;
1064 au1xmmc_reset_controller(host);
1068 dev_err(&pdev->dev, "cannot add mmc host\n");
1072 platform_set_drvdata(pdev, host);
1075 " (mode=%s)\n", pdev->id, host->iobase,
1076 host->flags & HOST_F_DMA ? "dma" : "pio");
1082 if (host->platdata && host->platdata->led)
1083 led_classdev_unregister(host->platdata->led);
1086 __raw_writel(0, HOST_ENABLE(host));
1087 __raw_writel(0, HOST_CONFIG(host));
1088 __raw_writel(0, HOST_CONFIG2(host));
1091 if (host->flags & HOST_F_DBDMA)
1092 au1xmmc_dbdma_shutdown(host);
1094 tasklet_kill(&host->data_task);
1095 tasklet_kill(&host->finish_task);
1097 if (host->platdata && host->platdata->cd_setup &&
1099 host->platdata->cd_setup(mmc, 0);
1101 clk_disable_unprepare(host->clk);
1103 clk_put(host->clk);
1105 free_irq(host->irq, host);
1107 iounmap((void *)host->iobase);
1109 release_resource(host->ioarea);
1110 kfree(host->ioarea);
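
The probe path above acquires its resources in a fixed order (MMIO region, ioremap, IRQ number, IRQ handler, clock, card-detect hook, tasklets, optional DBDMA, LED), and the error labels at source lines 1082-1110 release them in exactly the reverse order. A stripped-down sketch of that acquire/unwind shape using the same non-devm APIs (label names and error codes are illustrative; the remaining steps would continue the same goto chain):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int probe_sketch(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *iobase;
	int irq, ret;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENODEV;

	if (!request_mem_region(r->start, resource_size(r), pdev->name))
		return -EBUSY;

	iobase = ioremap(r->start, resource_size(r));
	if (!iobase) {
		ret = -ENOMEM;
		goto out_region;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_unmap;
	}

	/* request_irq(), clk_get()/clk_prepare_enable(), tasklet_setup(),
	 * DBDMA and LED registration would follow, each with its own label. */

	return 0;

	/* unwind strictly in reverse order of acquisition */
out_unmap:
	iounmap(iobase);
out_region:
	release_mem_region(r->start, resource_size(r));
	return ret;
}
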
1119 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1121 if (host) {
1122 mmc_remove_host(host->mmc);
1125 if (host->platdata && host->platdata->led)
1126 led_classdev_unregister(host->platdata->led);
1129 if (host->platdata && host->platdata->cd_setup &&
1130 !(host->mmc->caps & MMC_CAP_NEEDS_POLL))
1131 host->platdata->cd_setup(host->mmc, 0);
1133 __raw_writel(0, HOST_ENABLE(host));
1134 __raw_writel(0, HOST_CONFIG(host));
1135 __raw_writel(0, HOST_CONFIG2(host));
1138 tasklet_kill(&host->data_task);
1139 tasklet_kill(&host->finish_task);
1141 if (host->flags & HOST_F_DBDMA)
1142 au1xmmc_dbdma_shutdown(host);
1144 au1xmmc_set_power(host, 0);
1146 clk_disable_unprepare(host->clk);
1147 clk_put(host->clk);
1149 free_irq(host->irq, host);
1150 iounmap((void *)host->iobase);
1151 release_resource(host->ioarea);
1152 kfree(host->ioarea);
1154 mmc_free_host(host->mmc);
1161 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1163 __raw_writel(0, HOST_CONFIG2(host));
1164 __raw_writel(0, HOST_CONFIG(host));
1165 __raw_writel(0xffffffff, HOST_STATUS(host));
1166 __raw_writel(0, HOST_ENABLE(host));
1174 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1176 au1xmmc_reset_controller(host);
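
Suspend (source lines 1161-1166) quiesces the block by zeroing CONFIG2 and CONFIG, acking every pending status bit and clearing the enable register; resume (source lines 1174-1176) simply re-runs au1xmmc_reset_controller(), so all register state is rebuilt from scratch rather than saved and restored. A sketch of that pairing with descriptive comments; the pm_message_t argument is an assumption based on the legacy platform_driver callback style:

static int suspend_sketch(struct platform_device *pdev, pm_message_t state)
{
	struct au1xmmc_host *host = platform_get_drvdata(pdev);

	__raw_writel(0, HOST_CONFIG2(host));		/* drop data-path features (FIFO etc.) */
	__raw_writel(0, HOST_CONFIG(host));		/* mask every interrupt source */
	__raw_writel(0xffffffff, HOST_STATUS(host));	/* ack anything still pending */
	__raw_writel(0, HOST_ENABLE(host));		/* gate the block off */

	return 0;
}

static int resume_sketch(struct platform_device *pdev)
{
	struct au1xmmc_host *host = platform_get_drvdata(pdev);

	au1xmmc_reset_controller(host);		/* same init sequence as probe time */

	return 0;
}
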