Lines Matching refs:host

3  *  linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
25 #include <linux/mmc/host.h>
192 static inline int is_imx31_mmc(struct mxcmci_host *host)
194 return host->devtype == IMX31_MMC;
197 static inline int is_mpc512x_mmc(struct mxcmci_host *host)
199 return host->devtype == MPC512X_MMC;
202 static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg)
205 return ioread32be(host->base + reg);
207 return readl(host->base + reg);
210 static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg)
213 iowrite32be(val, host->base + reg);
215 writel(val, host->base + reg);
218 static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg)
221 return ioread32be(host->base + reg);
223 return readw(host->base + reg);
226 static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg)
229 iowrite32be(val, host->base + reg);
231 writew(val, host->base + reg);
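
The four accessors above (file lines 202-231) pick between big-endian MMIO helpers (ioread32be/iowrite32be, used on the MPC512x variant, whose SDHC registers are big-endian) and the native readl/writel/readw/writew used on i.MX, keyed on the host's devtype. A minimal user-space sketch of that dispatch pattern follows; the fake_* names, the byte-buffer register window and main() are invented for illustration and are not part of the driver.

/* Illustrative user-space sketch (not driver code) of the devtype-keyed
 * accessor pattern: the MPC512x path uses big-endian register accesses,
 * the i.MX path uses native little-endian ones. All fake_* names, the
 * byte-buffer register window and main() are invented for the example. */
#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum fake_devtype { FAKE_IMX, FAKE_MPC512X };

struct fake_host {
    enum fake_devtype devtype;
    uint8_t base[16];                   /* stands in for the ioremapped registers */
};

static void fake_writel(struct fake_host *host, uint32_t val, int reg)
{
    /* mock of iowrite32be() vs. writel() */
    uint32_t raw = (host->devtype == FAKE_MPC512X) ? htobe32(val) : htole32(val);

    memcpy(host->base + reg, &raw, sizeof(raw));
}

static uint32_t fake_readl(struct fake_host *host, int reg)
{
    /* mock of ioread32be() vs. readl() */
    uint32_t raw;

    memcpy(&raw, host->base + reg, sizeof(raw));
    return (host->devtype == FAKE_MPC512X) ? be32toh(raw) : le32toh(raw);
}

int main(void)
{
    struct fake_host h = { .devtype = FAKE_MPC512X };

    fake_writel(&h, 0x12345678, 0);
    printf("0x%08x\n", fake_readl(&h, 0));  /* same devtype both ways: value round-trips */
    return 0;
}
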
234 static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
236 static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd)
238 if (!IS_ERR(host->mmc->supply.vmmc)) {
239 if (host->power_mode == MMC_POWER_UP)
240 mmc_regulator_set_ocr(host->mmc,
241 host->mmc->supply.vmmc, vdd);
242 else if (host->power_mode == MMC_POWER_OFF)
243 mmc_regulator_set_ocr(host->mmc,
244 host->mmc->supply.vmmc, 0);
247 if (host->pdata && host->pdata->setpower)
248 host->pdata->setpower(mmc_dev(host->mmc), vdd);
251 static inline int mxcmci_use_dma(struct mxcmci_host *host)
253 return host->do_dma;
256 static void mxcmci_softreset(struct mxcmci_host *host)
260 dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
263 mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK);
264 mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
268 mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
270 mxcmci_writew(host, 0xff, MMC_REG_RES_TO);
296 static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
305 host->data = data;
308 mxcmci_writew(host, nob, MMC_REG_NOB);
309 mxcmci_writew(host, blksz, MMC_REG_BLK_LEN);
310 host->datasize = datasize;
312 if (!mxcmci_use_dma(host))
317 host->do_dma = 0;
323 host->dma_dir = DMA_FROM_DEVICE;
326 host->dma_dir = DMA_TO_DEVICE;
332 nents = dma_map_sg(host->dma->device->dev, data->sg,
333 data->sg_len, host->dma_dir);
337 host->desc = dmaengine_prep_slave_sg(host->dma,
341 if (!host->desc) {
342 dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
343 host->dma_dir);
344 host->do_dma = 0;
349 dmaengine_submit(host->desc);
350 dma_async_issue_pending(host->dma);
352 mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS));
357 static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat);
358 static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat);
362 struct mxcmci_host *host = data;
365 del_timer(&host->watchdog);
367 stat = mxcmci_readl(host, MMC_REG_STATUS);
369 dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
371 mxcmci_data_done(host, stat);
374 static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
377 u32 int_cntr = host->default_irq_mask;
380 WARN_ON(host->cmd != NULL);
381 host->cmd = cmd;
397 dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
405 if (mxcmci_use_dma(host)) {
406 if (host->dma_dir == DMA_FROM_DEVICE) {
407 host->desc->callback = mxcmci_dma_callback;
408 host->desc->callback_param = host;
414 spin_lock_irqsave(&host->lock, flags);
415 if (host->use_sdio)
417 mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
418 spin_unlock_irqrestore(&host->lock, flags);
420 mxcmci_writew(host, cmd->opcode, MMC_REG_CMD);
421 mxcmci_writel(host, cmd->arg, MMC_REG_ARG);
422 mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT);
427 static void mxcmci_finish_request(struct mxcmci_host *host,
430 u32 int_cntr = host->default_irq_mask;
433 spin_lock_irqsave(&host->lock, flags);
434 if (host->use_sdio)
436 mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
437 spin_unlock_irqrestore(&host->lock, flags);
439 host->req = NULL;
440 host->cmd = NULL;
441 host->data = NULL;
443 mmc_request_done(host->mmc, req);
446 static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
448 struct mmc_data *data = host->data;
451 if (mxcmci_use_dma(host)) {
452 dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
453 host->dma_dir);
458 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
461 dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
466 dev_err(mmc_dev(host->mmc),
470 dev_err(mmc_dev(host->mmc),
475 dev_err(mmc_dev(host->mmc),
479 dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
483 data->bytes_xfered = host->datasize;
488 host->data = NULL;
493 static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
495 struct mmc_command *cmd = host->cmd;
503 dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
506 dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
513 a = mxcmci_readw(host, MMC_REG_RES_FIFO);
514 b = mxcmci_readw(host, MMC_REG_RES_FIFO);
518 a = mxcmci_readw(host, MMC_REG_RES_FIFO);
519 b = mxcmci_readw(host, MMC_REG_RES_FIFO);
520 c = mxcmci_readw(host, MMC_REG_RES_FIFO);
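
For a short (48-bit) response, file lines 518-520 read three 16-bit halves out of the response FIFO, which are then combined into the single 32-bit response word; the leading and trailing bytes of the frame (command/start bits and CRC) carry no response payload and are dropped. The sketch below shows that bit packing in isolation; pack_short_response() and the example values are invented for the demonstration and only illustrate keeping the middle 32 bits of the 48 bits read.

/* Illustrative sketch: combining three 16-bit response-FIFO halves
 * (48 bits, read as at file lines 518-520) into one 32-bit word by
 * keeping the middle 32 bits of a:b:c. Not driver code. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_short_response(uint16_t a, uint16_t b, uint16_t c)
{
    return ((uint32_t)a << 24) | ((uint32_t)b << 8) | ((uint32_t)c >> 8);
}

int main(void)
{
    /* Hypothetical FIFO halves, most significant first:
     * 0x1234 0x5678 0x9abc -> middle 32 bits are 0x3456789a. */
    printf("0x%08x\n", pack_short_response(0x1234, 0x5678, 0x9abc));
    return 0;
}
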
526 static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
532 stat = mxcmci_readl(host, MMC_REG_STATUS);
536 mxcmci_softreset(host);
537 mxcmci_set_clk_rate(host, host->clock);
546 static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
552 stat = mxcmci_poll_status(host,
556 *buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
564 stat = mxcmci_poll_status(host,
568 tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
575 static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
581 stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
584 mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS);
592 stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
597 mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS);
600 return mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
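
mxcmci_pull() and mxcmci_push() (file lines 546-600) move data through the 32-bit MMC_REG_BUFFER_ACCESS FIFO one word at a time, polling the status register before every access, and handle a buffer whose length is not a multiple of four by staging the final word in a temporary and memcpy()ing only the bytes that remain. The user-space sketch below shows just that word-plus-tail copy; fifo_read32(), the in-memory backing buffer and main() are made up for the example, and the status polling and endianness handling of the real functions are omitted.

/* Illustrative sketch of the PIO word-plus-tail copy done by mxcmci_pull():
 * whole 32-bit words first, then a 1..3 byte tail staged in a temporary.
 * fifo_read32() and the backing buffer are mocks, not driver code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t fifo_backing[] = "example payload!";   /* fake FIFO contents */
static size_t fifo_pos;

static uint32_t fifo_read32(void)       /* stands in for reading MMC_REG_BUFFER_ACCESS */
{
    uint32_t v;

    memcpy(&v, fifo_backing + fifo_pos, sizeof(v));
    fifo_pos += sizeof(v);
    return v;
}

static void pull(void *_buf, int bytes)
{
    uint32_t *buf = _buf;

    while (bytes > 3) {                 /* full 32-bit words */
        *buf++ = fifo_read32();
        bytes -= 4;
    }

    if (bytes) {                        /* trailing 1..3 bytes */
        uint32_t tmp = fifo_read32();

        memcpy(buf, &tmp, bytes);
    }
}

int main(void)
{
    uint32_t out[4] = { 0 };

    pull(out, 13);                      /* three full words plus one tail byte */
    printf("%.13s\n", (const char *)out);
    return 0;
}
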
603 static int mxcmci_transfer_data(struct mxcmci_host *host)
605 struct mmc_data *data = host->req->data;
609 host->data = data;
610 host->datasize = 0;
614 stat = mxcmci_pull(host, sg_virt(sg), sg->length);
617 host->datasize += sg->length;
621 stat = mxcmci_push(host, sg_virt(sg), sg->length);
624 host->datasize += sg->length;
626 stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
635 struct mxcmci_host *host = container_of(work, struct mxcmci_host,
637 int datastat = mxcmci_transfer_data(host);
639 mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
641 mxcmci_finish_data(host, datastat);
643 if (host->req->stop) {
644 if (mxcmci_start_cmd(host, host->req->stop, 0)) {
645 mxcmci_finish_request(host, host->req);
649 mxcmci_finish_request(host, host->req);
653 static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
659 spin_lock_irqsave(&host->lock, flags);
661 if (!host->data) {
662 spin_unlock_irqrestore(&host->lock, flags);
666 if (!host->req) {
667 spin_unlock_irqrestore(&host->lock, flags);
671 req = host->req;
673 host->req = NULL; /* we will handle finish req below */
675 data_error = mxcmci_finish_data(host, stat);
677 spin_unlock_irqrestore(&host->lock, flags);
682 mxcmci_read_response(host, stat);
683 host->cmd = NULL;
686 if (mxcmci_start_cmd(host, req->stop, 0)) {
687 mxcmci_finish_request(host, req);
691 mxcmci_finish_request(host, req);
695 static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
697 mxcmci_read_response(host, stat);
698 host->cmd = NULL;
700 if (!host->data && host->req) {
701 mxcmci_finish_request(host, host->req);
709 if (!mxcmci_use_dma(host) && host->data)
710 schedule_work(&host->datawork);
716 struct mxcmci_host *host = devid;
720 stat = mxcmci_readl(host, MMC_REG_STATUS);
721 mxcmci_writel(host,
726 dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
728 spin_lock(&host->lock);
729 sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
730 spin_unlock(&host->lock);
732 if (mxcmci_use_dma(host) && (stat & (STATUS_WRITE_OP_DONE)))
733 mxcmci_writel(host, STATUS_WRITE_OP_DONE, MMC_REG_STATUS);
736 mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS);
737 mmc_signal_sdio_irq(host->mmc);
741 mxcmci_cmd_done(host, stat);
743 if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) {
744 del_timer(&host->watchdog);
745 mxcmci_data_done(host, stat);
748 if (host->default_irq_mask &&
750 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
757 struct mxcmci_host *host = mmc_priv(mmc);
758 unsigned int cmdat = host->cmdat;
761 WARN_ON(host->req != NULL);
763 host->req = req;
764 host->cmdat &= ~CMD_DAT_CONT_INIT;
766 if (host->dma)
767 host->do_dma = 1;
770 error = mxcmci_setup_data(host, req->data);
783 error = mxcmci_start_cmd(host, req->cmd, cmdat);
787 mxcmci_finish_request(host, req);
790 static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
794 unsigned int clk_in = clk_get_rate(host->clk_per);
817 mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE);
819 dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
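
File line 817 shows how the result of the clock search is programmed: the prescaler lands in the upper bits of MMC_REG_CLK_RATE and a 4-bit divider in the low nibble. The sketch below is a simplified user-space illustration of picking and packing such a pair, assuming the output clock is clk_in / (divider + 1), further divided by 2 * prescaler when the prescaler is non-zero; it is not a copy of the driver's exact loop, and pick_clk_rate() plus the example clock rates are invented for the demonstration.

/* Illustrative sketch: choose a prescaler/divider pair for the SD clock and
 * pack it as file line 817 does: (prescaler << 4) | divider. Assumes
 * out = clk_in / (divider + 1), further divided by 2 * prescaler when the
 * prescaler is non-zero. Simplified; not the driver's exact loop. */
#include <stdint.h>
#include <stdio.h>

static uint16_t pick_clk_rate(unsigned int clk_in, unsigned int clk_ios,
                              unsigned int *out_hz)
{
    unsigned int prescaler, divider;

    for (prescaler = 0; prescaler <= 0x800;
         prescaler = prescaler ? prescaler << 1 : 1) {
        for (divider = 1; divider <= 0xF; divider++) {
            unsigned int out = clk_in / (divider + 1);

            if (prescaler)
                out /= prescaler * 2;

            if (out <= clk_ios) {
                *out_hz = out;
                return (uint16_t)((prescaler << 4) | divider);
            }
        }
    }

    *out_hz = 0;                        /* cannot reach the requested rate */
    return (uint16_t)((0x800 << 4) | 0xF);
}

int main(void)
{
    unsigned int out_hz;
    /* e.g. a 66 MHz per-clock brought down to the 400 kHz identification clock */
    uint16_t reg = pick_clk_rate(66000000, 400000, &out_hz);

    printf("CLK_RATE = 0x%04x, output ~= %u Hz\n", reg, out_hz);
    return 0;
}
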
825 struct mxcmci_host *host = mmc_priv(mmc);
826 struct dma_slave_config *config = &host->dma_slave_config;
828 config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
829 config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
832 config->dst_maxburst = host->burstlen;
833 config->src_maxburst = host->burstlen;
836 return dmaengine_slave_config(host->dma, config);
841 struct mxcmci_host *host = mmc_priv(mmc);
853 if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
854 host->burstlen = burstlen;
857 dev_err(mmc_dev(host->mmc),
859 dma_release_channel(host->dma);
860 host->do_dma = 0;
861 host->dma = NULL;
866 host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
868 host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
870 if (host->power_mode != ios->power_mode) {
871 host->power_mode = ios->power_mode;
872 mxcmci_set_power(host, ios->vdd);
875 host->cmdat |= CMD_DAT_CONT_INIT;
879 mxcmci_set_clk_rate(host, ios->clock);
880 mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
882 mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK);
885 host->clock = ios->clock;
900 struct mxcmci_host *host = mmc_priv(mmc);
902 if (host->pdata && host->pdata->get_ro)
903 return !!host->pdata->get_ro(mmc_dev(mmc));
914 struct mxcmci_host *host = mmc_priv(mmc);
918 spin_lock_irqsave(&host->lock, flags);
919 host->use_sdio = enable;
920 int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR);
927 mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
928 spin_unlock_irqrestore(&host->lock, flags);
931 static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
933 struct mxcmci_host *mxcmci = mmc_priv(host);
943 host->caps &= ~MMC_CAP_4_BIT_DATA;
945 host->caps |= MMC_CAP_4_BIT_DATA;
950 struct mxcmci_host *host = param;
955 chan->private = &host->dma_data;
962 struct mxcmci_host *host = from_timer(host, t, watchdog);
963 struct mmc_request *req = host->req;
964 unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
966 if (host->dma_dir == DMA_FROM_DEVICE) {
967 dmaengine_terminate_all(host->dma);
968 dev_err(mmc_dev(host->mmc),
972 dev_err(mmc_dev(host->mmc),
975 mxcmci_softreset(host);
980 if (host->data)
981 host->data->error = -ETIMEDOUT;
982 host->req = NULL;
983 host->cmd = NULL;
984 host->data = NULL;
985 mmc_request_done(host->mmc, req);
999 struct mxcmci_host *host;
1016 mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
1020 host = mmc_priv(mmc);
1022 host->base = devm_ioremap_resource(&pdev->dev, res);
1023 if (IS_ERR(host->base)) {
1024 ret = PTR_ERR(host->base);
1028 host->phys_base = res->start;
1049 host->devtype = id_entry->driver_data;
1051 host->devtype = pdev->id_entry->driver_data;
1055 if (!is_mpc512x_mmc(host))
1058 host->mmc = mmc;
1059 host->pdata = pdata;
1060 spin_lock_init(&host->lock);
1080 host->default_irq_mask =
1083 host->default_irq_mask = 0;
1085 host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1086 if (IS_ERR(host->clk_ipg)) {
1087 ret = PTR_ERR(host->clk_ipg);
1091 host->clk_per = devm_clk_get(&pdev->dev, "per");
1092 if (IS_ERR(host->clk_per)) {
1093 ret = PTR_ERR(host->clk_per);
1097 ret = clk_prepare_enable(host->clk_per);
1101 ret = clk_prepare_enable(host->clk_ipg);
1105 mxcmci_softreset(host);
1107 host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO);
1108 if (host->rev_no != 0x400) {
1110 dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
1111 host->rev_no);
1115 mmc->f_min = clk_get_rate(host->clk_per) >> 16;
1116 mmc->f_max = clk_get_rate(host->clk_per) >> 1;
1119 mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO);
1121 mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
1123 if (!host->pdata) {
1124 host->dma = dma_request_chan(&pdev->dev, "rx-tx");
1125 if (IS_ERR(host->dma)) {
1126 if (PTR_ERR(host->dma) == -EPROBE_DEFER) {
1132 host->dma = NULL;
1137 host->dmareq = res->start;
1138 host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
1139 host->dma_data.priority = DMA_PRIO_LOW;
1140 host->dma_data.dma_request = host->dmareq;
1143 host->dma = dma_request_channel(mask, filter, host);
1146 if (host->dma)
1148 host->dma->device->dev);
1150 dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
1152 INIT_WORK(&host->datawork, mxcmci_datawork);
1155 dev_name(&pdev->dev), host);
1161 if (host->pdata && host->pdata->init) {
1162 ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
1163 host->mmc);
1168 timer_setup(&host->watchdog, mxcmci_watchdog, 0);
1177 if (host->dma)
1178 dma_release_channel(host->dma);
1181 clk_disable_unprepare(host->clk_ipg);
1183 clk_disable_unprepare(host->clk_per);
1194 struct mxcmci_host *host = mmc_priv(mmc);
1198 if (host->pdata && host->pdata->exit)
1199 host->pdata->exit(&pdev->dev, mmc);
1201 if (host->dma)
1202 dma_release_channel(host->dma);
1204 clk_disable_unprepare(host->clk_per);
1205 clk_disable_unprepare(host->clk_ipg);
1216 struct mxcmci_host *host = mmc_priv(mmc);
1218 clk_disable_unprepare(host->clk_per);
1219 clk_disable_unprepare(host->clk_ipg);
1226 struct mxcmci_host *host = mmc_priv(mmc);
1229 ret = clk_prepare_enable(host->clk_per);
1233 ret = clk_prepare_enable(host->clk_ipg);
1235 clk_disable_unprepare(host->clk_per);