Lines matching defs:host in the MOXA ART MMC host driver (moxart-mmc)

2  * MOXA ART MMC host driver.
25 #include <linux/mmc/host.h>
151 static inline void moxart_init_sg(struct moxart_host *host,
154 host->cur_sg = data->sg;
155 host->num_sg = data->sg_len;
156 host->data_remain = host->cur_sg->length;
158 if (host->data_remain > host->data_len)
159 host->data_remain = host->data_len;
162 static inline int moxart_next_sg(struct moxart_host *host)
165 struct mmc_data *data = host->mrq->cmd->data;
167 host->cur_sg++;
168 host->num_sg--;
170 if (host->num_sg > 0) {
171 host->data_remain = host->cur_sg->length;
172 remain = host->data_len - data->bytes_xfered;
173 if (remain > 0 && remain < host->data_remain)
174 host->data_remain = remain;
177 return host->num_sg;
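The two scatter-gather helpers appear above only as matched fragments. A minimal reconstruction, using the struct moxart_host fields shown (cur_sg, num_sg, data_remain, data_len, mrq) and filling in the elided local declaration as an assumption, would look roughly like this:

static inline void moxart_init_sg(struct moxart_host *host,
                                  struct mmc_data *data)
{
        /* Start at the first scatterlist entry and clamp the bytes
         * remaining in that entry to the total transfer length. */
        host->cur_sg = data->sg;
        host->num_sg = data->sg_len;
        host->data_remain = host->cur_sg->length;

        if (host->data_remain > host->data_len)
                host->data_remain = host->data_len;
}

static inline int moxart_next_sg(struct moxart_host *host)
{
        int remain;                                     /* assumed local */
        struct mmc_data *data = host->mrq->cmd->data;

        /* Advance to the next entry; if one is left, clamp its length
         * to whatever the request still has to transfer. */
        host->cur_sg++;
        host->num_sg--;

        if (host->num_sg > 0) {
                host->data_remain = host->cur_sg->length;
                remain = host->data_len - data->bytes_xfered;
                if (remain > 0 && remain < host->data_remain)
                        host->data_remain = remain;
        }

        return host->num_sg;
}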
180 static int moxart_wait_for_status(struct moxart_host *host,
187 *status = readl(host->base + REG_STATUS);
192 writel(*status & mask, host->base + REG_CLEAR);
198 dev_err(mmc_dev(host->mmc), "timed out waiting for status\n");
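Only four lines of moxart_wait_for_status() match, so the loop around them is not visible. A hedged sketch of a bounded poll of REG_STATUS that acknowledges the bits it consumed; the retry limit, the delay, and the exact loop shape are assumptions, not taken from the listing:

static int moxart_wait_for_status(struct moxart_host *host,
                                  u32 mask, u32 *status)
{
        int ret = -ETIMEDOUT;
        u32 i;

        for (i = 0; i < MAX_RETRIES; i++) {             /* MAX_RETRIES: assumed bound */
                *status = readl(host->base + REG_STATUS);
                if (!(*status & mask)) {
                        udelay(5);                      /* assumed settle delay */
                        continue;
                }
                /* Acknowledge exactly the bits we were waiting for. */
                writel(*status & mask, host->base + REG_CLEAR);
                ret = 0;
                break;
        }

        if (ret == -ETIMEDOUT)
                dev_err(mmc_dev(host->mmc), "timed out waiting for status\n");

        return ret;
}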
204 static void moxart_send_command(struct moxart_host *host,
210 RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR);
211 writel(cmd->arg, host->base + REG_ARGUMENT);
225 writel(cmdctrl | CMD_EN, host->base + REG_COMMAND);
227 if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT)
240 cmd->resp[3] = readl(host->base + REG_RESPONSE0);
241 cmd->resp[2] = readl(host->base + REG_RESPONSE1);
242 cmd->resp[1] = readl(host->base + REG_RESPONSE2);
243 cmd->resp[0] = readl(host->base + REG_RESPONSE3);
245 cmd->resp[0] = readl(host->base + REG_RESPONSE0);
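In moxart_send_command(), the four consecutive REG_RESPONSE reads followed by a single REG_RESPONSE0 read point to the usual split between 136-bit (R2) and short responses. A fragment sketching that branch; the MMC_RSP_136 test is an assumption, only the readl() lines come from the listing:

        if (cmd->flags & MMC_RSP_136) {
                /* Long (R2) response: four 32-bit words, copied in
                 * reverse register order into cmd->resp[]. */
                cmd->resp[3] = readl(host->base + REG_RESPONSE0);
                cmd->resp[2] = readl(host->base + REG_RESPONSE1);
                cmd->resp[1] = readl(host->base + REG_RESPONSE2);
                cmd->resp[0] = readl(host->base + REG_RESPONSE3);
        } else {
                /* Short response: one word in REG_RESPONSE0. */
                cmd->resp[0] = readl(host->base + REG_RESPONSE0);
        }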
252 struct moxart_host *host = param;
254 complete(&host->dma_complete);
257 static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
264 if (host->data_len == data->bytes_xfered)
268 dma_chan = host->dma_chan_tx;
271 dma_chan = host->dma_chan_rx;
284 dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n");
288 host->tx_desc = desc;
290 desc->callback_param = host;
295 data->bytes_xfered += host->data_remain;
298 &host->dma_complete, host->timeout);
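moxart_transfer_dma() drives the transfer through the dmaengine API: map the scatterlist, prepare a slave descriptor, submit it, then block on dma_complete. A condensed sketch of that sequence; the direction selection, the DMA_PREP flags, and the moxart_dma_complete name are assumptions (the listing only shows the callback body completing dma_complete):

static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
{
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *dma_chan;
        enum dma_transfer_direction dir;
        u32 len;

        if (host->data_len == data->bytes_xfered)
                return;

        /* Writes go out the "tx" channel, reads come in on "rx". */
        if (data->flags & MMC_DATA_WRITE) {
                dma_chan = host->dma_chan_tx;
                dir = DMA_MEM_TO_DEV;
        } else {
                dma_chan = host->dma_chan_rx;
                dir = DMA_DEV_TO_MEM;
        }

        len = dma_map_sg(dma_chan->device->dev, data->sg, data->sg_len,
                         mmc_get_dma_dir(data));
        if (len > 0)
                desc = dmaengine_prep_slave_sg(dma_chan, data->sg, len, dir,
                                               DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        else
                dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n");

        if (desc) {
                host->tx_desc = desc;
                desc->callback = moxart_dma_complete;   /* callback name assumed */
                desc->callback_param = host;
                dmaengine_submit(desc);
                dma_async_issue_pending(dma_chan);
        }

        data->bytes_xfered += host->data_remain;

        /* Block until the dmaengine callback completes dma_complete
         * or the timeout expires. */
        wait_for_completion_interruptible_timeout(&host->dma_complete,
                                                  host->timeout);

        dma_unmap_sg(dma_chan->device->dev, data->sg, data->sg_len,
                     mmc_get_dma_dir(data));
}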
306 static void moxart_transfer_pio(struct moxart_host *host)
308 struct mmc_data *data = host->mrq->cmd->data;
311 if (host->data_len == data->bytes_xfered)
314 sgp = sg_virt(host->cur_sg);
315 remain = host->data_remain;
319 if (moxart_wait_for_status(host, FIFO_URUN, &status)
322 complete(&host->pio_complete);
325 for (len = 0; len < remain && len < host->fifo_width;) {
326 iowrite32(*sgp, host->base + REG_DATA_WINDOW);
335 if (moxart_wait_for_status(host, FIFO_ORUN, &status)
338 complete(&host->pio_complete);
341 for (len = 0; len < remain && len < host->fifo_width;) {
342 *sgp = ioread32(host->base + REG_DATA_WINDOW);
350 data->bytes_xfered += host->data_remain - remain;
351 host->data_remain = remain;
353 if (host->data_len != data->bytes_xfered)
354 moxart_next_sg(host);
356 complete(&host->pio_complete);
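moxart_transfer_pio() moves data one FIFO burst at a time through REG_DATA_WINDOW. A sketch of the read path only (the write path mirrors it with FIFO_URUN and iowrite32); the outer loop and the 32-bit word step are assumptions filled in around the matched lines:

        struct mmc_data *data = host->mrq->cmd->data;
        u32 *sgp, len = 0, remain, status;

        if (host->data_len == data->bytes_xfered)
                return;

        sgp = sg_virt(host->cur_sg);
        remain = host->data_remain;

        while (remain > 0) {
                /* Wait for the FIFO to report data, then drain up to one
                 * FIFO width of 32-bit words through the data window. */
                if (moxart_wait_for_status(host, FIFO_ORUN, &status)
                    == -ETIMEDOUT) {
                        data->error = -ETIMEDOUT;
                        complete(&host->pio_complete);
                        return;
                }
                for (len = 0; len < remain && len < host->fifo_width;) {
                        *sgp = ioread32(host->base + REG_DATA_WINDOW);
                        sgp++;
                        len += 4;
                }
                remain -= len;
        }

        data->bytes_xfered += host->data_remain - remain;
        host->data_remain = remain;

        /* More data overall: advance the scatterlist and let the next FIFO
         * interrupt re-enter this function; otherwise wake the waiter. */
        if (host->data_len != data->bytes_xfered)
                moxart_next_sg(host);
        else
                complete(&host->pio_complete);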
359 static void moxart_prepare_data(struct moxart_host *host)
361 struct mmc_data *data = host->mrq->cmd->data;
368 host->data_len = data->blocks * data->blksz;
372 moxart_init_sg(host, data);
379 if ((host->data_len > host->fifo_width) && host->have_dma)
382 writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
383 writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
384 writel(host->rate, host->base + REG_DATA_TIMER);
385 writel(host->data_len, host->base + REG_DATA_LENGTH);
386 writel(datactrl, host->base + REG_DATA_CONTROL);
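moxart_prepare_data() programs the data path before the command is issued. A sketch of that setup; the DCR_* enable/size/direction bits other than DCR_DATA_FIFO_RESET, and the block-size encoding, are assumptions, while the five trailing writel() calls come from the listing:

static void moxart_prepare_data(struct moxart_host *host)
{
        struct mmc_data *data = host->mrq->cmd->data;
        u32 datactrl;
        int blksz_bits;

        if (!data)
                return;

        host->data_len = data->blocks * data->blksz;
        blksz_bits = ffs(data->blksz) - 1;              /* log2 of the block size */

        moxart_init_sg(host, data);

        /* Assumed bit names: enable the data path, encode the block size,
         * set the direction, and enable DMA for large transfers. */
        datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);
        if (data->flags & MMC_DATA_WRITE)
                datactrl |= DCR_DATA_WRITE;
        if ((host->data_len > host->fifo_width) && host->have_dma)
                datactrl |= DCR_DMA_EN;

        /* Reset the FIFO, clear stale data/FIFO status, then arm the
         * data timer, length and control registers. */
        writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
        writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
        writel(host->rate, host->base + REG_DATA_TIMER);
        writel(host->data_len, host->base + REG_DATA_LENGTH);
        writel(datactrl, host->base + REG_DATA_CONTROL);
}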
391 struct moxart_host *host = mmc_priv(mmc);
396 spin_lock_irqsave(&host->lock, flags);
398 init_completion(&host->dma_complete);
399 init_completion(&host->pio_complete);
401 host->mrq = mrq;
403 if (readl(host->base + REG_STATUS) & CARD_DETECT) {
408 moxart_prepare_data(host);
409 moxart_send_command(host, host->mrq->cmd);
412 if ((host->data_len > host->fifo_width) && host->have_dma) {
414 writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
416 spin_unlock_irqrestore(&host->lock, flags);
418 moxart_transfer_dma(mrq->cmd->data, host);
420 spin_lock_irqsave(&host->lock, flags);
423 writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK);
425 spin_unlock_irqrestore(&host->lock, flags);
429 &host->pio_complete, host->timeout);
431 spin_lock_irqsave(&host->lock, flags);
434 if (host->is_removed) {
435 dev_err(mmc_dev(host->mmc), "card removed\n");
440 if (moxart_wait_for_status(host, MASK_DATA, &status)
450 moxart_send_command(host, mrq->cmd->data->stop);
454 spin_unlock_irqrestore(&host->lock, flags);
455 mmc_request_done(host->mmc, mrq);
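moxart_request() ties the pieces together: it fails the request when no card is present, issues the command, then either hands large transfers to DMA or unmasks the FIFO interrupts and waits for the PIO path. A condensed reconstruction around the matched lines; the error codes, the goto label, and the CARD_DETECT-means-absent reading are assumptions:

static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct moxart_host *host = mmc_priv(mmc);
        unsigned long flags;
        u32 status;

        spin_lock_irqsave(&host->lock, flags);

        init_completion(&host->dma_complete);
        init_completion(&host->pio_complete);

        host->mrq = mrq;

        /* CARD_DETECT set is treated as "no card present". */
        if (readl(host->base + REG_STATUS) & CARD_DETECT) {
                mrq->cmd->error = -ETIMEDOUT;
                goto request_done;
        }

        moxart_prepare_data(host);
        moxart_send_command(host, host->mrq->cmd);

        if (mrq->cmd->data) {
                if ((host->data_len > host->fifo_width) && host->have_dma) {
                        /* DMA path: leave only card-change unmasked and run
                         * the transfer with the lock dropped. */
                        writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
                        spin_unlock_irqrestore(&host->lock, flags);
                        moxart_transfer_dma(mrq->cmd->data, host);
                        spin_lock_irqsave(&host->lock, flags);
                } else {
                        /* PIO path: unmask the FIFO interrupts and wait for
                         * the IRQ-driven transfer to finish. */
                        writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK);
                        spin_unlock_irqrestore(&host->lock, flags);
                        wait_for_completion_interruptible_timeout(&host->pio_complete,
                                                                  host->timeout);
                        spin_lock_irqsave(&host->lock, flags);
                }

                if (host->is_removed) {
                        dev_err(mmc_dev(host->mmc), "card removed\n");
                        mrq->cmd->error = -ETIMEDOUT;
                }

                if (moxart_wait_for_status(host, MASK_DATA, &status)
                    == -ETIMEDOUT)
                        mrq->cmd->data->error = -ETIMEDOUT;

                if (mrq->cmd->data->stop)
                        moxart_send_command(host, mrq->cmd->data->stop);
        }

request_done:
        spin_unlock_irqrestore(&host->lock, flags);
        mmc_request_done(host->mmc, mrq);
}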
460 struct moxart_host *host = (struct moxart_host *)devid;
464 spin_lock_irqsave(&host->lock, flags);
466 status = readl(host->base + REG_STATUS);
468 host->is_removed = status & CARD_DETECT;
469 if (host->is_removed && host->have_dma) {
470 dmaengine_terminate_all(host->dma_chan_tx);
471 dmaengine_terminate_all(host->dma_chan_rx);
473 host->mrq = NULL;
474 writel(MASK_INTR_PIO, host->base + REG_CLEAR);
475 writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
476 mmc_detect_change(host->mmc, 0);
478 if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq)
479 moxart_transfer_pio(host);
481 spin_unlock_irqrestore(&host->lock, flags);
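The interrupt handler serves both card-change and FIFO events. A sketch assuming a CARD_CHANGE status test around the removal handling; that test and the IRQ_HANDLED return are not among the matched lines:

static irqreturn_t moxart_irq(int irq, void *devid)
{
        struct moxart_host *host = (struct moxart_host *)devid;
        unsigned long flags;
        u32 status;

        spin_lock_irqsave(&host->lock, flags);

        status = readl(host->base + REG_STATUS);
        if (status & CARD_CHANGE) {
                /* Card inserted or removed: kill any in-flight DMA, drop
                 * the current request and tell the core to rescan. */
                host->is_removed = status & CARD_DETECT;
                if (host->is_removed && host->have_dma) {
                        dmaengine_terminate_all(host->dma_chan_tx);
                        dmaengine_terminate_all(host->dma_chan_rx);
                }
                host->mrq = NULL;
                writel(MASK_INTR_PIO, host->base + REG_CLEAR);
                writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
                mmc_detect_change(host->mmc, 0);
        }
        if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq)
                moxart_transfer_pio(host);

        spin_unlock_irqrestore(&host->lock, flags);

        return IRQ_HANDLED;
}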
488 struct moxart_host *host = mmc_priv(mmc);
493 spin_lock_irqsave(&host->lock, flags);
497 if (ios->clock >= host->sysclk / (2 * (div + 1)))
501 host->rate = host->sysclk / (2 * (div + 1));
502 if (host->rate > host->sysclk)
504 writel(ctrl, host->base + REG_CLOCK_CONTROL);
508 writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON,
509 host->base + REG_POWER_CONTROL);
517 host->base + REG_POWER_CONTROL);
522 writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
525 writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
529 spin_unlock_irqrestore(&host->lock, flags);
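The clock branch of moxart_set_ios() searches for the smallest divisor whose output rate, sysclk / (2 * (div + 1)), does not exceed the requested frequency. For example, with a 100 MHz sysclk and a 25 MHz request, div = 1 gives 100 / (2 * 2) = 25 MHz. A fragment sketching that search; the CLK_SD bit and the warning text are assumptions, while CLK_DIV_MASK also bounds f_min later in probe:

        if (ios->clock) {
                u32 div, ctrl;

                /* Pick the first divisor whose output fits under the
                 * requested clock. */
                for (div = 0; div < CLK_DIV_MASK; ++div) {
                        if (ios->clock >= host->sysclk / (2 * (div + 1)))
                                break;
                }
                ctrl = CLK_SD | div;                    /* CLK_SD: assumed enable bit */
                host->rate = host->sysclk / (2 * (div + 1));
                if (host->rate > host->sysclk)
                        dev_warn(mmc_dev(mmc), "rate above sysclk\n");
                writel(ctrl, host->base + REG_CLOCK_CONTROL);
        }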
535 struct moxart_host *host = mmc_priv(mmc);
537 return !!(readl(host->base + REG_STATUS) & WRITE_PROT);
552 struct moxart_host *host = NULL;
595 host = mmc_priv(mmc);
596 host->mmc = mmc;
597 host->base = reg_mmc;
598 host->reg_phys = res_mmc.start;
599 host->timeout = msecs_to_jiffies(1000);
600 host->sysclk = clk_get_rate(clk);
601 host->fifo_width = readl(host->base + REG_FEATURE) << 2;
602 host->dma_chan_tx = dma_request_chan(dev, "tx");
603 host->dma_chan_rx = dma_request_chan(dev, "rx");
605 spin_lock_init(&host->lock);
608 mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2);
609 mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2);
612 if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) {
613 if (PTR_ERR(host->dma_chan_tx) == -EPROBE_DEFER ||
614 PTR_ERR(host->dma_chan_rx) == -EPROBE_DEFER) {
618 if (!IS_ERR(host->dma_chan_tx)) {
619 dma_release_channel(host->dma_chan_tx);
620 host->dma_chan_tx = NULL;
622 if (!IS_ERR(host->dma_chan_rx)) {
623 dma_release_channel(host->dma_chan_rx);
624 host->dma_chan_rx = NULL;
627 host->have_dma = false;
630 host->dma_chan_tx, host->dma_chan_rx);
631 host->have_dma = true;
639 cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW;
640 dmaengine_slave_config(host->dma_chan_tx, &cfg);
643 cfg.src_addr = host->reg_phys + REG_DATA_WINDOW;
645 dmaengine_slave_config(host->dma_chan_rx, &cfg);
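During probe the driver requests the "tx" and "rx" slave channels and falls back to PIO when they are unavailable, deferring only on -EPROBE_DEFER; when both channels exist, it points them at REG_DATA_WINDOW. A fragment sketching that sequence, assuming local cfg/ret variables, an error label, and the bus-width setting:

        host->dma_chan_tx = dma_request_chan(dev, "tx");
        host->dma_chan_rx = dma_request_chan(dev, "rx");

        if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) {
                if (PTR_ERR(host->dma_chan_tx) == -EPROBE_DEFER ||
                    PTR_ERR(host->dma_chan_rx) == -EPROBE_DEFER) {
                        ret = -EPROBE_DEFER;
                        goto out;                       /* assumed error label */
                }
                /* No DMA: release whichever channel did come back and
                 * fall back to PIO-only operation. */
                if (!IS_ERR(host->dma_chan_tx)) {
                        dma_release_channel(host->dma_chan_tx);
                        host->dma_chan_tx = NULL;
                }
                if (!IS_ERR(host->dma_chan_rx)) {
                        dma_release_channel(host->dma_chan_rx);
                        host->dma_chan_rx = NULL;
                }
                host->have_dma = false;
        } else {
                host->have_dma = true;

                /* Both channels stream 32-bit words through the same data
                 * window register; width and burst are assumed here. */
                memset(&cfg, 0, sizeof(cfg));
                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

                cfg.direction = DMA_MEM_TO_DEV;
                cfg.src_addr = 0;
                cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW;
                dmaengine_slave_config(host->dma_chan_tx, &cfg);

                cfg.direction = DMA_DEV_TO_MEM;
                cfg.src_addr = host->reg_phys + REG_DATA_WINDOW;
                cfg.dst_addr = 0;
                dmaengine_slave_config(host->dma_chan_rx, &cfg);
        }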
648 if (readl(host->base + REG_BUS_WIDTH) & BUS_WIDTH_4_SUPPORT)
651 writel(0, host->base + REG_INTERRUPT_MASK);
653 writel(CMD_SDC_RESET, host->base + REG_COMMAND);
655 if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET))
660 ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host);
669 dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);
674 if (!IS_ERR_OR_NULL(host->dma_chan_tx))
675 dma_release_channel(host->dma_chan_tx);
676 if (!IS_ERR_OR_NULL(host->dma_chan_rx))
677 dma_release_channel(host->dma_chan_rx);
687 struct moxart_host *host = mmc_priv(mmc);
691 if (!IS_ERR_OR_NULL(host->dma_chan_tx))
692 dma_release_channel(host->dma_chan_tx);
693 if (!IS_ERR_OR_NULL(host->dma_chan_rx))
694 dma_release_channel(host->dma_chan_rx);
697 writel(0, host->base + REG_INTERRUPT_MASK);
698 writel(0, host->base + REG_POWER_CONTROL);
699 writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
700 host->base + REG_CLOCK_CONTROL);