Lines matching defs:host in moxart-mmc.c (MOXA ART MMC host driver)

2  * MOXA ART MMC host driver.
25 #include <linux/mmc/host.h>
151 static inline void moxart_init_sg(struct moxart_host *host,
154 host->cur_sg = data->sg;
155 host->num_sg = data->sg_len;
156 host->data_remain = host->cur_sg->length;
158 if (host->data_remain > host->data_len)
159 host->data_remain = host->data_len;
162 static inline int moxart_next_sg(struct moxart_host *host)
165 struct mmc_data *data = host->mrq->cmd->data;
167 host->cur_sg++;
168 host->num_sg--;
170 if (host->num_sg > 0) {
171 host->data_remain = host->cur_sg->length;
172 remain = host->data_len - data->bytes_xfered;
173 if (remain > 0 && remain < host->data_remain)
174 host->data_remain = remain;
177 return host->num_sg;
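
The two helpers above (lines 151-177) implement a scatter-gather cursor: moxart_init_sg points at the first segment and clamps data_remain to the request length, and moxart_next_sg advances the cursor while clamping the final segment to the bytes still owed. A minimal user-space model of that bookkeeping; the seg struct is a stand-in for struct scatterlist and all lengths are illustrative, not driver code:

    #include <stdio.h>

    struct seg { unsigned int length; };

    struct cursor {
        struct seg *cur_sg;
        int num_sg;
        unsigned int data_remain, data_len, bytes_xfered;
    };

    static void init_sg(struct cursor *c, struct seg *sg, int n)
    {
        c->cur_sg = sg;
        c->num_sg = n;
        c->data_remain = sg->length;
        if (c->data_remain > c->data_len)   /* clamp first segment */
            c->data_remain = c->data_len;
    }

    static int next_sg(struct cursor *c)
    {
        int remain;

        c->cur_sg++;
        c->num_sg--;
        if (c->num_sg > 0) {
            c->data_remain = c->cur_sg->length;
            remain = c->data_len - c->bytes_xfered;  /* bytes still owed */
            if (remain > 0 && (unsigned int)remain < c->data_remain)
                c->data_remain = remain;             /* clamp last segment */
        }
        return c->num_sg;
    }

    int main(void)
    {
        struct seg sg[2] = { { 512 }, { 512 } };
        struct cursor c = { .data_len = 520 };  /* request < sg capacity */

        init_sg(&c, sg, 2);
        c.bytes_xfered = c.data_remain;         /* pretend 512 bytes moved */
        next_sg(&c);
        printf("second segment clamped to %u bytes\n", c.data_remain); /* 8 */
        return 0;
    }
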
180 static int moxart_wait_for_status(struct moxart_host *host,
187 *status = readl(host->base + REG_STATUS);
192 writel(*status & mask, host->base + REG_CLEAR);
198 dev_err(mmc_dev(host->mmc), "timed out waiting for status\n");
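
Only three lines of moxart_wait_for_status match the search, but they imply the shape of the elided polling loop: read REG_STATUS, acknowledge any requested bits through REG_CLEAR, and give up with -ETIMEDOUT after a bounded number of retries. A sketch of that loop follows; MAX_RETRIES and the 5 us poll interval are assumptions based on the mainline driver, not on the matched lines:

    static int moxart_wait_for_status(struct moxart_host *host,
                                      u32 mask, u32 *status)
    {
        int ret = -ETIMEDOUT;
        u32 i;

        for (i = 0; i < MAX_RETRIES; i++) {             /* bounded poll */
            *status = readl(host->base + REG_STATUS);
            if (!(*status & mask)) {
                udelay(5);                              /* assumed interval */
                continue;
            }
            writel(*status & mask, host->base + REG_CLEAR); /* ack the bits */
            ret = 0;
            break;
        }

        if (ret == -ETIMEDOUT)
            dev_err(mmc_dev(host->mmc), "timed out waiting for status\n");

        return ret;
    }
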
204 static void moxart_send_command(struct moxart_host *host,
210 RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR);
211 writel(cmd->arg, host->base + REG_ARGUMENT);
225 writel(cmdctrl | CMD_EN, host->base + REG_COMMAND);
227 if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT)
240 cmd->resp[3] = readl(host->base + REG_RESPONSE0);
241 cmd->resp[2] = readl(host->base + REG_RESPONSE1);
242 cmd->resp[1] = readl(host->base + REG_RESPONSE2);
243 cmd->resp[0] = readl(host->base + REG_RESPONSE3);
245 cmd->resp[0] = readl(host->base + REG_RESPONSE0);
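
Lines 240-245 capture both response layouts. For a 136-bit response (R2, selected by the MMC core's MMC_RSP_136 flag), REG_RESPONSE0 holds the least significant word, so the four reads land in resp[3] down to resp[0]; short responses fit entirely in REG_RESPONSE0. The branch around the matched lines plausibly reads:

    /* 136-bit (R2) responses span all four response registers;
     * RESPONSE0 is the least significant word, hence resp[3]. */
    if (cmd->flags & MMC_RSP_136) {
        cmd->resp[3] = readl(host->base + REG_RESPONSE0);
        cmd->resp[2] = readl(host->base + REG_RESPONSE1);
        cmd->resp[1] = readl(host->base + REG_RESPONSE2);
        cmd->resp[0] = readl(host->base + REG_RESPONSE3);
    } else {
        cmd->resp[0] = readl(host->base + REG_RESPONSE0);
    }
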
252 struct moxart_host *host = param;
254 complete(&host->dma_complete);
257 static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
263 if (host->data_len == data->bytes_xfered)
267 dma_chan = host->dma_chan_tx;
270 dma_chan = host->dma_chan_rx;
283 dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n");
287 host->tx_desc = desc;
289 desc->callback_param = host;
294 data->bytes_xfered += host->data_remain;
296 wait_for_completion_interruptible_timeout(&host->dma_complete,
297 host->timeout);
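
The DMA path (lines 252-297) is a standard dmaengine slave transfer: map the request's scatterlist, prepare a slave_sg descriptor on the direction-appropriate channel, install moxart_dma_complete as the completion callback, then sleep on dma_complete for at most host->timeout. A hedged sketch of the submit sequence; dir_slave and the descriptor flags are assumptions, the rest follows the matched lines:

    unsigned int len;
    struct dma_async_tx_descriptor *desc = NULL;

    /* dir_slave: DMA_MEM_TO_DEV for writes, DMA_DEV_TO_MEM for reads */
    len = dma_map_sg(dma_chan->device->dev, data->sg,
                     data->sg_len, mmc_get_dma_dir(data));
    if (len > 0)
        desc = dmaengine_prep_slave_sg(dma_chan, data->sg, len, dir_slave,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

    if (desc) {
        host->tx_desc = desc;
        desc->callback = moxart_dma_complete;   /* completes dma_complete */
        desc->callback_param = host;
        dmaengine_submit(desc);
        dma_async_issue_pending(dma_chan);

        data->bytes_xfered += host->data_remain;
        wait_for_completion_interruptible_timeout(&host->dma_complete,
                                                  host->timeout);
        dma_unmap_sg(dma_chan->device->dev, data->sg,
                     data->sg_len, mmc_get_dma_dir(data));
    }
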
305 static void moxart_transfer_pio(struct moxart_host *host)
307 struct mmc_data *data = host->mrq->cmd->data;
310 if (host->data_len == data->bytes_xfered)
313 sgp = sg_virt(host->cur_sg);
314 remain = host->data_remain;
318 if (moxart_wait_for_status(host, FIFO_URUN, &status)
321 complete(&host->pio_complete);
324 for (len = 0; len < remain && len < host->fifo_width;) {
325 iowrite32(*sgp, host->base + REG_DATA_WINDOW);
334 if (moxart_wait_for_status(host, FIFO_ORUN, &status)
337 complete(&host->pio_complete);
340 for (len = 0; len < remain && len < host->fifo_width;) {
341 *sgp = ioread32(host->base + REG_DATA_WINDOW);
349 data->bytes_xfered += host->data_remain - remain;
350 host->data_remain = remain;
352 if (host->data_len != data->bytes_xfered)
353 moxart_next_sg(host);
355 complete(&host->pio_complete);
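
PIO (lines 305-355) moves at most one FIFO's worth of 32-bit words per interrupt through REG_DATA_WINDOW: FIFO_URUN gates the write direction, FIFO_ORUN the read direction, bytes_xfered advances by whatever the inner loop consumed, and pio_complete fires either on error or once the request is fully transferred. The read side likely looks like the sketch below (the write side mirrors it with iowrite32); sgp is the pointer obtained from sg_virt() on line 313, and the error handling is an assumption:

    if (moxart_wait_for_status(host, FIFO_ORUN, &status) == -ETIMEDOUT) {
        data->error = -ETIMEDOUT;           /* assumed error path */
        complete(&host->pio_complete);
        return;
    }
    for (len = 0; len < remain && len < host->fifo_width;) {
        *sgp = ioread32(host->base + REG_DATA_WINDOW);
        sgp++;
        len += 4;                           /* one 32-bit word per access */
    }
    remain -= len;
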
358 static void moxart_prepare_data(struct moxart_host *host)
360 struct mmc_data *data = host->mrq->cmd->data;
367 host->data_len = data->blocks * data->blksz;
371 moxart_init_sg(host, data);
378 if ((host->data_len > host->fifo_width) && host->have_dma)
381 writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
382 writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
383 writel(host->rate, host->base + REG_DATA_TIMER);
384 writel(host->data_len, host->base + REG_DATA_LENGTH);
385 writel(datactrl, host->base + REG_DATA_CONTROL);
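
moxart_prepare_data programs a transfer in a fixed order: reset the data FIFO, clear stale data and FIFO status, arm the data timer with the current rate, write the byte count, and finally write the composed datactrl word. The composition itself is elided by the match; a sketch, under the assumption that the DCR_* bit names beyond the DCR_DATA_FIFO_RESET seen above (DCR_DATA_EN, DCR_BLK_SIZE, DCR_DATA_WRITE, DCR_DMA_EN) follow mainline:

    blksz_bits = ffs(data->blksz) - 1;          /* block size as log2 */
    datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);

    if (data->flags & MMC_DATA_WRITE)
        datactrl |= DCR_DATA_WRITE;             /* direction: host to card */

    /* DMA only when the transfer exceeds one FIFO and channels exist,
     * matching the condition on line 378 */
    if ((host->data_len > host->fifo_width) && host->have_dma)
        datactrl |= DCR_DMA_EN;
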
390 struct moxart_host *host = mmc_priv(mmc);
394 spin_lock_irqsave(&host->lock, flags);
396 init_completion(&host->dma_complete);
397 init_completion(&host->pio_complete);
399 host->mrq = mrq;
401 if (readl(host->base + REG_STATUS) & CARD_DETECT) {
406 moxart_prepare_data(host);
407 moxart_send_command(host, host->mrq->cmd);
410 if ((host->data_len > host->fifo_width) && host->have_dma) {
412 writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
414 spin_unlock_irqrestore(&host->lock, flags);
416 moxart_transfer_dma(mrq->cmd->data, host);
418 spin_lock_irqsave(&host->lock, flags);
421 writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK);
423 spin_unlock_irqrestore(&host->lock, flags);
426 wait_for_completion_interruptible_timeout(&host->pio_complete,
427 host->timeout);
429 spin_lock_irqsave(&host->lock, flags);
432 if (host->is_removed) {
433 dev_err(mmc_dev(host->mmc), "card removed\n");
438 if (moxart_wait_for_status(host, MASK_DATA, &status)
448 moxart_send_command(host, mrq->cmd->data->stop);
452 spin_unlock_irqrestore(&host->lock, flags);
453 mmc_request_done(host->mmc, mrq);
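
The notable property of moxart_request is its locking: host->lock protects register access, but both transfer paths block, so the lock is dropped around the wait and retaken before the completion checks. For DMA, everything except CARD_CHANGE is masked and moxart_transfer_dma sleeps internally; for PIO, the PIO interrupts are unmasked and the IRQ handler drives the FIFO until pio_complete fires. A condensed sketch; use_dma is a stand-in name for the condition on line 410:

    spin_lock_irqsave(&host->lock, flags);
    moxart_prepare_data(host);
    moxart_send_command(host, host->mrq->cmd);

    if (use_dma) {   /* data_len > fifo_width && have_dma */
        writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
        spin_unlock_irqrestore(&host->lock, flags);
        moxart_transfer_dma(mrq->cmd->data, host);   /* blocks internally */
        spin_lock_irqsave(&host->lock, flags);
    } else {
        writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK);
        spin_unlock_irqrestore(&host->lock, flags);
        /* the IRQ handler feeds moxart_transfer_pio() until done */
        wait_for_completion_interruptible_timeout(&host->pio_complete,
                                                  host->timeout);
        spin_lock_irqsave(&host->lock, flags);
    }
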
458 struct moxart_host *host = (struct moxart_host *)devid;
461 spin_lock(&host->lock);
463 status = readl(host->base + REG_STATUS);
465 host->is_removed = status & CARD_DETECT;
466 if (host->is_removed && host->have_dma) {
467 dmaengine_terminate_all(host->dma_chan_tx);
468 dmaengine_terminate_all(host->dma_chan_rx);
470 host->mrq = NULL;
471 writel(MASK_INTR_PIO, host->base + REG_CLEAR);
472 writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
473 mmc_detect_change(host->mmc, 0);
475 if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq)
476 moxart_transfer_pio(host);
478 spin_unlock(&host->lock);
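
The IRQ handler splits into two concerns: card-change events and FIFO service. On removal it terminates both DMA channels, abandons the in-flight request, re-arms only CARD_CHANGE, and lets mmc_detect_change() rescan; FIFO_ORUN/FIFO_URUN with a live request feed the PIO engine. A sketch of the dispatch; the outer CARD_CHANGE test is assumed, everything inside it comes from the matched lines:

    status = readl(host->base + REG_STATUS);
    if (status & CARD_CHANGE) {
        host->is_removed = status & CARD_DETECT;
        if (host->is_removed && host->have_dma) {
            dmaengine_terminate_all(host->dma_chan_tx);
            dmaengine_terminate_all(host->dma_chan_rx);
        }
        host->mrq = NULL;                   /* abandon current request */
        writel(MASK_INTR_PIO, host->base + REG_CLEAR);
        writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
        mmc_detect_change(host->mmc, 0);    /* schedule a rescan */
    }
    if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq)
        moxart_transfer_pio(host);          /* FIFO ready: move more data */
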
485 struct moxart_host *host = mmc_priv(mmc);
490 spin_lock_irqsave(&host->lock, flags);
494 if (ios->clock >= host->sysclk / (2 * (div + 1)))
498 host->rate = host->sysclk / (2 * (div + 1));
499 if (host->rate > host->sysclk)
501 writel(ctrl, host->base + REG_CLOCK_CONTROL);
505 writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON,
506 host->base + REG_POWER_CONTROL);
514 host->base + REG_POWER_CONTROL);
519 writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
522 writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
526 spin_unlock_irqrestore(&host->lock, flags);
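
The clock setup searches for the smallest divider such that sysclk / (2 * (div + 1)) does not exceed the requested frequency, then records the achieved rate. A runnable model of that search; CLK_DIV_MASK = 0x7f is an assumption, since the listing only shows the constant as the loop bound and in the f_min computation on line 606:

    #include <stdio.h>

    #define CLK_DIV_MASK 0x7f   /* assumed 7-bit divider field */

    static unsigned int pick_rate(unsigned int sysclk, unsigned int clock)
    {
        unsigned int div;

        /* smallest div whose output rate fits under the request */
        for (div = 0; div < CLK_DIV_MASK; ++div)
            if (clock >= sysclk / (2 * (div + 1)))
                break;
        return sysclk / (2 * (div + 1));
    }

    int main(void)
    {
        /* with a 50 MHz source: 25 MHz is exact (div = 0), while a
         * 400 kHz request yields ~396.8 kHz (div = 62) */
        printf("%u\n", pick_rate(50000000, 25000000));
        printf("%u\n", pick_rate(50000000, 400000));
        return 0;
    }
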
532 struct moxart_host *host = mmc_priv(mmc);
534 return !!(readl(host->base + REG_STATUS) & WRITE_PROT);
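
moxart_get_ro simply reflects the WRITE_PROT status bit, double-negated into the 0/1 the MMC core expects. The ops table wiring these callbacks together falls outside the matched lines; a sketch of what it presumably contains (struct mmc_host_ops and its member names are the kernel's, the moxart_* function names are inferred from the functions above):

    static const struct mmc_host_ops moxart_ops = {
        .request = moxart_request,   /* request path above */
        .set_ios = moxart_set_ios,   /* clock/power/bus-width setup */
        .get_ro  = moxart_get_ro,    /* WRITE_PROT readout */
    };
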
549 struct moxart_host *host = NULL;
592 host = mmc_priv(mmc);
593 host->mmc = mmc;
594 host->base = reg_mmc;
595 host->reg_phys = res_mmc.start;
596 host->timeout = msecs_to_jiffies(1000);
597 host->sysclk = clk_get_rate(clk);
598 host->fifo_width = readl(host->base + REG_FEATURE) << 2;
599 host->dma_chan_tx = dma_request_chan(dev, "tx");
600 host->dma_chan_rx = dma_request_chan(dev, "rx");
602 spin_lock_init(&host->lock);
605 mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2);
606 mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2);
612 if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) {
613 if (PTR_ERR(host->dma_chan_tx) == -EPROBE_DEFER ||
614 PTR_ERR(host->dma_chan_rx) == -EPROBE_DEFER) {
618 if (!IS_ERR(host->dma_chan_tx)) {
619 dma_release_channel(host->dma_chan_tx);
620 host->dma_chan_tx = NULL;
622 if (!IS_ERR(host->dma_chan_rx)) {
623 dma_release_channel(host->dma_chan_rx);
624 host->dma_chan_rx = NULL;
627 host->have_dma = false;
632 host->dma_chan_tx, host->dma_chan_rx);
633 host->have_dma = true;
641 cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW;
642 dmaengine_slave_config(host->dma_chan_tx, &cfg);
645 cfg.src_addr = host->reg_phys + REG_DATA_WINDOW;
647 dmaengine_slave_config(host->dma_chan_rx, &cfg);
650 dma_get_max_seg_size(host->dma_chan_rx->device->dev),
651 dma_get_max_seg_size(host->dma_chan_tx->device->dev));
654 if (readl(host->base + REG_BUS_WIDTH) & BUS_WIDTH_4_SUPPORT)
657 writel(0, host->base + REG_INTERRUPT_MASK);
659 writel(CMD_SDC_RESET, host->base + REG_COMMAND);
661 if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET))
666 ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host);
675 dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);
680 if (!IS_ERR_OR_NULL(host->dma_chan_tx))
681 dma_release_channel(host->dma_chan_tx);
682 if (!IS_ERR_OR_NULL(host->dma_chan_rx))
683 dma_release_channel(host->dma_chan_rx);
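
Probe acquires both DMA channels up front and degrades gracefully: -EPROBE_DEFER from either channel is propagated so probing retries later, any other failure releases whichever channel was obtained and falls back to PIO (have_dma = false), and only with both channels does it program the slave configuration so each direction targets the REG_DATA_WINDOW FIFO at its physical address (line 598 likewise derives fifo_width in bytes from REG_FEATURE, which apparently reports the depth in 32-bit words, hence the << 2). A sketch of that configuration; the address widths are assumptions, the addresses follow lines 641 and 645:

    struct dma_slave_config cfg = {
        .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,  /* assumed */
        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,  /* assumed */
    };

    cfg.direction = DMA_MEM_TO_DEV;
    cfg.src_addr = 0;
    cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW;   /* write FIFO */
    dmaengine_slave_config(host->dma_chan_tx, &cfg);

    cfg.direction = DMA_DEV_TO_MEM;
    cfg.src_addr = host->reg_phys + REG_DATA_WINDOW;   /* read FIFO */
    cfg.dst_addr = 0;
    dmaengine_slave_config(host->dma_chan_rx, &cfg);
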
693 struct moxart_host *host = mmc_priv(mmc);
695 if (!IS_ERR_OR_NULL(host->dma_chan_tx))
696 dma_release_channel(host->dma_chan_tx);
697 if (!IS_ERR_OR_NULL(host->dma_chan_rx))
698 dma_release_channel(host->dma_chan_rx);
701 writel(0, host->base + REG_INTERRUPT_MASK);
702 writel(0, host->base + REG_POWER_CONTROL);
703 writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
704 host->base + REG_CLOCK_CONTROL);