Lines matching refs:owl_host (every source line in the driver that references owl_host):
136 struct owl_mmc_host *owl_host = devid;
140 spin_lock_irqsave(&owl_host->lock, flags);
142 state = readl(owl_host->base + OWL_REG_SD_STATE);
144 state = readl(owl_host->base + OWL_REG_SD_STATE);
146 writel(state, owl_host->base + OWL_REG_SD_STATE);
147 complete(&owl_host->sdc_complete);
150 spin_unlock_irqrestore(&owl_host->lock, flags);
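
Lines 136-150 fall inside the interrupt handler. A minimal sketch of how it is plausibly structured, assuming OWL_SD_STATE_TEI as the name of the transfer-end interrupt bit (only the register accesses themselves appear in the listing):

static irqreturn_t owl_irq_handler(int irq, void *devid)
{
    struct owl_mmc_host *owl_host = devid;
    unsigned long flags;
    u32 state;

    spin_lock_irqsave(&owl_host->lock, flags);

    state = readl(owl_host->base + OWL_REG_SD_STATE);
    if (state & OWL_SD_STATE_TEI) {
        /* Re-read the status, write it back to acknowledge the
         * interrupt, and wake whoever waits on sdc_complete.
         */
        state = readl(owl_host->base + OWL_REG_SD_STATE);
        writel(state, owl_host->base + OWL_REG_SD_STATE);
        complete(&owl_host->sdc_complete);
    }

    spin_unlock_irqrestore(&owl_host->lock, flags);

    return IRQ_HANDLED;
}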
155 static void owl_mmc_finish_request(struct owl_mmc_host *owl_host)
157 struct mmc_request *mrq = owl_host->mrq;
163 owl_host->mrq = NULL;
166 dma_unmap_sg(owl_host->dma->device->dev, data->sg, data->sg_len,
167 owl_host->dma_dir);
170 mmc_request_done(owl_host->mmc, mrq);
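
Lines 155-170 are the request-completion helper. A plausible sketch: clear the in-flight request, unmap the scatterlist if the request carried data, and hand the result back to the MMC core:

static void owl_mmc_finish_request(struct owl_mmc_host *owl_host)
{
    struct mmc_request *mrq = owl_host->mrq;
    struct mmc_data *data = mrq->data;

    owl_host->mrq = NULL;

    /* Only data requests were mapped in owl_mmc_prepare_data(). */
    if (data)
        dma_unmap_sg(owl_host->dma->device->dev, data->sg,
                     data->sg_len, owl_host->dma_dir);

    mmc_request_done(owl_host->mmc, mrq);
}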
173 static void owl_mmc_send_cmd(struct owl_mmc_host *owl_host,
181 init_completion(&owl_host->sdc_complete);
217 dev_warn(owl_host->dev, "Unknown MMC command\n");
223 mode |= (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
226 writel(cmd->arg, owl_host->base + OWL_REG_SD_ARG);
227 writel(cmd->opcode, owl_host->base + OWL_REG_SD_CMD);
237 owl_host->cmd = cmd;
240 writel(mode, owl_host->base + OWL_REG_SD_CTL);
248 if (!wait_for_completion_timeout(&owl_host->sdc_complete, timeout)) {
249 dev_err(owl_host->dev, "CMD interrupt timeout\n");
254 state = readl(owl_host->base + OWL_REG_SD_STATE);
258 dev_err(owl_host->dev, "Error CMD_NO_RSP\n");
264 dev_err(owl_host->dev, "Error CMD_RSP_CRC\n");
271 cmd->resp[3] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
272 cmd->resp[2] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
273 cmd->resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF2);
274 cmd->resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF3);
276 resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
277 resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
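
Lines 173-277 are the command path: build a mode word from the response type, write the argument and opcode, start the transfer, wait for the interrupt, check the status register for errors, then read back the response buffers. A compressed sketch follows; the owl_mmc_cmd_mode() helper, the 30-second timeout, the OWL_SD_STATE_CLNR/CRC7ER bit names and the 48-bit response reassembly are assumptions:

static void owl_mmc_send_cmd(struct owl_mmc_host *owl_host,
                             struct mmc_command *cmd,
                             struct mmc_data *data)
{
    u32 mode, state, resp[2];

    init_completion(&owl_host->sdc_complete);

    /* Hypothetical helper: translate cmd->flags (MMC_RSP_*) into the
     * controller's command/response mode bits; unknown commands warn.
     */
    mode = owl_mmc_cmd_mode(owl_host, cmd);
    /* Keep the clock-related bits already programmed in SD_CTL. */
    mode |= readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16);

    writel(cmd->arg, owl_host->base + OWL_REG_SD_ARG);
    writel(cmd->opcode, owl_host->base + OWL_REG_SD_CMD);
    owl_host->cmd = cmd;

    /* Start the transfer; owl_irq_handler() completes sdc_complete. */
    writel(mode, owl_host->base + OWL_REG_SD_CTL);
    if (!wait_for_completion_timeout(&owl_host->sdc_complete, 30 * HZ)) {
        dev_err(owl_host->dev, "CMD interrupt timeout\n");
        cmd->error = -ETIMEDOUT;
        return;
    }

    state = readl(owl_host->base + OWL_REG_SD_STATE);
    if (cmd->flags & MMC_RSP_PRESENT) {
        if (state & OWL_SD_STATE_CLNR) {        /* assumed: no response */
            dev_err(owl_host->dev, "Error CMD_NO_RSP\n");
            cmd->error = -EILSEQ;
            return;
        }
        if ((cmd->flags & MMC_RSP_CRC) &&
            (state & OWL_SD_STATE_CRC7ER)) {    /* assumed: CRC error */
            dev_err(owl_host->dev, "Error CMD_RSP_CRC\n");
            cmd->error = -EILSEQ;
            return;
        }

        if (cmd->flags & MMC_RSP_136) {
            /* 136-bit responses span all four response buffers. */
            cmd->resp[3] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
            cmd->resp[2] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
            cmd->resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF2);
            cmd->resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF3);
        } else {
            /* 48-bit responses: this reassembly is an assumption. */
            resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
            resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
            cmd->resp[0] = (resp[1] << 24) | (resp[0] >> 8);
        }
    }
}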
286 struct owl_mmc_host *owl_host = param;
287 struct mmc_data *data = owl_host->data;
290 complete(&owl_host->dma_complete);
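
Lines 286-290 are the dmaengine completion callback: once the descriptor finishes it signals owl_mmc_request(), which waits on dma_complete:

static void owl_mmc_dma_complete(void *param)
{
    struct owl_mmc_host *owl_host = param;
    struct mmc_data *data = owl_host->data;

    if (data)
        complete(&owl_host->dma_complete);
}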
293 static int owl_mmc_prepare_data(struct owl_mmc_host *owl_host,
298 owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, OWL_SD_EN_BSEL,
300 writel(data->blocks, owl_host->base + OWL_REG_SD_BLK_NUM);
301 writel(data->blksz, owl_host->base + OWL_REG_SD_BLK_SIZE);
305 writel(total, owl_host->base + OWL_REG_SD_BUF_SIZE);
307 writel(512, owl_host->base + OWL_REG_SD_BUF_SIZE);
310 owl_host->dma_dir = DMA_TO_DEVICE;
311 owl_host->dma_cfg.direction = DMA_MEM_TO_DEV;
313 owl_host->dma_dir = DMA_FROM_DEVICE;
314 owl_host->dma_cfg.direction = DMA_DEV_TO_MEM;
317 dma_map_sg(owl_host->dma->device->dev, data->sg,
318 data->sg_len, owl_host->dma_dir);
320 dmaengine_slave_config(owl_host->dma, &owl_host->dma_cfg);
321 owl_host->desc = dmaengine_prep_slave_sg(owl_host->dma, data->sg,
323 owl_host->dma_cfg.direction,
326 if (!owl_host->desc) {
327 dev_err(owl_host->dev, "Can't prepare slave sg\n");
331 owl_host->data = data;
333 owl_host->desc->callback = owl_mmc_dma_complete;
334 owl_host->desc->callback_param = (void *)owl_host;
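
Lines 293-334 prepare a data transfer: switch the data path to the external DMA engine, program the block geometry and buffer size, map the scatterlist for the right direction, and build the slave-sg descriptor with owl_mmc_dma_complete() as its callback. A sketch, with the buffer-size threshold, the owl_mmc_update_reg() enable argument and the descriptor flags as assumptions:

static int owl_mmc_prepare_data(struct owl_mmc_host *owl_host,
                                struct mmc_data *data)
{
    u32 total;

    /* Select the external-DMA data path (bus select) and program the
     * block count and block size.
     */
    owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, OWL_SD_EN_BSEL, true);
    writel(data->blocks, owl_host->base + OWL_REG_SD_BLK_NUM);
    writel(data->blksz, owl_host->base + OWL_REG_SD_BLK_SIZE);

    /* Buffer size: the 512-byte cap and the threshold are assumed. */
    total = data->blksz * data->blocks;
    if (total < 512)
        writel(total, owl_host->base + OWL_REG_SD_BUF_SIZE);
    else
        writel(512, owl_host->base + OWL_REG_SD_BUF_SIZE);

    if (data->flags & MMC_DATA_WRITE) {
        owl_host->dma_dir = DMA_TO_DEVICE;
        owl_host->dma_cfg.direction = DMA_MEM_TO_DEV;
    } else {
        owl_host->dma_dir = DMA_FROM_DEVICE;
        owl_host->dma_cfg.direction = DMA_DEV_TO_MEM;
    }

    dma_map_sg(owl_host->dma->device->dev, data->sg, data->sg_len,
               owl_host->dma_dir);
    dmaengine_slave_config(owl_host->dma, &owl_host->dma_cfg);
    owl_host->desc = dmaengine_prep_slave_sg(owl_host->dma, data->sg,
                                             data->sg_len,
                                             owl_host->dma_cfg.direction,
                                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (!owl_host->desc) {
        dev_err(owl_host->dev, "Can't prepare slave sg\n");
        return -EBUSY;
    }

    owl_host->data = data;
    owl_host->desc->callback = owl_mmc_dma_complete;
    owl_host->desc->callback_param = (void *)owl_host;

    return 0;
}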
342 struct owl_mmc_host *owl_host = mmc_priv(mmc);
346 owl_host->mrq = mrq;
348 ret = owl_mmc_prepare_data(owl_host, data);
354 init_completion(&owl_host->dma_complete);
355 dmaengine_submit(owl_host->desc);
356 dma_async_issue_pending(owl_host->dma);
359 owl_mmc_send_cmd(owl_host, mrq->cmd, data);
362 if (!wait_for_completion_timeout(&owl_host->sdc_complete,
364 dev_err(owl_host->dev, "CMD interrupt timeout\n");
366 dmaengine_terminate_all(owl_host->dma);
370 if (!wait_for_completion_timeout(&owl_host->dma_complete,
372 dev_err(owl_host->dev, "DMA interrupt timeout\n");
374 dmaengine_terminate_all(owl_host->dma);
379 owl_mmc_send_cmd(owl_host, data->stop, NULL);
385 owl_mmc_finish_request(owl_host);
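
Lines 342-385 are the ->request() handler: prepare the DMA descriptor, kick it off, issue the command, then wait for both the command and DMA completions before finishing the request. A sketch; the timeout lengths, error codes and the label name are assumptions:

static void owl_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
    struct owl_mmc_host *owl_host = mmc_priv(mmc);
    struct mmc_data *data = mrq->data;
    int ret;

    owl_host->mrq = mrq;

    if (data) {
        ret = owl_mmc_prepare_data(owl_host, data);
        if (ret < 0) {
            data->error = ret;
            goto err_out;
        }

        init_completion(&owl_host->dma_complete);
        dmaengine_submit(owl_host->desc);
        dma_async_issue_pending(owl_host->dma);
    }

    owl_mmc_send_cmd(owl_host, mrq->cmd, data);

    if (data) {
        /* The command interrupt and the DMA callback each get their
         * own timeout; on failure the DMA transfer is torn down.
         */
        if (!wait_for_completion_timeout(&owl_host->sdc_complete, 30 * HZ)) {
            dev_err(owl_host->dev, "CMD interrupt timeout\n");
            mrq->cmd->error = -ETIMEDOUT;
            dmaengine_terminate_all(owl_host->dma);
            goto err_out;
        }

        if (!wait_for_completion_timeout(&owl_host->dma_complete, 5 * HZ)) {
            dev_err(owl_host->dev, "DMA interrupt timeout\n");
            mrq->cmd->error = -ETIMEDOUT;
            dmaengine_terminate_all(owl_host->dma);
            goto err_out;
        }

        if (data->stop)
            owl_mmc_send_cmd(owl_host, data->stop, NULL);

        data->bytes_xfered = data->blocks * data->blksz;
    }

err_out:
    owl_mmc_finish_request(owl_host);
}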
388 static int owl_mmc_set_clk_rate(struct owl_mmc_host *owl_host,
395 reg = readl(owl_host->base + OWL_REG_SD_CTL);
402 owl_host->base + OWL_REG_SD_CTL);
406 owl_host->base + OWL_REG_SD_CTL);
407 } else if ((rate > 26000000) && (rate <= 52000000) && !owl_host->ddr_50) {
410 owl_host->base + OWL_REG_SD_CTL);
412 } else if ((rate > 26000000) && (rate <= 52000000) && owl_host->ddr_50) {
415 owl_host->base + OWL_REG_SD_CTL);
417 dev_err(owl_host->dev, "SD clock rate not supported\n");
421 clk_rate = clk_round_rate(owl_host->clk, rate << 1);
422 ret = clk_set_rate(owl_host->clk, clk_rate);
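
Lines 388-422 set the SD clock: adjust the delay taps in SD_CTL for the target speed range, then ask the clock framework for twice the requested rate, presumably because the controller divides its module clock by two to produce the card clock. The concrete tap values for the <=1 MHz, <=26 MHz and 26-52 MHz (SDR/DDR) ranges are not in the listing, so the sketch elides them:

static int owl_mmc_set_clk_rate(struct owl_mmc_host *owl_host,
                                unsigned int rate)
{
    unsigned long clk_rate;
    u32 reg;

    reg = readl(owl_host->base + OWL_REG_SD_CTL);
    /* ... pick read/write delay taps in 'reg' for the requested range,
     * using owl_host->ddr_50 to distinguish SDR from DDR at 26-52 MHz,
     * and reject unsupported rates ("SD clock rate not supported") ...
     */
    writel(reg, owl_host->base + OWL_REG_SD_CTL);

    /* Card clock = module clock / 2, hence the doubled request. */
    clk_rate = clk_round_rate(owl_host->clk, rate << 1);

    return clk_set_rate(owl_host->clk, clk_rate);
}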
427 static void owl_mmc_set_clk(struct owl_mmc_host *owl_host, struct mmc_ios *ios)
432 owl_host->clock = ios->clock;
433 owl_mmc_set_clk_rate(owl_host, ios->clock);
436 static void owl_mmc_set_bus_width(struct owl_mmc_host *owl_host,
441 reg = readl(owl_host->base + OWL_REG_SD_EN);
454 writel(reg, owl_host->base + OWL_REG_SD_EN);
457 static void owl_mmc_ctr_reset(struct owl_mmc_host *owl_host)
459 reset_control_assert(owl_host->reset);
461 reset_control_deassert(owl_host->reset);
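
Lines 457-461 pulse the controller reset line; the settling delay between assert and deassert is an assumption:

static void owl_mmc_ctr_reset(struct owl_mmc_host *owl_host)
{
    reset_control_assert(owl_host->reset);
    udelay(20);                     /* assumed settling time */
    reset_control_deassert(owl_host->reset);
}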
464 static void owl_mmc_power_on(struct owl_mmc_host *owl_host)
468 init_completion(&owl_host->sdc_complete);
471 owl_mmc_update_reg(owl_host->base + OWL_REG_SD_STATE,
475 mode = (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
477 writel(mode, owl_host->base + OWL_REG_SD_CTL);
479 if (!wait_for_completion_timeout(&owl_host->sdc_complete, HZ)) {
480 dev_err(owl_host->dev, "CMD interrupt timeout\n");
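
Lines 464-480 are the power-on sequence: unmask the transfer-end interrupt, have the controller clock out the initialization cycles the SD spec requires before the first command, and wait up to one second for the resulting interrupt. The two bit names below are assumptions:

static void owl_mmc_power_on(struct owl_mmc_host *owl_host)
{
    u32 mode;

    init_completion(&owl_host->sdc_complete);

    /* Unmask the transfer-end interrupt (assumed OWL_SD_STATE_TEIE). */
    owl_mmc_update_reg(owl_host->base + OWL_REG_SD_STATE,
                       OWL_SD_STATE_TEIE, true);

    /* Keep the clock bits and start the "send init clocks" command
     * (assumed OWL_SD_CTL_SEND_INIT_CLK encoding).
     */
    mode = readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16);
    mode |= OWL_SD_CTL_SEND_INIT_CLK;
    writel(mode, owl_host->base + OWL_REG_SD_CTL);

    if (!wait_for_completion_timeout(&owl_host->sdc_complete, HZ))
        dev_err(owl_host->dev, "CMD interrupt timeout\n");
}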
487 struct owl_mmc_host *owl_host = mmc_priv(mmc);
491 dev_dbg(owl_host->dev, "Powering card up\n");
494 owl_mmc_ctr_reset(owl_host);
495 clk_prepare_enable(owl_host->clk);
497 owl_host->base + OWL_REG_SD_EN);
502 dev_dbg(owl_host->dev, "Powering card on\n");
503 owl_mmc_power_on(owl_host);
508 dev_dbg(owl_host->dev, "Powering card off\n");
509 clk_disable_unprepare(owl_host->clk);
514 dev_dbg(owl_host->dev, "Ignoring unknown card power state\n");
518 if (ios->clock != owl_host->clock)
519 owl_mmc_set_clk(owl_host, ios);
521 owl_mmc_set_bus_width(owl_host, ios);
525 owl_host->ddr_50 = 1;
526 owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
529 owl_host->ddr_50 = 0;
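
Lines 487-529 are ->set_ios(): handle the power-mode transitions, touch the clock only when it changes, update the bus width, and remember whether DDR50 timing is selected so owl_mmc_set_clk_rate() can pick matching delay taps. A sketch; OWL_SD_ENABLE and OWL_SD_EN_DDREN are assumed names:

static void owl_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
    struct owl_mmc_host *owl_host = mmc_priv(mmc);

    switch (ios->power_mode) {
    case MMC_POWER_UP:
        dev_dbg(owl_host->dev, "Powering card up\n");
        /* Reset the controller, enable its clock and the module itself. */
        owl_mmc_ctr_reset(owl_host);
        clk_prepare_enable(owl_host->clk);
        writel(OWL_SD_ENABLE, owl_host->base + OWL_REG_SD_EN);
        break;
    case MMC_POWER_ON:
        dev_dbg(owl_host->dev, "Powering card on\n");
        owl_mmc_power_on(owl_host);
        break;
    case MMC_POWER_OFF:
        dev_dbg(owl_host->dev, "Powering card off\n");
        clk_disable_unprepare(owl_host->clk);
        return;
    default:
        dev_dbg(owl_host->dev, "Ignoring unknown card power state\n");
        break;
    }

    if (ios->clock != owl_host->clock)
        owl_mmc_set_clk(owl_host, ios);

    owl_mmc_set_bus_width(owl_host, ios);

    if (ios->timing == MMC_TIMING_UHS_DDR50) {
        owl_host->ddr_50 = 1;
        owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
                           OWL_SD_EN_DDREN, true);
    } else {
        owl_host->ddr_50 = 0;
    }
}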
536 struct owl_mmc_host *owl_host = mmc_priv(mmc);
541 owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
545 owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
565 struct owl_mmc_host *owl_host;
577 owl_host = mmc_priv(mmc);
578 owl_host->dev = &pdev->dev;
579 owl_host->mmc = mmc;
580 spin_lock_init(&owl_host->lock);
583 owl_host->base = devm_ioremap_resource(&pdev->dev, res);
584 if (IS_ERR(owl_host->base)) {
586 ret = PTR_ERR(owl_host->base);
590 owl_host->clk = devm_clk_get(&pdev->dev, NULL);
591 if (IS_ERR(owl_host->clk)) {
593 ret = PTR_ERR(owl_host->clk);
597 owl_host->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
598 if (IS_ERR(owl_host->reset)) {
600 ret = PTR_ERR(owl_host->reset);
625 owl_host->dma = dma_request_chan(&pdev->dev, "mmc");
626 if (IS_ERR(owl_host->dma)) {
627 dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
628 ret = PTR_ERR(owl_host->dma);
633 dma_chan_name(owl_host->dma));
635 owl_host->dma_cfg.src_addr = res->start + OWL_REG_SD_DAT;
636 owl_host->dma_cfg.dst_addr = res->start + OWL_REG_SD_DAT;
637 owl_host->dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
638 owl_host->dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
639 owl_host->dma_cfg.device_fc = false;
641 owl_host->irq = platform_get_irq(pdev, 0);
642 if (owl_host->irq < 0) {
643 ret = owl_host->irq;
647 ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler,
648 0, dev_name(&pdev->dev), owl_host);
651 owl_host->irq);
666 dma_release_channel(owl_host->dma);
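
Lines 565-666 are the probe path: allocate the mmc host, map the register resource, get the clock and reset control, request the named "mmc" DMA channel, point both slave-config addresses at the data FIFO (OWL_REG_SD_DAT) with 32-bit accesses, and install the interrupt handler. The sketch below collects the DMA/IRQ portion into a hypothetical helper; in the driver this code sits directly inside owl_mmc_probe(), and the message strings are assumptions:

static int owl_mmc_setup_dma_irq(struct platform_device *pdev,
                                 struct owl_mmc_host *owl_host,
                                 struct resource *res)
{
    int ret;

    owl_host->dma = dma_request_chan(&pdev->dev, "mmc");
    if (IS_ERR(owl_host->dma)) {
        dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
        return PTR_ERR(owl_host->dma);
    }

    dev_info(&pdev->dev, "Using %s for DMA transfers\n",
             dma_chan_name(owl_host->dma));

    /* Both directions target the data FIFO with 32-bit accesses. */
    owl_host->dma_cfg.src_addr = res->start + OWL_REG_SD_DAT;
    owl_host->dma_cfg.dst_addr = res->start + OWL_REG_SD_DAT;
    owl_host->dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    owl_host->dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    owl_host->dma_cfg.device_fc = false;

    owl_host->irq = platform_get_irq(pdev, 0);
    if (owl_host->irq < 0) {
        ret = owl_host->irq;
        goto err_release_channel;
    }

    ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler,
                           0, dev_name(&pdev->dev), owl_host);
    if (ret) {
        dev_err(&pdev->dev, "Failed to request irq %d\n", owl_host->irq);
        goto err_release_channel;
    }

    return 0;

err_release_channel:
    dma_release_channel(owl_host->dma);
    return ret;
}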
676 struct owl_mmc_host *owl_host = mmc_priv(mmc);
679 disable_irq(owl_host->irq);
680 dma_release_channel(owl_host->dma);
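
Lines 676-680 are the teardown: stop interrupts and release the DMA channel once the host is unregistered. The mmc_remove_host()/mmc_free_host() calls are assumptions, since they do not reference owl_host and therefore do not appear in the listing:

static int owl_mmc_remove(struct platform_device *pdev)
{
    struct mmc_host *mmc = platform_get_drvdata(pdev);
    struct owl_mmc_host *owl_host = mmc_priv(mmc);

    mmc_remove_host(mmc);
    disable_irq(owl_host->irq);
    dma_release_channel(owl_host->dma);
    mmc_free_host(mmc);

    return 0;
}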