Lines Matching refs:host in drivers/mmc/host/usdhi6rol0.c (Renesas USDHI6ROL0 SD/SDIO host driver)

16 #include <linux/mmc/host.h>
209 static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
211 iowrite32(data, host->base + reg);
212 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
213 host->base, reg, data);
216 static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data)
218 iowrite16(data, host->base + reg);
219 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
220 host->base, reg, data);
223 static u32 usdhi6_read(struct usdhi6_host *host, u32 reg)
225 u32 data = ioread32(host->base + reg);
226 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
227 host->base, reg, data);
231 static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg)
233 u16 data = ioread16(host->base + reg);
234 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
235 host->base, reg, data);
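
The four accessors above (source lines 209-235) funnel every register access through one read/write helper that also emits a verbose trace of base, offset and value. A stand-alone sketch of the same pattern, with the ioremapped MMIO block replaced by a plain array and purely illustrative names:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[64];	/* stand-in for the ioremapped host->base */

static void reg_write(uint32_t off, uint32_t val)
{
	regs[off / 4] = val;	/* the driver uses iowrite32(val, base + off) */
	printf("%s(+0x%x) = 0x%x\n", __func__, off, val);
}

static uint32_t reg_read(uint32_t off)
{
	uint32_t val = regs[off / 4];	/* ioread32(base + off) in the driver */

	printf("%s(+0x%x) = 0x%x\n", __func__, off, val);
	return val;
}

int main(void)
{
	reg_write(0x20, 0x1234);
	return reg_read(0x20) == 0x1234 ? 0 : 1;
}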
239 static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2)
241 host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1;
242 host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2;
243 usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask);
244 usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask);
247 static void usdhi6_wait_for_resp(struct usdhi6_host *host)
249 usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END |
254 static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read)
256 usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END |
261 static void usdhi6_only_cd(struct usdhi6_host *host)
264 usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0);
267 static void usdhi6_mask_all(struct usdhi6_host *host)
269 usdhi6_irq_enable(host, 0, 0);
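
usdhi6_irq_enable() and its wrappers above share one masking convention: a bit set in an INFO mask register disables that interrupt source, so the value written is "all sources" with the requested bits cleared. A stand-alone sketch of that arithmetic; the bit values are made up for illustration and are not the real USDHI6 register layout:

#include <stdint.h>
#include <stdio.h>

#define ALL_INFO1_IRQ	0x0001003fu	/* hypothetical: every INFO1 source */
#define INFO1_RSP_END	0x00000001u	/* hypothetical response-end bit */
#define INFO1_CARD_CD	0x00000018u	/* hypothetical card-detect bits */

/* 1 = masked (disabled), 0 = enabled; mirrors status_mask = ALL & ~info1 */
static uint32_t irq_mask(uint32_t all, uint32_t enable)
{
	return all & ~enable;
}

int main(void)
{
	/* usdhi6_wait_for_resp(): enable only the response-end interrupt */
	printf("resp only: 0x%08x\n", irq_mask(ALL_INFO1_IRQ, INFO1_RSP_END));
	/* usdhi6_only_cd(): enable only card-detect */
	printf("cd only:   0x%08x\n", irq_mask(ALL_INFO1_IRQ, INFO1_CARD_CD));
	/* usdhi6_mask_all(): enable nothing */
	printf("mask all:  0x%08x\n", irq_mask(ALL_INFO1_IRQ, 0));
	return 0;
}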
272 static int usdhi6_error_code(struct usdhi6_host *host)
276 usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP);
278 if (host->io_error &
280 u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54);
281 int opc = host->mrq ? host->mrq->cmd->opcode : -1;
283 err = usdhi6_read(host, USDHI6_SD_ERR_STS2);
285 if (host->wait == USDHI6_WAIT_FOR_CMD)
286 dev_dbg(mmc_dev(host->mmc),
288 err, rsp54, host->wait, opc);
290 dev_warn(mmc_dev(host->mmc),
292 err, rsp54, host->wait, opc);
296 err = usdhi6_read(host, USDHI6_SD_ERR_STS1);
298 dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n",
299 err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1);
300 if (host->io_error & USDHI6_SD_INFO2_ILA)
314 static void usdhi6_blk_bounce(struct usdhi6_host *host,
317 struct mmc_data *data = host->mrq->data;
318 size_t blk_head = host->head_len;
320 dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n",
321 __func__, host->mrq->cmd->opcode, data->sg_len,
324 host->head_pg.page = host->pg.page;
325 host->head_pg.mapped = host->pg.mapped;
326 host->pg.page = nth_page(host->pg.page, 1);
327 host->pg.mapped = kmap(host->pg.page);
329 host->blk_page = host->bounce_buf;
330 host->offset = 0;
335 memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head,
337 memcpy(host->bounce_buf + blk_head, host->pg.mapped,
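
usdhi6_blk_bounce() above handles a block that straddles a page boundary: its first head_len bytes sit at the end of the current page and the remainder at the start of the next page, and both pieces are copied into one contiguous bounce buffer before PIO. A shrunken stand-alone sketch of the two copies (page and block sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define PAGE_SZ	16	/* pretend page size */
#define BLKSZ	8	/* pretend block size */

int main(void)
{
	char page0[] = "....ABCDEFGHIJKL";	/* block head "IJKL" at page end */
	char page1[] = "MNOPQRSTUVWX....";	/* block tail "MNOP" at page start */
	char bounce[BLKSZ + 1] = "";
	size_t blk_head = 4;	/* bytes of the block left in page0 */

	/* same two memcpy() calls as usdhi6_blk_bounce() */
	memcpy(bounce, page0 + PAGE_SZ - blk_head, blk_head);
	memcpy(bounce + blk_head, page1, BLKSZ - blk_head);
	printf("%s\n", bounce);	/* "IJKLMNOP": one contiguous block */
	return 0;
}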
342 static void usdhi6_sg_prep(struct usdhi6_host *host)
344 struct mmc_request *mrq = host->mrq;
347 usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks);
349 host->sg = data->sg;
351 host->offset = host->sg->offset;
355 static void *usdhi6_sg_map(struct usdhi6_host *host)
357 struct mmc_data *data = host->mrq->data;
358 struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
362 WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
368 host->pg.page = sg_page(sg);
369 host->pg.mapped = kmap(host->pg.page);
370 host->offset = sg->offset;
376 host->head_len = blk_head;
383 usdhi6_blk_bounce(host, sg);
385 host->blk_page = host->pg.mapped;
387 dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n",
388 host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
389 sg->offset, host->mrq->cmd->opcode, host->mrq);
391 return host->blk_page + host->offset;
395 static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force)
397 struct mmc_data *data = host->mrq->data;
398 struct page *page = host->head_pg.page;
403 host->sg : data->sg;
404 size_t blk_head = host->head_len;
407 memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head,
408 host->bounce_buf, blk_head);
409 memcpy(host->pg.mapped, host->bounce_buf + blk_head,
416 host->head_pg.page = NULL;
419 (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
424 page = host->pg.page;
431 host->pg.page = NULL;
435 static void usdhi6_sg_advance(struct usdhi6_host *host)
437 struct mmc_data *data = host->mrq->data;
441 if (host->head_pg.page) {
443 host->page_idx++;
444 host->offset = data->blksz - host->head_len;
445 host->blk_page = host->pg.mapped;
446 usdhi6_sg_unmap(host, false);
448 host->offset += data->blksz;
450 if (host->offset == PAGE_SIZE) {
452 host->offset = 0;
453 host->page_idx++;
458 * Now host->blk_page + host->offset point at the end of our last block
459 * and host->page_idx is the index of the page, in which our new block
463 done = (host->page_idx << PAGE_SHIFT) + host->offset;
464 total = host->sg->offset + sg_dma_len(host->sg);
466 dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__,
467 done, total, host->offset);
469 if (done < total && host->offset) {
471 if (host->offset + data->blksz > PAGE_SIZE)
473 usdhi6_blk_bounce(host, host->sg);
479 usdhi6_sg_unmap(host, false);
487 struct scatterlist *next = sg_next(host->sg);
489 host->page_idx = 0;
492 host->wait = USDHI6_WAIT_FOR_DATA_END;
493 host->sg = next;
506 host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
507 host->pg.mapped = kmap(host->pg.page);
508 host->blk_page = host->pg.mapped;
510 dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n",
511 host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
512 host->mrq->cmd->opcode, host->mrq);
517 static void usdhi6_dma_release(struct usdhi6_host *host)
519 host->dma_active = false;
520 if (host->chan_tx) {
521 struct dma_chan *chan = host->chan_tx;
522 host->chan_tx = NULL;
525 if (host->chan_rx) {
526 struct dma_chan *chan = host->chan_rx;
527 host->chan_rx = NULL;
532 static void usdhi6_dma_stop_unmap(struct usdhi6_host *host)
534 struct mmc_data *data = host->mrq->data;
536 if (!host->dma_active)
539 usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
540 host->dma_active = false;
543 dma_unmap_sg(host->chan_rx->device->dev, data->sg,
546 dma_unmap_sg(host->chan_tx->device->dev, data->sg,
552 struct usdhi6_host *host = arg;
553 struct mmc_request *mrq = host->mrq;
556 dev_name(mmc_dev(host->mmc)), mrq))
559 dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__,
562 usdhi6_dma_stop_unmap(host);
563 usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
566 static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan,
569 struct mmc_data *data = host->mrq->data;
589 host->dma_active = true;
596 desc->callback_param = host;
600 dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n",
607 usdhi6_dma_release(host);
608 dev_warn(mmc_dev(host->mmc),
615 static int usdhi6_dma_start(struct usdhi6_host *host)
617 if (!host->chan_rx || !host->chan_tx)
620 if (host->mrq->data->flags & MMC_DATA_READ)
621 return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM);
623 return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV);
626 static void usdhi6_dma_kill(struct usdhi6_host *host)
628 struct mmc_data *data = host->mrq->data;
630 dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n",
634 dmaengine_terminate_all(host->chan_rx);
636 dmaengine_terminate_all(host->chan_tx);
639 static void usdhi6_dma_check_error(struct usdhi6_host *host)
641 struct mmc_data *data = host->mrq->data;
643 dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n",
644 __func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1));
646 if (host->io_error) {
647 data->error = usdhi6_error_code(host);
649 usdhi6_dma_kill(host);
650 usdhi6_dma_release(host);
651 dev_warn(mmc_dev(host->mmc),
661 if (host->irq_status & USDHI6_SD_INFO1_RSP_END)
662 dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n");
665 static void usdhi6_dma_kick(struct usdhi6_host *host)
667 if (host->mrq->data->flags & MMC_DATA_READ)
668 dma_async_issue_pending(host->chan_rx);
670 dma_async_issue_pending(host->chan_tx);
673 static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
681 host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
682 dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
683 host->chan_tx);
685 if (IS_ERR(host->chan_tx)) {
686 host->chan_tx = NULL;
694 ret = dmaengine_slave_config(host->chan_tx, &cfg);
698 host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
699 dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
700 host->chan_rx);
702 if (IS_ERR(host->chan_rx)) {
703 host->chan_rx = NULL;
711 ret = dmaengine_slave_config(host->chan_rx, &cfg);
718 dma_release_channel(host->chan_rx);
719 host->chan_rx = NULL;
721 dma_release_channel(host->chan_tx);
722 host->chan_tx = NULL;
727 static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios)
734 if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN)
740 dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n");
744 val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK;
749 if (host->imclk <= rate) {
752 new_rate = host->imclk;
755 new_rate = host->imclk / 2;
759 roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate));
761 new_rate = host->imclk / div;
764 if (host->rate == new_rate)
767 host->rate = new_rate;
769 dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n",
777 if (host->imclk == rate || host->imclk == host->rate || !rate)
778 usdhi6_write(host, USDHI6_SD_CLK_CTRL,
782 host->rate = 0;
786 usdhi6_write(host, USDHI6_SD_CLK_CTRL, val);
788 if (host->imclk == rate || host->imclk == host->rate ||
790 usdhi6_write(host, USDHI6_SD_CLK_CTRL,
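
usdhi6_clk_set() above can only divide the input clock (host->imclk) by powers of two, so the requested rate is rounded down to imclk / 2^n, and a 1:1 setting is used when the request is at or above imclk. A stand-alone sketch of that selection with user-space stand-ins for the kernel's DIV_ROUND_UP() and roundup_pow_of_two(); the input clock value is an assumption for illustration:

#include <stdio.h>

static unsigned long div_round_up(unsigned long n, unsigned long d)
{
	return (n + d - 1) / d;
}

static unsigned long round_up_pow_of_two(unsigned long v)
{
	unsigned long p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long imclk = 156000000;	/* assumed input clock, Hz */
	unsigned long rates[] = { 400000, 25000000, 50000000, 200000000 };
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long rate = rates[i], new_rate, div;

		if (imclk <= rate) {
			new_rate = imclk;	/* 1:1 (non-DDR case) */
		} else {
			div = round_up_pow_of_two(div_round_up(imclk, rate));
			new_rate = imclk / div;
		}
		printf("target %9lu Hz -> set %9lu Hz\n", rate, new_rate);
	}
	return 0;
}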
794 static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios)
796 struct mmc_host *mmc = host->mmc;
804 static int usdhi6_reset(struct usdhi6_host *host)
808 usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED);
810 usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET);
812 if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET)
820 struct usdhi6_host *host = mmc_priv(mmc);
829 usdhi6_set_power(host, ios);
830 usdhi6_only_cd(host);
837 ret = usdhi6_reset(host);
841 usdhi6_set_power(host, ios);
842 usdhi6_only_cd(host);
846 option = usdhi6_read(host, USDHI6_SD_OPTION);
862 usdhi6_write(host, USDHI6_SD_OPTION, option);
863 usdhi6_write(host, USDHI6_SDIF_MODE, mode);
867 if (host->rate != ios->clock)
868 usdhi6_clk_set(host, ios);
872 static void usdhi6_timeout_set(struct usdhi6_host *host)
874 struct mmc_request *mrq = host->mrq;
879 ticks = host->rate / 1000 * mrq->cmd->busy_timeout;
881 ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) +
893 dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n",
894 mrq->data ? "data" : "cmd", ticks, host->rate);
897 usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) |
898 (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK));
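
usdhi6_timeout_set() above programs the hardware timeout in SD-clock ticks, so a command's millisecond busy timeout, or a data transfer's nanosecond timeout, is first converted at the current card clock (host->rate); the listed data-path line continues with an additional clock-count term that the search does not show. A stand-alone sketch of the tick conversion with assumed example values:

#include <stdio.h>

int main(void)
{
	unsigned long rate = 19500000;		  /* assumed card clock, Hz */
	unsigned int busy_timeout_ms = 500;	  /* command busy timeout */
	unsigned int data_timeout_ns = 100000000; /* 100 ms data timeout */
	unsigned long cmd_ticks, data_ticks;

	cmd_ticks = rate / 1000 * busy_timeout_ms;
	data_ticks = rate / 1000000 * (data_timeout_ns / 1000);

	printf("cmd: %lu ticks, data: %lu ticks @ %lu Hz\n",
	       cmd_ticks, data_ticks, rate);
	return 0;
}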
901 static void usdhi6_request_done(struct usdhi6_host *host)
903 struct mmc_request *mrq = host->mrq;
906 if (WARN(host->pg.page || host->head_pg.page,
908 host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode,
910 data ? host->offset : 0, data ? data->blocks : 0,
912 usdhi6_sg_unmap(host, true);
917 dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n",
925 usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
926 host->wait = USDHI6_WAIT_FOR_REQUEST;
927 host->mrq = NULL;
929 mmc_request_done(host->mmc, mrq);
932 static int usdhi6_cmd_flags(struct usdhi6_host *host)
934 struct mmc_request *mrq = host->mrq;
938 if (host->app_cmd) {
939 host->app_cmd = false;
975 dev_warn(mmc_dev(host->mmc),
985 static int usdhi6_rq_start(struct usdhi6_host *host)
987 struct mmc_request *mrq = host->mrq;
990 int opc = usdhi6_cmd_flags(host);
997 if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY))
1003 dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n");
1011 host->page_idx = 0;
1034 dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n",
1043 usdhi6_sg_prep(host);
1045 usdhi6_write(host, USDHI6_SD_SIZE, data->blksz);
1051 dev_dbg(mmc_dev(host->mmc),
1058 usdhi6_dma_start(host) >= DMA_MIN_COOKIE;
1061 usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW);
1063 dev_dbg(mmc_dev(host->mmc),
1070 dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n",
1075 usdhi6_wait_for_resp(host);
1077 host->wait = USDHI6_WAIT_FOR_CMD;
1078 schedule_delayed_work(&host->timeout_work, host->timeout);
1081 usdhi6_write(host, USDHI6_SD_STOP,
1083 usdhi6_write(host, USDHI6_SD_ARG, cmd->arg);
1086 usdhi6_write(host, USDHI6_SD_CMD, opc);
1093 struct usdhi6_host *host = mmc_priv(mmc);
1096 cancel_delayed_work_sync(&host->timeout_work);
1098 host->mrq = mrq;
1099 host->sg = NULL;
1101 usdhi6_timeout_set(host);
1102 ret = usdhi6_rq_start(host);
1105 usdhi6_request_done(host);
1111 struct usdhi6_host *host = mmc_priv(mmc);
1113 u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD;
1127 struct usdhi6_host *host = mmc_priv(mmc);
1129 u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP;
1143 struct usdhi6_host *host = mmc_priv(mmc);
1148 host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ;
1149 usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask);
1150 usdhi6_write(host, USDHI6_SDIO_MODE, 1);
1152 usdhi6_write(host, USDHI6_SDIO_MODE, 0);
1153 usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ);
1154 host->sdio_mask = USDHI6_SDIO_INFO1_IRQ;
1158 static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
1160 if (IS_ERR(host->pins_uhs))
1166 return pinctrl_select_state(host->pinctrl,
1167 host->pins_uhs);
1170 return pinctrl_select_default_state(mmc_dev(host->mmc));
1200 static void usdhi6_resp_cmd12(struct usdhi6_host *host)
1202 struct mmc_command *cmd = host->mrq->stop;
1203 cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
1206 static void usdhi6_resp_read(struct usdhi6_host *host)
1208 struct mmc_command *cmd = host->mrq->cmd;
1229 if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) {
1230 dev_err(mmc_dev(host->mmc),
1239 tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8);
1245 rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54);
1247 rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
1249 dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]);
1252 static int usdhi6_blk_read(struct usdhi6_host *host)
1254 struct mmc_data *data = host->mrq->data;
1258 if (host->io_error) {
1259 data->error = usdhi6_error_code(host);
1263 if (host->pg.page) {
1264 p = host->blk_page + host->offset;
1266 p = usdhi6_sg_map(host);
1274 *p = usdhi6_read(host, USDHI6_SD_BUF0);
1278 u16 d = usdhi6_read16(host, USDHI6_SD_BUF0);
1287 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
1288 host->wait = USDHI6_WAIT_FOR_REQUEST;
1292 static int usdhi6_blk_write(struct usdhi6_host *host)
1294 struct mmc_data *data = host->mrq->data;
1298 if (host->io_error) {
1299 data->error = usdhi6_error_code(host);
1303 if (host->pg.page) {
1304 p = host->blk_page + host->offset;
1306 p = usdhi6_sg_map(host);
1314 usdhi6_write(host, USDHI6_SD_BUF0, *p);
1324 usdhi6_write16(host, USDHI6_SD_BUF0, d);
1330 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
1331 host->wait = USDHI6_WAIT_FOR_REQUEST;
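
usdhi6_blk_read() and usdhi6_blk_write() above move block data through the 32-bit SD_BUF0 FIFO one word at a time, finishing with a 16-bit access when the block size is not a multiple of four. A stand-alone sketch of that PIO pattern, with the FIFO simulated by a position counter over a flat buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t card[512];	/* pretend card-side data */
static size_t fifo_pos;		/* simulated FIFO read position */

static uint32_t fifo_read32(void)	/* stands in for usdhi6_read(SD_BUF0) */
{
	uint32_t v;

	memcpy(&v, card + fifo_pos, 4);
	fifo_pos += 4;
	return v;
}

static uint16_t fifo_read16(void)	/* stands in for usdhi6_read16(SD_BUF0) */
{
	uint16_t v;

	memcpy(&v, card + fifo_pos, 2);
	fifo_pos += 2;
	return v;
}

int main(void)
{
	uint32_t blkbuf[2];			/* word-aligned block buffer */
	uint8_t *blk = (uint8_t *)blkbuf;
	uint32_t *p = blkbuf;
	size_t i, blksz = 6;			/* one 32-bit word + 16-bit tail */

	memcpy(card, "HELLO!", 6);
	for (i = 0; i < blksz / 4; i++, p++)
		*p = fifo_read32();
	if (blksz % 4) {
		uint16_t d = fifo_read16();

		memcpy(p, &d, 2);
	}
	printf("%.6s\n", (char *)blk);	/* prints "HELLO!" */
	return 0;
}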
1335 static int usdhi6_stop_cmd(struct usdhi6_host *host)
1337 struct mmc_request *mrq = host->mrq;
1343 host->wait = USDHI6_WAIT_FOR_STOP;
1348 dev_err(mmc_dev(host->mmc),
1357 static bool usdhi6_end_cmd(struct usdhi6_host *host)
1359 struct mmc_request *mrq = host->mrq;
1362 if (host->io_error) {
1363 cmd->error = usdhi6_error_code(host);
1367 usdhi6_resp_read(host);
1372 if (host->dma_active) {
1373 usdhi6_dma_kick(host);
1375 host->wait = USDHI6_WAIT_FOR_DMA;
1376 else if (usdhi6_stop_cmd(host) < 0)
1382 host->wait = USDHI6_WAIT_FOR_MREAD;
1384 host->wait = USDHI6_WAIT_FOR_READ;
1389 host->wait = USDHI6_WAIT_FOR_MWRITE;
1391 host->wait = USDHI6_WAIT_FOR_WRITE;
1397 static bool usdhi6_read_block(struct usdhi6_host *host)
1400 int ret = usdhi6_blk_read(host);
1404 * cross-page, in which case for single-block IO host->page_idx == 0.
1407 usdhi6_sg_unmap(host, true);
1412 host->wait = USDHI6_WAIT_FOR_DATA_END;
1416 static bool usdhi6_mread_block(struct usdhi6_host *host)
1418 int ret = usdhi6_blk_read(host);
1423 usdhi6_sg_advance(host);
1425 return !host->mrq->data->error &&
1426 (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
1429 static bool usdhi6_write_block(struct usdhi6_host *host)
1431 int ret = usdhi6_blk_write(host);
1434 usdhi6_sg_unmap(host, true);
1439 host->wait = USDHI6_WAIT_FOR_DATA_END;
1443 static bool usdhi6_mwrite_block(struct usdhi6_host *host)
1445 int ret = usdhi6_blk_write(host);
1450 usdhi6_sg_advance(host);
1452 return !host->mrq->data->error &&
1453 (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
1460 struct usdhi6_host *host = dev_id;
1466 cancel_delayed_work_sync(&host->timeout_work);
1468 mrq = host->mrq;
1475 switch (host->wait) {
1481 io_wait = usdhi6_end_cmd(host);
1485 io_wait = usdhi6_mread_block(host);
1489 io_wait = usdhi6_read_block(host);
1493 io_wait = usdhi6_mwrite_block(host);
1497 io_wait = usdhi6_write_block(host);
1500 usdhi6_dma_check_error(host);
1503 usdhi6_write(host, USDHI6_SD_STOP, 0);
1504 if (host->io_error) {
1505 int ret = usdhi6_error_code(host);
1510 dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret);
1513 usdhi6_resp_cmd12(host);
1517 if (host->io_error) {
1518 mrq->data->error = usdhi6_error_code(host);
1519 dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__,
1525 dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
1526 usdhi6_request_done(host);
1531 schedule_delayed_work(&host->timeout_work, host->timeout);
1533 if (!host->dma_active)
1534 usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
1541 if (host->wait != USDHI6_WAIT_FOR_STOP &&
1542 host->mrq->stop &&
1543 !host->mrq->stop->error &&
1544 !usdhi6_stop_cmd(host)) {
1546 usdhi6_wait_for_resp(host);
1548 schedule_delayed_work(&host->timeout_work,
1549 host->timeout);
1557 dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n",
1559 usdhi6_sg_unmap(host, true);
1562 host->app_cmd = true;
1566 usdhi6_request_done(host);
1573 struct usdhi6_host *host = dev_id;
1576 status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
1578 status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask;
1580 usdhi6_only_cd(host);
1582 dev_dbg(mmc_dev(host->mmc),
1592 usdhi6_write(host, USDHI6_SD_INFO1,
1600 usdhi6_write(host, USDHI6_SD_INFO2,
1604 host->io_error = error;
1605 host->irq_status = status;
1609 if (host->wait != USDHI6_WAIT_FOR_CMD ||
1611 dev_warn(mmc_dev(host->mmc),
1615 dev_dbg(mmc_dev(host->mmc),
1625 struct usdhi6_host *host = dev_id;
1626 u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask;
1628 dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status);
1633 usdhi6_write(host, USDHI6_SDIO_INFO1, ~status);
1635 mmc_signal_sdio_irq(host->mmc);
1642 struct usdhi6_host *host = dev_id;
1643 struct mmc_host *mmc = host->mmc;
1647 status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
1654 usdhi6_write(host, USDHI6_SD_INFO1, ~status);
1674 struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
1675 struct mmc_request *mrq = host->mrq;
1679 dev_warn(mmc_dev(host->mmc),
1681 host->dma_active ? "DMA" : "PIO",
1682 host->wait, mrq ? mrq->cmd->opcode : -1,
1683 usdhi6_read(host, USDHI6_SD_INFO1),
1684 usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status);
1686 if (host->dma_active) {
1687 usdhi6_dma_kill(host);
1688 usdhi6_dma_stop_unmap(host);
1691 switch (host->wait) {
1693 dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
1696 usdhi6_error_code(host);
1701 usdhi6_error_code(host);
1709 sg = host->sg ?: data->sg;
1710 dev_dbg(mmc_dev(host->mmc),
1712 data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
1713 host->offset, data->blocks, data->blksz, data->sg_len,
1715 usdhi6_sg_unmap(host, true);
1718 usdhi6_error_code(host);
1723 usdhi6_request_done(host);
1738 struct usdhi6_host *host;
1767 host = mmc_priv(mmc);
1768 host->mmc = mmc;
1769 host->wait = USDHI6_WAIT_FOR_REQUEST;
1770 host->timeout = msecs_to_jiffies(USDHI6_REQ_TIMEOUT_MS);
1777 host->pinctrl = devm_pinctrl_get(&pdev->dev);
1778 if (IS_ERR(host->pinctrl)) {
1779 ret = PTR_ERR(host->pinctrl);
1783 host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
1786 host->base = devm_ioremap_resource(dev, res);
1787 if (IS_ERR(host->base)) {
1788 ret = PTR_ERR(host->base);
1792 host->clk = devm_clk_get(dev, NULL);
1793 if (IS_ERR(host->clk)) {
1794 ret = PTR_ERR(host->clk);
1798 host->imclk = clk_get_rate(host->clk);
1800 ret = clk_prepare_enable(host->clk);
1804 version = usdhi6_read(host, USDHI6_VERSION);
1811 dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n",
1812 usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT);
1814 usdhi6_mask_all(host);
1818 dev_name(dev), host);
1826 dev_name(dev), host);
1831 dev_name(dev), host);
1835 INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work);
1837 usdhi6_dma_request(host, res->start);
1857 mmc->f_max = host->imclk;
1858 mmc->f_min = host->imclk / 512;
1860 platform_set_drvdata(pdev, host);
1869 usdhi6_dma_release(host);
1871 clk_disable_unprepare(host->clk);
1880 struct usdhi6_host *host = platform_get_drvdata(pdev);
1882 mmc_remove_host(host->mmc);
1884 usdhi6_mask_all(host);
1885 cancel_delayed_work_sync(&host->timeout_work);
1886 usdhi6_dma_release(host);
1887 clk_disable_unprepare(host->clk);
1888 mmc_free_host(host->mmc);
1905 MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver");