Lines matching refs: data (occurrences of the identifier "data" in the usdhi6 MMC host driver; the leading number on each match is its line number in the driver source). A few illustrative sketches of the patterns these matches point at follow the listing.
209 static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
211 iowrite32(data, host->base + reg);
213 host->base, reg, data);
216 static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data)
218 iowrite16(data, host->base + reg);
220 host->base, reg, data);
225 u32 data = ioread32(host->base + reg);
227 host->base, reg, data);
228 return data;
233 u16 data = ioread16(host->base + reg);
235 host->base, reg, data);
236 return data;
317 struct mmc_data *data = host->mrq->data;
321 __func__, host->mrq->cmd->opcode, data->sg_len,
322 data->blksz, data->blocks, sg->offset);
332 if (data->flags & MMC_DATA_READ)
338 data->blksz - blk_head);
345 struct mmc_data *data = mrq->data;
347 usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks);
349 host->sg = data->sg;
357 struct mmc_data *data = host->mrq->data;
358 struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
360 size_t blk_head = head % data->blksz;
363 if (WARN(sg_dma_len(sg) % data->blksz,
365 sg_dma_len(sg), data->blksz))
378 if (head < data->blksz)
397 struct mmc_data *data = host->mrq->data;
402 struct scatterlist *sg = data->sg_len > 1 ?
403 host->sg : data->sg;
406 if (!data->error && data->flags & MMC_DATA_READ) {
410 data->blksz - blk_head);
419 (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
437 struct mmc_data *data = host->mrq->data;
444 host->offset = data->blksz - host->head_len;
448 host->offset += data->blksz;
471 if (host->offset + data->blksz > PAGE_SIZE)
495 if (WARN(next && sg_dma_len(next) % data->blksz,
497 sg_dma_len(next), data->blksz))
498 data->error = -EINVAL;
534 struct mmc_data *data = host->mrq->data;
542 if (data->flags & MMC_DATA_READ)
543 dma_unmap_sg(host->chan_rx->device->dev, data->sg,
544 data->sg_len, DMA_FROM_DEVICE);
546 dma_unmap_sg(host->chan_tx->device->dev, data->sg,
547 data->sg_len, DMA_TO_DEVICE);
555 if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n",
563 usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
569 struct mmc_data *data = host->mrq->data;
570 struct scatterlist *sg = data->sg;
587 ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir);
601 __func__, data->sg_len, ret, cookie, desc);
620 if (host->mrq->data->flags & MMC_DATA_READ)
628 struct mmc_data *data = host->mrq->data;
631 __func__, data->sg_len, data->blocks, data->blksz);
633 if (data->flags & MMC_DATA_READ)
641 struct mmc_data *data = host->mrq->data;
647 data->error = usdhi6_error_code(host);
648 data->bytes_xfered = 0;
652 "DMA failed: %d, falling back to PIO\n", data->error);
658 * responses only come after the command phase, not after the data
667 if (host->mrq->data->flags & MMC_DATA_READ)
871 /* This is data timeout. Response timeout is fixed to 640 clock cycles */
878 if (!mrq->data)
881 ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) +
882 mrq->data->timeout_clks;
894 mrq->data ? "data" : "cmd", ticks, host->rate);
904 struct mmc_data *data = mrq->data;
909 data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-',
910 data ? host->offset : 0, data ? data->blocks : 0,
911 data ? data->blksz : 0, data ? data->sg_len : 0))
915 (data && data->error) ||
918 __func__, mrq->cmd->opcode, data ? data->blocks : 0,
919 data ? data->blksz : 0,
921 data ? data->error : 1,
943 if (mrq->data) {
946 if (mrq->data->flags & MMC_DATA_READ)
952 mrq->data->blocks > 1)) {
989 struct mmc_data *data = mrq->data;
1007 if (data) {
1013 if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) {
1014 switch (data->blksz) {
1029 data->blksz != 512) {
1035 __func__, data->blocks, data->blksz);
1042 data->blocks > 1))
1045 usdhi6_write(host, USDHI6_SD_SIZE, data->blksz);
1047 if ((data->blksz >= USDHI6_MIN_DMA ||
1048 data->blocks > 1) &&
1049 (data->blksz % 4 ||
1050 data->sg->offset % 4))
1052 "Bad SG of %u: %ux%u @ %u\n", data->sg_len,
1053 data->blksz, data->blocks, data->sg->offset);
1056 use_dma = data->blksz >= USDHI6_MIN_DMA &&
1057 !(data->blksz % 4) &&
1065 __func__, cmd->opcode, data->blocks, data->blksz,
1066 data->sg_len, use_dma ? "DMA" : "PIO",
1067 data->flags & MMC_DATA_READ ? "read" : "write",
1068 data->sg->offset, mrq->stop ? " + stop" : "");
1082 data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0);
1264 struct mmc_data *data = host->mrq->data;
1269 data->error = usdhi6_error_code(host);
1278 data->error = -ENOMEM;
1283 for (i = 0; i < data->blksz / 4; i++, p++)
1286 rest = data->blksz % 4;
1297 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
1299 return data->error;
1304 struct mmc_data *data = host->mrq->data;
1309 data->error = usdhi6_error_code(host);
1318 data->error = -ENOMEM;
1323 for (i = 0; i < data->blksz / 4; i++, p++)
1326 rest = data->blksz % 4;
1340 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
1342 return data->error;
1379 if (!mrq->data)
1388 } else if (mrq->data->flags & MMC_DATA_READ) {
1391 mrq->data->blocks > 1))
1398 mrq->data->blocks > 1))
1435 return !host->mrq->data->error &&
1462 return !host->mrq->data->error &&
1473 struct mmc_data *data;
1483 data = mrq->data;
1490 /* Wait for data? */
1494 /* Wait for more data? */
1498 /* Wait for data end? */
1502 /* Wait data to write? */
1506 /* Wait for data end? */
1519 mrq->data->error = ret;
1528 mrq->data->error = usdhi6_error_code(host);
1530 mrq->data->error);
1542 /* Wait for more data or ACCESS_END */
1544 usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
1549 if (data) {
1550 if (!data->error) {
1564 data->bytes_xfered = data->blocks * data->blksz;
1567 dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n",
1568 __func__, data->error);
1686 struct mmc_data *data = mrq ? mrq->data : NULL;
1719 sg = host->sg ?: data->sg;
1722 data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
1723 host->offset, data->blocks, data->blksz, data->sg_len,
1729 data->error = -ETIMEDOUT;
1758 irq_sd = platform_get_irq_byname(pdev, "data");
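
The matches at driver lines 209-236 above cover the driver's MMIO register accessors. Below is a minimal sketch of that pattern, assuming a stripped-down struct usdhi6_host that carries only the fields the listing itself references (the real structure has many more) and reconstructing the trace call as dev_vdbg(); it is an illustration, not the driver's verbatim code.

#include <linux/io.h>
#include <linux/dmaengine.h>
#include <linux/mmc/host.h>

/* Assumed minimal host state for these sketches; the real driver's
 * structure carries far more.  ->mmc and ->base appear directly in the
 * accessor matches, ->chan_rx/->chan_tx in the DMA matches further down. */
struct usdhi6_host {
	struct mmc_host *mmc;		/* MMC host handle */
	void __iomem *base;		/* mapped controller registers */
	struct dma_chan *chan_rx;	/* dmaengine channel for reads */
	struct dma_chan *chan_tx;	/* dmaengine channel for writes */
};

/* 32-bit register write: store the value, then trace base, offset and
 * value, matching the iowrite32() + trace pair in the listing. */
static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
{
	iowrite32(data, host->base + reg);
	dev_vdbg(mmc_dev(host->mmc), "%s(%p + 0x%x) = 0x%x\n", __func__,
		 host->base, reg, data);
}

/* 32-bit register read with the same trace on the way back. */
static u32 usdhi6_read(struct usdhi6_host *host, u32 reg)
{
	u32 data = ioread32(host->base + reg);

	dev_vdbg(mmc_dev(host->mmc), "%s(%p + 0x%x) = 0x%x\n", __func__,
		 host->base, reg, data);
	return data;
}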
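
The matches at driver lines 534-547 show the DMA completion path unmapping the request's scatterlist. Reusing the minimal struct above, the direction selection looks roughly as follows (the helper name is hypothetical):

#include <linux/dma-mapping.h>
#include <linux/mmc/core.h>

/* Unmap the scatterlist against the device of whichever dmaengine channel
 * carried the transfer; the direction follows the request's data flags,
 * as at driver lines 542-547. */
static void usdhi6_dma_unmap(struct usdhi6_host *host, struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev, data->sg,
			     data->sg_len, DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev, data->sg,
			     data->sg_len, DMA_TO_DEVICE);
}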
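
Driver lines 871-894 compute the data timeout in controller clock ticks; the comment at line 871 notes that the response timeout is fixed at 640 clock cycles and is not computed here. A hypothetical helper condensing the arithmetic at lines 881-882, assuming host->rate is the interface clock in Hz:

/* rate / 1000000 is clock cycles per microsecond and timeout_ns / 1000 is
 * the card's timeout in microseconds; their product plus the card's extra
 * timeout_clks gives the timeout as a tick count. */
static unsigned long usdhi6_data_timeout_ticks(unsigned long rate,
					       const struct mmc_data *data)
{
	return rate / 1000000 * (data->timeout_ns / 1000) + data->timeout_clks;
}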
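
Finally, driver lines 1047-1057 gate DMA on block size and alignment. The listing truncates the full use_dma expression, but the visible operands suggest a predicate along these lines (USDHI6_MIN_DMA is taken as a parameter because its value does not appear above; the helper name is hypothetical):

#include <linux/scatterlist.h>

/* DMA is only attempted for blocks of at least min_dma bytes whose size and
 * first scatterlist offset are 32-bit aligned; anything else falls back to
 * PIO.  The driver's real condition has further operands the listing cuts
 * off. */
static bool usdhi6_dma_worthwhile(const struct mmc_data *data, size_t min_dma)
{
	return data->blksz >= min_dma &&
	       !(data->blksz % 4) &&
	       !(data->sg->offset % 4);
}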