Lines matching refs: data (usdhi6 SD/MMC host driver)
209 static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
211 iowrite32(data, host->base + reg);
213 host->base, reg, data);
216 static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data)
218 iowrite16(data, host->base + reg);
220 host->base, reg, data);
225 u32 data = ioread32(host->base + reg);
227 host->base, reg, data);
228 return data;
233 u16 data = ioread16(host->base + reg);
235 host->base, reg, data);
236 return data;
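
The matches at 209-236 come from the driver's MMIO accessor wrappers; the bare "host->base, reg, data);" lines are the continuation of a debug print. A hedged reconstruction of the 32-bit pair follows (the dev_vdbg() call and its format string are assumptions; only the iowrite32()/ioread32() lines and the argument list appear in the listing):

static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
{
	iowrite32(data, host->base + reg);
	/* debug call reconstructed as an assumption */
	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
		 host->base, reg, data);
}

static u32 usdhi6_read(struct usdhi6_host *host, u32 reg)
{
	u32 data = ioread32(host->base + reg);

	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
		 host->base, reg, data);
	return data;
}

The 16-bit variants at 216-236 differ only in using iowrite16()/ioread16() and u16 data.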
317 struct mmc_data *data = host->mrq->data;
321 __func__, host->mrq->cmd->opcode, data->sg_len,
322 data->blksz, data->blocks, sg->offset);
332 if (data->flags & MMC_DATA_READ)
338 data->blksz - blk_head);
345 struct mmc_data *data = mrq->data;
347 usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks);
349 host->sg = data->sg;
357 struct mmc_data *data = host->mrq->data;
358 struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
360 size_t blk_head = head % data->blksz;
363 if (WARN(sg_dma_len(sg) % data->blksz,
365 sg_dma_len(sg), data->blksz))
378 if (head < data->blksz)
397 struct mmc_data *data = host->mrq->data;
402 struct scatterlist *sg = data->sg_len > 1 ?
403 host->sg : data->sg;
406 if (!data->error && data->flags & MMC_DATA_READ) {
410 data->blksz - blk_head);
419 (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
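
The blk_head arithmetic in matches 317-419 handles a block that straddles a boundary between mapped areas. An illustration of the quantities involved (not a verbatim excerpt; variable roles are inferred from the listing):

	/* 'head' is how far into the current area the transfer has progressed,
	 * so the current block splits into blk_head bytes before the boundary
	 * and (blksz - blk_head) bytes after it. */
	size_t blk_head = head % data->blksz;
	size_t tail = data->blksz - blk_head;	/* the "blksz - blk_head" copied in matches 338 and 410 */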
437 struct mmc_data *data = host->mrq->data;
444 host->offset = data->blksz - host->head_len;
448 host->offset += data->blksz;
471 if (host->offset + data->blksz > PAGE_SIZE)
495 if (WARN(next && sg_dma_len(next) % data->blksz,
497 sg_dma_len(next), data->blksz))
498 data->error = -EINVAL;
534 struct mmc_data *data = host->mrq->data;
542 if (data->flags & MMC_DATA_READ)
543 dma_unmap_sg(host->chan_rx->device->dev, data->sg,
544 data->sg_len, DMA_FROM_DEVICE);
546 dma_unmap_sg(host->chan_tx->device->dev, data->sg,
547 data->sg_len, DMA_TO_DEVICE);
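
Matches 542-547 unmap the scatterlist with the direction matching the transfer; putting the two branches together (the else itself is not shown, but both calls are):

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev, data->sg,
			     data->sg_len, DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev, data->sg,
			     data->sg_len, DMA_TO_DEVICE);

The direction passed here has to match the one used when the list was mapped (match 587).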
555 if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n",
563 usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
569 struct mmc_data *data = host->mrq->data;
570 struct scatterlist *sg = data->sg;
587 ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir);
601 __func__, data->sg_len, ret, cookie, desc);
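
Match 587 maps the scatterlist and match 601 prints a cookie and a descriptor, which implies the usual dmaengine prepare/submit sequence. A hedged sketch of that sequence (chan, sg, data_dir and sg_len come from the listing; dma_dir, the flags and the callback name are assumptions):

	struct dma_async_tx_descriptor *desc = NULL;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	/* data_dir is DMA_FROM_DEVICE/DMA_TO_DEVICE for the mapping,
	 * dma_dir is DMA_DEV_TO_MEM/DMA_MEM_TO_DEV for the slave transfer. */
	ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret, dma_dir,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (desc) {
		desc->callback = usdhi6_dma_complete;	/* hypothetical name */
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}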
620 if (host->mrq->data->flags & MMC_DATA_READ)
628 struct mmc_data *data = host->mrq->data;
631 __func__, data->sg_len, data->blocks, data->blksz);
633 if (data->flags & MMC_DATA_READ)
641 struct mmc_data *data = host->mrq->data;
647 data->error = usdhi6_error_code(host);
648 data->bytes_xfered = 0;
652 "DMA failed: %d, falling back to PIO\n", data->error);
658 * responses only come after the command phase, not after the data
667 if (host->mrq->data->flags & MMC_DATA_READ)
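
Matches 641-667 belong to the DMA error path; condensed, it flags the data phase with the controller's error code, resets the byte count and falls back to PIO (how the retry is triggered is not visible in the listing):

	data->error = usdhi6_error_code(host);
	data->bytes_xfered = 0;
	dev_warn(mmc_dev(host->mmc),
		 "DMA failed: %d, falling back to PIO\n", data->error);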
871 /* This is data timeout. Response timeout is fixed to 640 clock cycles */
878 if (!mrq->data)
881 ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) +
882 mrq->data->timeout_clks;
894 mrq->data ? "data" : "cmd", ticks, host->rate);
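
Matches 878-894 compute the data timeout in clock ticks as rate / 1000000 * (timeout_ns / 1000) + timeout_clks, i.e. the clock rate in MHz times the timeout in microseconds, plus the card's extra clock count. A worked example with assumed values:

	/* Assumed example values, not taken from the listing. */
	unsigned int rate = 52000000;			/* 52 MHz SD clock */
	unsigned int timeout_ns = 100000000;		/* 100 ms card data timeout */
	unsigned int timeout_clks = 0;
	unsigned long ticks;

	/* 52 clocks per microsecond * 100000 us = 5200000 ticks */
	ticks = rate / 1000000 * (timeout_ns / 1000) + timeout_clks;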
904 struct mmc_data *data = mrq->data;
909 data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-',
910 data ? host->offset : 0, data ? data->blocks : 0,
911 data ? data->blksz : 0, data ? data->sg_len : 0))
915 (data && data->error) ||
918 __func__, mrq->cmd->opcode, data ? data->blocks : 0,
919 data ? data->blksz : 0,
921 data ? data->error : 1,
943 if (mrq->data) {
946 if (mrq->data->flags & MMC_DATA_READ)
952 mrq->data->blocks > 1)) {
989 struct mmc_data *data = mrq->data;
1007 if (data) {
1013 if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) {
1014 switch (data->blksz) {
1029 data->blksz != 512) {
1035 __func__, data->blocks, data->blksz);
1042 data->blocks > 1))
1045 usdhi6_write(host, USDHI6_SD_SIZE, data->blksz);
1047 if ((data->blksz >= USDHI6_MIN_DMA ||
1048 data->blocks > 1) &&
1049 (data->blksz % 4 ||
1050 data->sg->offset % 4))
1052 "Bad SG of %u: %ux%u @ %u\n", data->sg_len,
1053 data->blksz, data->blocks, data->sg->offset);
1056 use_dma = data->blksz >= USDHI6_MIN_DMA &&
1057 !(data->blksz % 4) &&
1065 __func__, cmd->opcode, data->blocks, data->blksz,
1066 data->sg_len, use_dma ? "DMA" : "PIO",
1067 data->flags & MMC_DATA_READ ? "read" : "write",
1068 data->sg->offset, mrq->stop ? " + stop" : "");
1082 data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0);
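
Matches 1042-1068 choose between PIO and DMA: DMA is used only for blocks of at least USDHI6_MIN_DMA bytes with a word-aligned size, and the warning in matches 1047-1053 suggests the scatterlist offset must be word-aligned as well. The condition in match 1056 is truncated; a hedged reading of the whole predicate:

	/* The first two terms appear in matches 1056-1057; the offset check is
	 * an assumption based on the alignment warning just above. */
	use_dma = data->blksz >= USDHI6_MIN_DMA &&
		  !(data->blksz % 4) &&
		  !(data->sg->offset % 4);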
1254 struct mmc_data *data = host->mrq->data;
1259 data->error = usdhi6_error_code(host);
1268 data->error = -ENOMEM;
1273 for (i = 0; i < data->blksz / 4; i++, p++)
1276 rest = data->blksz % 4;
1287 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
1289 return data->error;
1294 struct mmc_data *data = host->mrq->data;
1299 data->error = usdhi6_error_code(host);
1308 data->error = -ENOMEM;
1313 for (i = 0; i < data->blksz / 4; i++, p++)
1316 rest = data->blksz % 4;
1330 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
1332 return data->error;
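
Matches 1254-1332 implement the PIO path for one block: blksz / 4 whole words go through the data FIFO, then rest = blksz % 4 trailing bytes are moved separately. A hedged sketch of the read side (the function name, the FIFO register USDHI6_SD_BUF0 and the memcpy() for the tail are assumptions; p points into the mapped scatterlist page as in match 1273):

static void usdhi6_pio_read_block(struct usdhi6_host *host,
				  struct mmc_data *data, u32 *p)
{
	size_t i, rest;

	for (i = 0; i < data->blksz / 4; i++, p++)
		*p = usdhi6_read(host, USDHI6_SD_BUF0);

	rest = data->blksz % 4;
	if (rest) {
		u32 d = usdhi6_read(host, USDHI6_SD_BUF0);

		memcpy(p, &d, rest);	/* keep only the valid trailing bytes */
	}
}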
1369 if (!mrq->data)
1378 } else if (mrq->data->flags & MMC_DATA_READ) {
1381 mrq->data->blocks > 1))
1388 mrq->data->blocks > 1))
1425 return !host->mrq->data->error &&
1452 return !host->mrq->data->error &&
1463 struct mmc_data *data;
1473 data = mrq->data;
1480 /* Wait for data? */
1484 /* Wait for more data? */
1488 /* Wait for data end? */
1492 /* Wait data to write? */
1496 /* Wait for data end? */
1509 mrq->data->error = ret;
1518 mrq->data->error = usdhi6_error_code(host);
1520 mrq->data->error);
1532 /* Wait for more data or ACCESS_END */
1534 usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
1539 if (data) {
1540 if (!data->error) {
1554 data->bytes_xfered = data->blocks * data->blksz;
1557 dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n",
1558 __func__, data->error);
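
Matches 1539-1558 close the data phase: on a clean transfer every requested byte is reported back to the MMC core, otherwise the error is logged (match 648 shows bytes_xfered being zeroed on failure). Condensed:

	if (data) {
		if (!data->error)
			data->bytes_xfered = data->blocks * data->blksz;
		else
			dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n",
				 __func__, data->error);
	}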
1676 struct mmc_data *data = mrq ? mrq->data : NULL;
1709 sg = host->sg ?: data->sg;
1712 data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
1713 host->offset, data->blocks, data->blksz, data->sg_len,
1719 data->error = -ETIMEDOUT;
1748 irq_sd = platform_get_irq_byname(pdev, "data");
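
Match 1748 fetches the interrupt named "data" from the platform device. A hedged sketch of how such a named IRQ is typically obtained and requested during probe (the handler name and flags are placeholders, not from the listing):

	irq_sd = platform_get_irq_byname(pdev, "data");
	if (irq_sd < 0)
		return irq_sd;

	ret = devm_request_threaded_irq(&pdev->dev, irq_sd,
					usdhi6_sd_irq /* hypothetical handler */,
					NULL, 0, dev_name(&pdev->dev), host);
	if (ret < 0)
		return ret;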