Lines Matching defs:host in linux/drivers/mmc/host/omap.c (each entry is prefixed by its line number in that file)

3  *  linux/drivers/mmc/host/omap.c
23 #include <linux/mmc/host.h>
72 #define mmc_omap7xx() (host->features & MMC_OMAP7XX)
73 #define mmc_omap15xx() (host->features & MMC_OMAP15XX)
74 #define mmc_omap16xx() (host->features & MMC_OMAP16XX)
76 #define mmc_omap1() (host->features & MMC_OMAP1_MASK)
79 #define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift)
80 #define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
81 #define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))
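The three macros at file lines 79-81 are the driver's only register accessors: each register name expands to a word index that is scaled by host->reg_shift into a byte offset from virt_base (reg_shift is 1 on OMAP7xx and 2 otherwise, see file line 1426). A minimal sketch of what one access expands to, assuming the OMAP_MMC_REG_STAT index constant defined near the top of the same file:

	/* Equivalent to: u16 stat = OMAP_MMC_READ(host, STAT); */
	u16 stat = __raw_readw(host->virt_base +
			       (OMAP_MMC_REG_STAT << host->reg_shift));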
112 struct mmc_omap_host *host;
176 if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) {
182 static void mmc_omap_fclk_enable(struct mmc_omap_host *host, unsigned int enable)
186 spin_lock_irqsave(&host->clk_lock, flags);
187 if (host->fclk_enabled != enable) {
188 host->fclk_enabled = enable;
190 clk_enable(host->fclk);
192 clk_disable(host->fclk);
194 spin_unlock_irqrestore(&host->clk_lock, flags);
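File lines 182-194 cover nearly all of mmc_omap_fclk_enable(): the functional clock is gated under clk_lock and only toggled when the cached fclk_enabled state actually changes. A sketch of the complete helper; the unmatched lines (braces, the flags declaration, and the if/else keywords) are filled in as assumptions:

static void mmc_omap_fclk_enable(struct mmc_omap_host *host, unsigned int enable)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->fclk_enabled != enable) {
		/* Only touch the clock framework when the state changes. */
		host->fclk_enabled = enable;
		if (enable)
			clk_enable(host->fclk);
		else
			clk_disable(host->fclk);
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
}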
199 struct mmc_omap_host *host = slot->host;
204 spin_lock_irqsave(&host->slot_lock, flags);
205 while (host->mmc != NULL) {
206 spin_unlock_irqrestore(&host->slot_lock, flags);
207 wait_event(host->slot_wq, host->mmc == NULL);
208 spin_lock_irqsave(&host->slot_lock, flags);
210 host->mmc = slot->mmc;
211 spin_unlock_irqrestore(&host->slot_lock, flags);
213 del_timer(&host->clk_timer);
214 if (host->current_slot != slot || !claimed)
215 mmc_omap_fclk_offdelay(host->current_slot);
217 if (host->current_slot != slot) {
218 OMAP_MMC_WRITE(host, CON, slot->saved_con & 0xFC00);
219 if (host->pdata->switch_slot != NULL)
220 host->pdata->switch_slot(mmc_dev(slot->mmc), slot->id);
221 host->current_slot = slot;
225 mmc_omap_fclk_enable(host, 1);
230 OMAP_MMC_READ(host, CON);
232 OMAP_MMC_WRITE(host, CON, slot->saved_con);
234 mmc_omap_fclk_enable(host, 0);
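Within the slot-select path above (file lines 199-234), lines 204-211 form the claim loop: drop slot_lock, sleep on slot_wq until host->mmc becomes NULL, then retake the lock and re-check before claiming the controller for this slot. Shown contiguously below; only the closing brace of the while loop (file line 209, which does not mention host and so is not listed) is filled in as an assumption:

	spin_lock_irqsave(&host->slot_lock, flags);
	while (host->mmc != NULL) {
		/* Another slot owns the controller; wait for it to be released. */
		spin_unlock_irqrestore(&host->slot_lock, flags);
		wait_event(host->slot_wq, host->mmc == NULL);
		spin_lock_irqsave(&host->slot_lock, flags);
	}
	host->mmc = slot->mmc;
	spin_unlock_irqrestore(&host->slot_lock, flags);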
237 static void mmc_omap_start_request(struct mmc_omap_host *host,
242 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
244 struct mmc_omap_slot *next_slot = host->next_slot;
247 host->next_slot = NULL;
252 mmc_omap_start_request(host, rq);
257 struct mmc_omap_host *host = slot->host;
261 BUG_ON(slot == NULL || host->mmc == NULL);
265 mod_timer(&host->clk_timer, jiffies + HZ/10);
267 del_timer(&host->clk_timer);
269 mmc_omap_fclk_enable(host, 0);
272 spin_lock_irqsave(&host->slot_lock, flags);
274 for (i = 0; i < host->nr_slots; i++) {
277 if (host->slots[i] == NULL || host->slots[i]->mrq == NULL)
280 BUG_ON(host->next_slot != NULL);
281 new_slot = host->slots[i];
283 BUG_ON(new_slot == host->current_slot);
285 host->next_slot = new_slot;
286 host->mmc = new_slot->mmc;
287 spin_unlock_irqrestore(&host->slot_lock, flags);
288 queue_work(host->mmc_omap_wq, &host->slot_release_work);
292 host->mmc = NULL;
293 wake_up(&host->slot_wq);
294 spin_unlock_irqrestore(&host->slot_lock, flags);
332 mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
339 host->cmd = cmd;
360 dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
376 if (host->current_slot->bus_mode == MMC_BUSMODE_OPENDRAIN)
382 if (host->data && !(host->data->flags & MMC_DATA_WRITE))
385 mod_timer(&host->cmd_abort_timer, jiffies + HZ/2);
387 OMAP_MMC_WRITE(host, CTO, 200);
388 OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
389 OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
397 OMAP_MMC_WRITE(host, IE, irq_mask);
398 OMAP_MMC_WRITE(host, CMD, cmdreg);
402 mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
406 struct device *dev = mmc_dev(host->mmc);
411 c = host->dma_tx;
414 c = host->dma_rx;
424 dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
429 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
431 struct mmc_omap_slot *slot = host->current_slot;
432 struct mmc_data *data = host->stop_data;
438 mmc_omap_start_command(host, data->stop);
442 mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
444 if (host->dma_in_use)
445 mmc_omap_release_dma(host, data, data->error);
447 host->data = NULL;
448 host->sg_len = 0;
458 host->mrq = NULL;
459 mmc = host->mmc;
460 mmc_omap_release_slot(host->current_slot, 1);
465 host->stop_data = data;
466 queue_work(host->mmc_omap_wq, &host->send_stop_work);
470 mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops)
472 struct mmc_omap_slot *slot = host->current_slot;
480 OMAP_MMC_WRITE(host, STAT, 0xFFFF);
481 OMAP_MMC_WRITE(host, CMD, (3 << 12) | (1 << 7));
485 stat = OMAP_MMC_READ(host, STAT);
495 OMAP_MMC_WRITE(host, STAT, stat);
499 mmc_omap_abort_xfer(struct mmc_omap_host *host, struct mmc_data *data)
501 if (host->dma_in_use)
502 mmc_omap_release_dma(host, data, 1);
504 host->data = NULL;
505 host->sg_len = 0;
507 mmc_omap_send_abort(host, 10000);
511 mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
516 if (!host->dma_in_use) {
517 mmc_omap_xfer_done(host, data);
521 spin_lock_irqsave(&host->dma_lock, flags);
522 if (host->dma_done)
525 host->brs_received = 1;
526 spin_unlock_irqrestore(&host->dma_lock, flags);
528 mmc_omap_xfer_done(host, data);
532 mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
538 spin_lock_irqsave(&host->dma_lock, flags);
539 if (host->brs_received)
542 host->dma_done = 1;
543 spin_unlock_irqrestore(&host->dma_lock, flags);
545 mmc_omap_xfer_done(host, data);
549 mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
551 host->cmd = NULL;
553 del_timer(&host->cmd_abort_timer);
559 OMAP_MMC_READ(host, RSP0) |
560 (OMAP_MMC_READ(host, RSP1) << 16);
562 OMAP_MMC_READ(host, RSP2) |
563 (OMAP_MMC_READ(host, RSP3) << 16);
565 OMAP_MMC_READ(host, RSP4) |
566 (OMAP_MMC_READ(host, RSP5) << 16);
568 OMAP_MMC_READ(host, RSP6) |
569 (OMAP_MMC_READ(host, RSP7) << 16);
573 OMAP_MMC_READ(host, RSP6) |
574 (OMAP_MMC_READ(host, RSP7) << 16);
578 if (host->data == NULL || cmd->error) {
581 if (host->data != NULL)
582 mmc_omap_abort_xfer(host, host->data);
583 host->mrq = NULL;
584 mmc = host->mmc;
585 mmc_omap_release_slot(host->current_slot, 1);
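In mmc_omap_cmd_done() (file lines 549-585), the RSPn reads at lines 559-574 assemble the command response: each RSP register is 16 bits wide, and adjacent pairs are combined into the 32-bit words of cmd->resp[]. A sketch of that assembly, assuming the usual MMC_RSP_136 split; the branch structure and the resp[] indexing are filled in here and should be checked against the full source:

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* 136-bit response (e.g. R2): four 32-bit words. */
			cmd->resp[3] = OMAP_MMC_READ(host, RSP0) |
				(OMAP_MMC_READ(host, RSP1) << 16);
			cmd->resp[2] = OMAP_MMC_READ(host, RSP2) |
				(OMAP_MMC_READ(host, RSP3) << 16);
			cmd->resp[1] = OMAP_MMC_READ(host, RSP4) |
				(OMAP_MMC_READ(host, RSP5) << 16);
			cmd->resp[0] = OMAP_MMC_READ(host, RSP6) |
				(OMAP_MMC_READ(host, RSP7) << 16);
		} else {
			/* 48-bit response: one 32-bit word from RSP6/RSP7. */
			cmd->resp[0] = OMAP_MMC_READ(host, RSP6) |
				(OMAP_MMC_READ(host, RSP7) << 16);
		}
	}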
596 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
598 BUG_ON(!host->cmd);
600 dev_dbg(mmc_dev(host->mmc), "Aborting stuck command CMD%d\n",
601 host->cmd->opcode);
603 if (host->cmd->error == 0)
604 host->cmd->error = -ETIMEDOUT;
606 if (host->data == NULL) {
610 cmd = host->cmd;
611 host->cmd = NULL;
612 mmc_omap_send_abort(host, 10000);
614 host->mrq = NULL;
615 mmc = host->mmc;
616 mmc_omap_release_slot(host->current_slot, 1);
619 mmc_omap_cmd_done(host, host->cmd);
621 host->abort = 0;
622 enable_irq(host->irq);
628 struct mmc_omap_host *host = from_timer(host, t, cmd_abort_timer);
631 spin_lock_irqsave(&host->slot_lock, flags);
632 if (host->cmd != NULL && !host->abort) {
633 OMAP_MMC_WRITE(host, IE, 0);
634 disable_irq(host->irq);
635 host->abort = 1;
636 queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
638 spin_unlock_irqrestore(&host->slot_lock, flags);
643 mmc_omap_sg_to_buf(struct mmc_omap_host *host)
647 sg = host->data->sg + host->sg_idx;
648 host->buffer_bytes_left = sg->length;
649 host->buffer = sg_virt(sg);
650 if (host->buffer_bytes_left > host->total_bytes_left)
651 host->buffer_bytes_left = host->total_bytes_left;
657 struct mmc_omap_host *host = from_timer(host, t, clk_timer);
659 mmc_omap_fclk_enable(host, 0);
664 mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
668 if (host->buffer_bytes_left == 0) {
669 host->sg_idx++;
670 BUG_ON(host->sg_idx == host->sg_len);
671 mmc_omap_sg_to_buf(host);
674 if (n > host->buffer_bytes_left)
675 n = host->buffer_bytes_left;
680 host->buffer_bytes_left -= n;
681 host->total_bytes_left -= n;
682 host->data->bytes_xfered += n;
685 __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
686 host->buffer, nwords);
688 __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA),
689 host->buffer, nwords);
692 host->buffer += nwords;
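mmc_omap_xfer_data() (file lines 664-692) is the PIO path: data moves through the 16-bit DATA register with __raw_writesw()/__raw_readsw(), and host->buffer advances in 16-bit words. A sketch of the transfer step, assuming host->buffer points at u16 data and that n is the byte count chosen for this pass (the local variable names here are assumptions):

	unsigned int nwords = n / 2;

	if (write)
		__raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
			      host->buffer, nwords);
	else
		__raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA),
			     host->buffer, nwords);

	host->buffer += nwords;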
696 static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status)
710 dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
713 static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status)
721 struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
727 if (host->cmd == NULL && host->data == NULL) {
728 status = OMAP_MMC_READ(host, STAT);
729 dev_info(mmc_dev(host->slots[0]->mmc),
732 OMAP_MMC_WRITE(host, STAT, status);
733 OMAP_MMC_WRITE(host, IE, 0);
743 while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
746 OMAP_MMC_WRITE(host, STAT, status);
747 if (host->cmd != NULL)
748 cmd = host->cmd->opcode;
751 dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
753 mmc_omap_report_irq(host, status);
755 if (host->total_bytes_left) {
758 mmc_omap_xfer_data(host, 0);
760 mmc_omap_xfer_data(host, 1);
767 dev_dbg(mmc_dev(host->mmc), "data timeout (CMD%d)\n",
769 if (host->data) {
770 host->data->error = -ETIMEDOUT;
776 if (host->data) {
777 host->data->error = -EILSEQ;
778 dev_dbg(mmc_dev(host->mmc),
780 host->total_bytes_left);
783 dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
789 if (host->cmd) {
791 host->current_slot;
794 dev_err(mmc_dev(host->mmc),
797 host->cmd->error = -ETIMEDOUT;
804 if (host->cmd) {
805 dev_err(mmc_dev(host->mmc),
807 cmd, host->cmd->arg);
808 host->cmd->error = -EILSEQ;
812 dev_err(mmc_dev(host->mmc),
817 dev_dbg(mmc_dev(host->mmc),
833 if (cmd_error && host->data) {
834 del_timer(&host->cmd_abort_timer);
835 host->abort = 1;
836 OMAP_MMC_WRITE(host, IE, 0);
837 disable_irq_nosync(host->irq);
838 queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
842 if (end_command && host->cmd)
843 mmc_omap_cmd_done(host, host->cmd);
844 if (host->data != NULL) {
846 mmc_omap_xfer_done(host, host->data);
848 mmc_omap_end_of_data(host, host->data);
857 struct mmc_omap_host *host = dev_get_drvdata(dev);
858 struct mmc_omap_slot *slot = host->slots[num];
860 BUG_ON(num >= host->nr_slots);
863 if (host->nr_slots == 0 || !host->slots[num])
903 struct mmc_omap_host *host = priv;
904 struct mmc_data *data = host->data;
909 mmc_omap_dma_done(host, data);
912 static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
916 reg = OMAP_MMC_READ(host, SDIO);
918 OMAP_MMC_WRITE(host, SDIO, reg);
920 OMAP_MMC_WRITE(host, CTO, 0xfd);
923 static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
928 cycle_ns = 1000000000 / host->current_slot->fclk_freq;
933 reg = OMAP_MMC_READ(host, SDIO);
939 OMAP_MMC_WRITE(host, SDIO, reg);
940 OMAP_MMC_WRITE(host, DTO, timeout);
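set_data_timeout() (file lines 923-940) converts the request's data timeout into functional-clock cycles: cycle_ns is derived from the current slot's fclk_freq, the resulting cycle count is written to DTO, and the SDIO read-modify-write suggests a prescaler bit for counts that do not fit in 16 bits. A sketch of that conversion; the struct mmc_data field names are the standard core ones, but the prescaler bit position and divisor are assumptions to be checked against the full source:

	unsigned int timeout, cycle_ns;
	u16 reg;

	cycle_ns = 1000000000 / host->current_slot->fclk_freq;
	timeout = req->data->timeout_ns / cycle_ns;
	timeout += req->data->timeout_clks;

	reg = OMAP_MMC_READ(host, SDIO);
	if (timeout > 0xffff) {
		/* Assumed: enable a timeout prescaler and scale the count down. */
		reg |= (1 << 5);
		timeout /= 1024;
	} else {
		reg &= ~(1 << 5);
	}
	OMAP_MMC_WRITE(host, SDIO, reg);
	OMAP_MMC_WRITE(host, DTO, timeout);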
944 mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
951 host->data = data;
953 OMAP_MMC_WRITE(host, BLEN, 0);
954 OMAP_MMC_WRITE(host, NBLK, 0);
955 OMAP_MMC_WRITE(host, BUF, 0);
956 host->dma_in_use = 0;
957 set_cmd_timeout(host, req);
963 OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
964 OMAP_MMC_WRITE(host, BLEN, block_size - 1);
965 set_data_timeout(host, req);
980 host->sg_idx = 0;
1001 c = host->dma_tx;
1002 bp = &host->dma_tx_burst;
1006 c = host->dma_rx;
1007 bp = &host->dma_rx_burst;
1018 .src_addr = host->phys_base +
1019 OMAP_MMC_REG(host, DATA),
1020 .dst_addr = host->phys_base +
1021 OMAP_MMC_REG(host, DATA),
1034 host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
1036 if (host->sg_len == 0)
1039 tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
1045 OMAP_MMC_WRITE(host, BUF, buf);
1048 tx->callback_param = host;
1050 host->brs_received = 0;
1051 host->dma_done = 0;
1052 host->dma_in_use = 1;
1058 OMAP_MMC_WRITE(host, BUF, 0x1f1f);
1059 host->total_bytes_left = data->blocks * block_size;
1060 host->sg_len = sg_len;
1061 mmc_omap_sg_to_buf(host);
1062 host->dma_in_use = 0;
1065 static void mmc_omap_start_request(struct mmc_omap_host *host,
1068 BUG_ON(host->mrq != NULL);
1070 host->mrq = req;
1073 mmc_omap_prepare_data(host, req);
1074 mmc_omap_start_command(host, req->cmd);
1075 if (host->dma_in_use) {
1076 struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
1077 host->dma_tx : host->dma_rx;
1086 struct mmc_omap_host *host = slot->host;
1089 spin_lock_irqsave(&host->slot_lock, flags);
1090 if (host->mmc != NULL) {
1093 spin_unlock_irqrestore(&host->slot_lock, flags);
1096 host->mmc = mmc;
1097 spin_unlock_irqrestore(&host->slot_lock, flags);
1099 mmc_omap_start_request(host, req);
1105 struct mmc_omap_host *host;
1107 host = slot->host;
1116 w = OMAP_MMC_READ(host, CON);
1117 OMAP_MMC_WRITE(host, CON, w | (1 << 11));
1119 w = OMAP_MMC_READ(host, CON);
1120 OMAP_MMC_WRITE(host, CON, w & ~(1 << 11));
1128 struct mmc_omap_host *host = slot->host;
1129 int func_clk_rate = clk_get_rate(host->fclk);
1156 struct mmc_omap_host *host = slot->host;
1179 mmc_omap_fclk_enable(host, 1);
1200 OMAP_MMC_WRITE(host, CON, dsor);
1207 OMAP_MMC_WRITE(host, IE, 0);
1208 OMAP_MMC_WRITE(host, STAT, 0xffff);
1209 OMAP_MMC_WRITE(host, CMD, 1 << 7);
1210 while (usecs > 0 && (OMAP_MMC_READ(host, STAT) & 1) == 0) {
1214 OMAP_MMC_WRITE(host, STAT, 1);
1226 static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
1232 mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev);
1237 slot->host = host;
1241 slot->pdata = &host->pdata->slots[id];
1243 host->slots[id] = slot;
1246 if (host->pdata->slots[id].wires >= 4)
1256 if (host->pdata->max_freq)
1257 mmc->f_max = min(host->pdata->max_freq, mmc->f_max);
1317 flush_workqueue(slot->host->mmc_omap_wq);
1326 struct mmc_omap_host *host = NULL;
1340 host = devm_kzalloc(&pdev->dev, sizeof(struct mmc_omap_host),
1342 if (host == NULL)
1350 host->virt_base = devm_ioremap_resource(&pdev->dev, res);
1351 if (IS_ERR(host->virt_base))
1352 return PTR_ERR(host->virt_base);
1354 INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
1355 INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
1357 INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
1358 timer_setup(&host->cmd_abort_timer, mmc_omap_cmd_timer, 0);
1360 spin_lock_init(&host->clk_lock);
1361 timer_setup(&host->clk_timer, mmc_omap_clk_timer, 0);
1363 spin_lock_init(&host->dma_lock);
1364 spin_lock_init(&host->slot_lock);
1365 init_waitqueue_head(&host->slot_wq);
1367 host->pdata = pdata;
1368 host->features = host->pdata->slots[0].features;
1369 host->dev = &pdev->dev;
1370 platform_set_drvdata(pdev, host);
1372 host->id = pdev->id;
1373 host->irq = irq;
1374 host->phys_base = res->start;
1375 host->iclk = clk_get(&pdev->dev, "ick");
1376 if (IS_ERR(host->iclk))
1377 return PTR_ERR(host->iclk);
1378 clk_enable(host->iclk);
1380 host->fclk = clk_get(&pdev->dev, "fck");
1381 if (IS_ERR(host->fclk)) {
1382 ret = PTR_ERR(host->fclk);
1386 host->dma_tx_burst = -1;
1387 host->dma_rx_burst = -1;
1389 host->dma_tx = dma_request_chan(&pdev->dev, "tx");
1390 if (IS_ERR(host->dma_tx)) {
1391 ret = PTR_ERR(host->dma_tx);
1393 clk_put(host->fclk);
1397 host->dma_tx = NULL;
1398 dev_warn(host->dev, "TX DMA channel request failed\n");
1401 host->dma_rx = dma_request_chan(&pdev->dev, "rx");
1402 if (IS_ERR(host->dma_rx)) {
1403 ret = PTR_ERR(host->dma_rx);
1405 if (host->dma_tx)
1406 dma_release_channel(host->dma_tx);
1407 clk_put(host->fclk);
1411 host->dma_rx = NULL;
1412 dev_warn(host->dev, "RX DMA channel request failed\n");
1415 ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
1425 host->nr_slots = pdata->nr_slots;
1426 host->reg_shift = (mmc_omap7xx() ? 1 : 2);
1428 host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
1429 if (!host->mmc_omap_wq) {
1435 ret = mmc_omap_new_slot(host, i);
1438 mmc_omap_remove_slot(host->slots[i]);
1447 destroy_workqueue(host->mmc_omap_wq);
1452 free_irq(host->irq, host);
1454 if (host->dma_tx)
1455 dma_release_channel(host->dma_tx);
1456 if (host->dma_rx)
1457 dma_release_channel(host->dma_rx);
1458 clk_put(host->fclk);
1460 clk_disable(host->iclk);
1461 clk_put(host->iclk);
1467 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1470 BUG_ON(host == NULL);
1472 for (i = 0; i < host->nr_slots; i++)
1473 mmc_omap_remove_slot(host->slots[i]);
1475 if (host->pdata->cleanup)
1476 host->pdata->cleanup(&pdev->dev);
1478 mmc_omap_fclk_enable(host, 0);
1479 free_irq(host->irq, host);
1480 clk_put(host->fclk);
1481 clk_disable(host->iclk);
1482 clk_put(host->iclk);
1484 if (host->dma_tx)
1485 dma_release_channel(host->dma_tx);
1486 if (host->dma_rx)
1487 dma_release_channel(host->dma_rx);
1489 destroy_workqueue(host->mmc_omap_wq);