Lines matching defs:acdev — uses of struct arasan_cf_dev *acdev in the Arasan CompactFlash (PATA) host controller driver

226 static void cf_dumpregs(struct arasan_cf_dev *acdev)
228 struct device *dev = acdev->host->dev;
231 dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
232 dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
233 dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
234 dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
235 dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
236 dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
237 dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
238 dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
239 dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
240 dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
245 static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable)
248 writel(enable, acdev->vbase + GIRQ_STS_EN);
249 writel(enable, acdev->vbase + GIRQ_SGN_EN);
254 cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
256 u32 val = readl(acdev->vbase + IRQ_EN);
259 writel(mask, acdev->vbase + IRQ_STS);
260 writel(val | mask, acdev->vbase + IRQ_EN);
262 writel(val & ~mask, acdev->vbase + IRQ_EN);
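
Note on the helper above: this is the usual read-modify-write mask pattern, acknowledging any pending bits in IRQ_STS before unmasking them in IRQ_EN. A minimal sketch, with the elided if/else structure filled in as an assumption inferred from lines 259-262:

    static void cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
    {
        u32 val = readl(acdev->vbase + IRQ_EN);

        if (enable) {
            /* ack stale status bits first, then unmask */
            writel(mask, acdev->vbase + IRQ_STS);
            writel(val | mask, acdev->vbase + IRQ_EN);
        } else {
            writel(val & ~mask, acdev->vbase + IRQ_EN);
        }
    }
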
265 static inline void cf_card_reset(struct arasan_cf_dev *acdev)
267 u32 val = readl(acdev->vbase + OP_MODE);
269 writel(val | CARD_RESET, acdev->vbase + OP_MODE);
271 writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
274 static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev)
276 writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
277 acdev->vbase + OP_MODE);
278 writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
279 acdev->vbase + OP_MODE);
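
Both reset helpers above toggle a single bit in OP_MODE: cf_card_reset pulses CARD_RESET, cf_ctrl_reset bounces CFHOST_ENB. The delay between the two writes is elided in the listing; a sketch of the pulse shape, with the hold time as an assumption:

    static inline void cf_card_reset(struct arasan_cf_dev *acdev)
    {
        u32 val = readl(acdev->vbase + OP_MODE);

        writel(val | CARD_RESET, acdev->vbase + OP_MODE);
        udelay(200);        /* hold time; the actual value is not in the listing */
        writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
    }
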
282 static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
284 struct ata_port *ap = acdev->host->ports[0];
286 u32 val = readl(acdev->vbase + CFI_STS);
290 if (acdev->card_present)
292 acdev->card_present = 1;
293 cf_card_reset(acdev);
295 if (!acdev->card_present)
297 acdev->card_present = 0;
306 static int cf_init(struct arasan_cf_dev *acdev)
308 struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
313 ret = clk_prepare_enable(acdev->clk);
315 dev_dbg(acdev->host->dev, "clock enable failed");
319 ret = clk_set_rate(acdev->clk, 166000000);
321 dev_warn(acdev->host->dev, "clock set rate failed");
322 clk_disable_unprepare(acdev->clk);
326 spin_lock_irqsave(&acdev->host->lock, flags);
333 writel(if_clk, acdev->vbase + CLK_CFG);
335 writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
336 cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
337 cf_ginterrupt_enable(acdev, 1);
338 spin_unlock_irqrestore(&acdev->host->lock, flags);
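
Taken together, lines 306-338 are the bring-up sequence: enable the clock, ask for a 166 MHz interface clock, then program the clock divider and True-IDE operating mode and unmask card detection under the host lock. A condensed sketch; the CF_IF_CLK_166M divider macro and its platform-data override are assumptions, and error paths are abbreviated:

    static int cf_init(struct arasan_cf_dev *acdev)
    {
        unsigned long flags;
        u32 if_clk = CF_IF_CLK_166M;    /* assumed divider macro; may be overridden from platform data */
        int ret;

        ret = clk_prepare_enable(acdev->clk);
        if (ret) {
            dev_dbg(acdev->host->dev, "clock enable failed");
            return ret;
        }

        ret = clk_set_rate(acdev->clk, 166000000);
        if (ret) {
            dev_warn(acdev->host->dev, "clock set rate failed");
            clk_disable_unprepare(acdev->clk);
            return ret;
        }

        spin_lock_irqsave(&acdev->host->lock, flags);
        writel(if_clk, acdev->vbase + CLK_CFG);
        writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
        cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
        cf_ginterrupt_enable(acdev, 1);
        spin_unlock_irqrestore(&acdev->host->lock, flags);

        return 0;
    }
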
343 static void cf_exit(struct arasan_cf_dev *acdev)
347 spin_lock_irqsave(&acdev->host->lock, flags);
348 cf_ginterrupt_enable(acdev, 0);
349 cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
350 cf_card_reset(acdev);
351 writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
352 acdev->vbase + OP_MODE);
353 spin_unlock_irqrestore(&acdev->host->lock, flags);
354 clk_disable_unprepare(acdev->clk);
359 struct arasan_cf_dev *acdev = dev;
361 complete(&acdev->dma_completion);
364 static inline void dma_complete(struct arasan_cf_dev *acdev)
366 struct ata_queued_cmd *qc = acdev->qc;
369 acdev->qc = NULL;
370 ata_sff_interrupt(acdev->irq, acdev->host);
372 spin_lock_irqsave(&acdev->host->lock, flags);
375 spin_unlock_irqrestore(&acdev->host->lock, flags);
378 static inline int wait4buf(struct arasan_cf_dev *acdev)
380 if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
381 u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
383 dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
388 if (acdev->dma_status & ATA_DMA_ERR)
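
wait4buf() is the sleeping half of the transfer handshake: it waits on cf_completion, which the interrupt handler completes when a buffer becomes available or an error is flagged. A sketch; the specific error codes returned are assumptions:

    static inline int wait4buf(struct arasan_cf_dev *acdev)
    {
        if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
            u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;

            dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
            return -ETIMEDOUT;
        }

        /* the irq handler flags controller errors through dma_status */
        if (acdev->dma_status & ATA_DMA_ERR)
            return -EAGAIN;

        return 0;
    }
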
395 dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
398 struct dma_chan *chan = acdev->dma_chan;
405 dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
410 tx->callback_param = acdev;
415 dev_err(acdev->host->dev, "dma_submit_error\n");
422 if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
424 dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
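
dma_xfer() is a plain dmaengine memcpy between system memory and the controller FIFO: prepare a descriptor, attach the completion callback shown at lines 359-361 (its name is not in the listing, so dma_callback below is an assumed name), submit, kick the channel, and wait with a timeout. The prep flags and the timeout cleanup are also assumptions:

    /* completion callback (assumed name); matches the fragment at lines 359-361 */
    static void dma_callback(void *dev)
    {
        struct arasan_cf_dev *acdev = dev;

        complete(&acdev->dma_completion);
    }

    static int dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
    {
        struct dma_chan *chan = acdev->dma_chan;
        struct dma_async_tx_descriptor *tx;
        unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;    /* assumed flags */
        dma_cookie_t cookie;

        tx = dmaengine_prep_dma_memcpy(chan, dest, src, len, flags);
        if (!tx) {
            dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
            return -EAGAIN;
        }

        tx->callback = dma_callback;
        tx->callback_param = acdev;
        cookie = dmaengine_submit(tx);

        if (dma_submit_error(cookie)) {
            dev_err(acdev->host->dev, "dma_submit_error\n");
            return -EAGAIN;
        }

        dma_async_issue_pending(chan);

        if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
            dmaengine_terminate_all(chan);  /* assumed cleanup on timeout */
            dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
            return -ETIMEDOUT;
        }

        return 0;
    }
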
431 static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
435 u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
442 dest = acdev->pbase + EXT_WRITE_PORT;
445 src = acdev->pbase + EXT_READ_PORT;
457 spin_lock_irqsave(&acdev->host->lock, flags);
458 xfer_ctr = readl(acdev->vbase + XFER_CTR) &
461 acdev->vbase + XFER_CTR);
462 spin_unlock_irqrestore(&acdev->host->lock, flags);
468 ret = wait4buf(acdev);
475 ret = dma_xfer(acdev, src, dest, dma_len);
477 dev_err(acdev->host->dev, "dma failed");
491 ret = wait4buf(acdev);
499 spin_lock_irqsave(&acdev->host->lock, flags);
500 writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
501 acdev->vbase + XFER_CTR);
502 spin_unlock_irqrestore(&acdev->host->lock, flags);
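
sg_xfer() drains one scatterlist entry through the FIFO: start the transfer engine under the host lock, then repeatedly wait for a buffer-available completion and move one FIFO-sized chunk with dma_xfer() to or from the EXT_WRITE_PORT/EXT_READ_PORT window at acdev->pbase, and finally clear XFER_START. A heavily condensed sketch; the chunk size and the word-count programming are assumptions:

    #define FIFO_SIZE 512   /* assumed chunk size; the real value is not in the listing */

    /* condensed shape of the loop; not the verbatim driver code */
    static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
    {
        u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
        dma_addr_t buf = sg_dma_address(sg);
        u32 remaining = sg_dma_len(sg);
        unsigned long flags;
        u32 xfer_ctr;
        int ret = 0;

        spin_lock_irqsave(&acdev->host->lock, flags);
        xfer_ctr = readl(acdev->vbase + XFER_CTR);
        /* the driver also programs the transfer word count here (elided above) */
        writel(xfer_ctr | XFER_START, acdev->vbase + XFER_CTR);
        spin_unlock_irqrestore(&acdev->host->lock, flags);

        while (remaining) {
            u32 len = min_t(u32, remaining, FIFO_SIZE);

            ret = wait4buf(acdev);      /* sleeps until BUF_AVAIL_IRQ */
            if (ret)
                break;

            if (write)
                ret = dma_xfer(acdev, buf, acdev->pbase + EXT_WRITE_PORT, len);
            else
                ret = dma_xfer(acdev, acdev->pbase + EXT_READ_PORT, buf, len);
            if (ret) {
                dev_err(acdev->host->dev, "dma failed");
                break;
            }

            buf += len;
            remaining -= len;
        }

        spin_lock_irqsave(&acdev->host->lock, flags);
        writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
               acdev->vbase + XFER_CTR);
        spin_unlock_irqrestore(&acdev->host->lock, flags);

        return ret;
    }
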
520 struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
522 struct ata_queued_cmd *qc = acdev->qc;
530 acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
531 if (IS_ERR(acdev->dma_chan)) {
532 dev_err_probe(acdev->host->dev, PTR_ERR(acdev->dma_chan),
534 acdev->dma_chan = NULL;
539 ret = sg_xfer(acdev, sg);
544 dma_release_channel(acdev->dma_chan);
545 acdev->dma_chan = NULL;
551 spin_lock_irqsave(&acdev->host->lock, flags);
553 spin_unlock_irqrestore(&acdev->host->lock, flags);
555 ata_sff_queue_delayed_work(&acdev->dwork, 1);
562 cf_dumpregs(acdev);
565 spin_lock_irqsave(&acdev->host->lock, flags);
570 cf_ctrl_reset(acdev);
571 spin_unlock_irqrestore(&acdev->host->lock, flags);
573 dma_complete(acdev);
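
data_xfer() is the workqueue half of a command: request the "data" memcpy channel, walk qc->sg with sg_xfer(), release the channel, then either finish the command or re-arm the delayed-work poll if the device is still busy. A sketch of the channel lifecycle only; the final status/reset handling (lines 551-573) is summarized in the trailing comment:

    static void data_xfer(struct work_struct *work)
    {
        struct arasan_cf_dev *acdev =
            container_of(work, struct arasan_cf_dev, work);
        struct ata_queued_cmd *qc = acdev->qc;
        struct scatterlist *sg;
        unsigned int si;
        int ret = 0;

        /* a memcpy channel is requested per command from the "data" binding */
        acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
        if (IS_ERR(acdev->dma_chan)) {
            dev_err_probe(acdev->host->dev, PTR_ERR(acdev->dma_chan),
                          "dma channel request failed\n");  /* message text elided in the listing */
            acdev->dma_chan = NULL;
            return;     /* the real driver also fails the qc here */
        }

        for_each_sg(qc->sg, sg, qc->n_elem, si) {
            ret = sg_xfer(acdev, sg);
            if (ret)
                break;
        }

        dma_release_channel(acdev->dma_chan);
        acdev->dma_chan = NULL;

        /* On success with the device still busy, re-check later via
         * ata_sff_queue_delayed_work(&acdev->dwork, 1); on error, dump the
         * registers, cf_ctrl_reset() under the host lock, then dma_complete(). */
    }
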
578 struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
580 struct ata_queued_cmd *qc = acdev->qc;
584 spin_lock_irqsave(&acdev->host->lock, flags);
586 spin_unlock_irqrestore(&acdev->host->lock, flags);
589 ata_sff_queue_delayed_work(&acdev->dwork, 1);
591 dma_complete(acdev);
596 struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
600 irqsts = readl(acdev->vbase + GIRQ_STS);
604 spin_lock_irqsave(&acdev->host->lock, flags);
605 irqsts = readl(acdev->vbase + IRQ_STS);
606 writel(irqsts, acdev->vbase + IRQ_STS); /* clear irqs */
607 writel(GIRQ_CF, acdev->vbase + GIRQ_STS); /* clear girqs */
613 cf_card_detect(acdev, 1);
614 spin_unlock_irqrestore(&acdev->host->lock, flags);
619 acdev->dma_status = ATA_DMA_ERR;
620 writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
621 acdev->vbase + XFER_CTR);
622 spin_unlock_irqrestore(&acdev->host->lock, flags);
623 complete(&acdev->cf_completion);
624 dev_err(acdev->host->dev, "pio xfer err irq\n");
628 spin_unlock_irqrestore(&acdev->host->lock, flags);
631 complete(&acdev->cf_completion);
636 struct ata_queued_cmd *qc = acdev->qc;
640 complete(&acdev->cf_completion);
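
The interrupt handler demultiplexes one global status read: GIRQ_STS says whether the CF block raised the line at all, IRQ_STS says why, and both are written back to acknowledge. Card detect, PIO transfer error, and buffer-available/transfer-done then map onto cf_card_detect(), an aborted transfer, and the cf_completion that wait4buf() sleeps on. A sketch; the IRQ_NONE/IRQ_HANDLED returns and the bit-by-bit dispatch conditions are inferred, not verbatim:

    static irqreturn_t irq_handler(int irq, void *dev)
    {
        struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
        unsigned long flags;
        u32 irqsts;

        irqsts = readl(acdev->vbase + GIRQ_STS);
        if (!(irqsts & GIRQ_CF))
            return IRQ_NONE;        /* not ours */

        spin_lock_irqsave(&acdev->host->lock, flags);
        irqsts = readl(acdev->vbase + IRQ_STS);
        writel(irqsts, acdev->vbase + IRQ_STS);     /* clear irqs */
        writel(GIRQ_CF, acdev->vbase + GIRQ_STS);   /* clear girqs */

        if (irqsts & CARD_DETECT_IRQ) {
            cf_card_detect(acdev, 1);
            spin_unlock_irqrestore(&acdev->host->lock, flags);
            return IRQ_HANDLED;
        }

        if (irqsts & PIO_XFER_ERR_IRQ) {
            /* abort the transfer and wake the waiter with an error */
            acdev->dma_status = ATA_DMA_ERR;
            writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
                   acdev->vbase + XFER_CTR);
            spin_unlock_irqrestore(&acdev->host->lock, flags);
            complete(&acdev->cf_completion);
            dev_err(acdev->host->dev, "pio xfer err irq\n");
            return IRQ_HANDLED;
        }

        spin_unlock_irqrestore(&acdev->host->lock, flags);

        if (irqsts & BUF_AVAIL_IRQ) {
            complete(&acdev->cf_completion);
            return IRQ_HANDLED;
        }

        if (irqsts & XFER_DONE_IRQ) {
            struct ata_queued_cmd *qc = acdev->qc;

            /* condition assumed: complete only when the waiter is a write */
            if (qc->tf.flags & ATA_TFLAG_WRITE)
                complete(&acdev->cf_completion);
        }

        return IRQ_HANDLED;
    }
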
648 struct arasan_cf_dev *acdev = ap->host->private_data;
651 writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
652 acdev->vbase + XFER_CTR);
653 cf_ctrl_reset(acdev);
654 acdev->dma_status = ATA_DMA_ERR;
662 struct arasan_cf_dev *acdev = ap->host->private_data;
669 cancel_work_sync(&acdev->work);
670 cancel_delayed_work_sync(&acdev->dwork);
674 static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
676 struct ata_queued_cmd *qc = acdev->qc;
679 u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
683 writel(xfer_ctr, acdev->vbase + XFER_CTR);
686 ata_sff_queue_work(&acdev->work);
692 struct arasan_cf_dev *acdev = ap->host->private_data;
710 acdev->dma_status = 0;
711 acdev->qc = qc;
713 arasan_cf_dma_start(acdev);
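
Command issue is split in two: arasan_cf_qc_issue() latches the qc and clears dma_status (lines 710-713), and arasan_cf_dma_start() programs the transfer direction into XFER_CTR before handing the heavy lifting to the data_xfer work item. A sketch of that hand-off; the direction bit names and the device-side kick via sff_exec_command are assumptions:

    static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
    {
        struct ata_queued_cmd *qc = acdev->qc;
        struct ata_port *ap = qc->ap;
        struct ata_taskfile *tf = &qc->tf;
        u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;

        /* direction bit names assumed */
        xfer_ctr |= (tf->flags & ATA_TFLAG_WRITE) ? XFER_WRITE : XFER_READ;
        writel(xfer_ctr, acdev->vbase + XFER_CTR);

        ap->ops->sff_exec_command(ap, tf);  /* start the device side (assumed) */
        ata_sff_queue_work(&acdev->work);   /* kick data_xfer() */
    }
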
727 struct arasan_cf_dev *acdev = ap->host->private_data;
738 spin_lock_irqsave(&acdev->host->lock, flags);
739 val = readl(acdev->vbase + OP_MODE) &
741 writel(val, acdev->vbase + OP_MODE);
742 val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
744 writel(val, acdev->vbase + TM_CFG);
746 cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0);
747 cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1);
748 spin_unlock_irqrestore(&acdev->host->lock, flags);
753 struct arasan_cf_dev *acdev = ap->host->private_data;
757 spin_lock_irqsave(&acdev->host->lock, flags);
758 opmode = readl(acdev->vbase + OP_MODE) &
760 tmcfg = readl(acdev->vbase + TM_CFG);
773 spin_unlock_irqrestore(&acdev->host->lock, flags);
777 writel(opmode, acdev->vbase + OP_MODE);
778 writel(tmcfg, acdev->vbase + TM_CFG);
779 writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);
781 cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0);
782 cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1);
783 spin_unlock_irqrestore(&acdev->host->lock, flags);
797 struct arasan_cf_dev *acdev;
816 acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
817 if (!acdev)
831 acdev->irq = ret;
839 acdev->pbase = res->start;
840 acdev->vbase = devm_ioremap(&pdev->dev, res->start,
842 if (!acdev->vbase) {
847 acdev->clk = devm_clk_get(&pdev->dev, NULL);
848 if (IS_ERR(acdev->clk)) {
850 return PTR_ERR(acdev->clk);
861 host->private_data = acdev;
862 acdev->host = host;
868 init_completion(&acdev->cf_completion);
869 init_completion(&acdev->dma_completion);
870 INIT_WORK(&acdev->work, data_xfer);
871 INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
872 dma_cap_set(DMA_MEMCPY, acdev->mask);
887 ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
888 ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
889 ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
890 ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
891 ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
892 ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
893 ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
894 ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
895 ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
896 ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
897 ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
898 ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
899 ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;
902 (unsigned long long) res->start, acdev->vbase);
904 ret = cf_init(acdev);
908 cf_card_detect(acdev, 0);
910 ret = ata_host_activate(host, acdev->irq, irq_handler, 0,
915 cf_exit(acdev);
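
The probe path follows the standard devm pattern: allocate the per-device state, pick up the IRQ, MMIO window, and clock, allocate a one-port ATA host, wire the SFF ioaddr block to offsets inside vbase, initialize the controller, and activate the host with irq_handler. A compressed sketch; the resource lookup, the sht name, and several error messages are elided in the listing and appear here as assumptions:

    static int arasan_cf_probe(struct platform_device *pdev)
    {
        struct arasan_cf_dev *acdev;
        struct resource *res;
        struct ata_host *host;
        struct ata_port *ap;
        int ret;

        acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
        if (!acdev)
            return -ENOMEM;

        ret = platform_get_irq(pdev, 0);
        if (ret < 0)
            return ret;
        acdev->irq = ret;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);   /* lookup assumed */
        acdev->pbase = res->start;
        acdev->vbase = devm_ioremap(&pdev->dev, res->start,
                                    resource_size(res));        /* size argument assumed */
        if (!acdev->vbase)
            return -ENOMEM;

        acdev->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(acdev->clk))
            return PTR_ERR(acdev->clk);

        host = ata_host_alloc(&pdev->dev, 1);
        if (!host)
            return -ENOMEM;
        ap = host->ports[0];
        host->private_data = acdev;
        acdev->host = host;

        init_completion(&acdev->cf_completion);
        init_completion(&acdev->dma_completion);
        INIT_WORK(&acdev->work, data_xfer);
        INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
        dma_cap_set(DMA_MEMCPY, acdev->mask);

        /* SFF register block lives inside the same MMIO window */
        ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
        ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
        ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;
        /* remaining ioaddr fields as in lines 887-899 */

        ret = cf_init(acdev);
        if (ret)
            return ret;

        cf_card_detect(acdev, 0);

        ret = ata_host_activate(host, acdev->irq, irq_handler, 0,
                                &arasan_cf_sht);    /* sht name assumed */
        if (ret)
            cf_exit(acdev);

        return ret;
    }
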
923 struct arasan_cf_dev *acdev = host->ports[0]->private_data;
926 cf_exit(acdev);
933 struct arasan_cf_dev *acdev = host->ports[0]->private_data;
935 if (acdev->dma_chan)
936 dmaengine_terminate_all(acdev->dma_chan);
938 cf_exit(acdev);
946 struct arasan_cf_dev *acdev = host->ports[0]->private_data;
948 cf_init(acdev);
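
The PM hooks reuse the same init/exit helpers: suspend terminates any in-flight memcpy on the channel and tears the controller down with cf_exit(); resume re-runs cf_init(). A sketch; the ata_host_suspend()/ata_host_resume() calls and the dev_pm_ops wiring are assumptions:

    static int arasan_cf_suspend(struct device *dev)
    {
        struct ata_host *host = dev_get_drvdata(dev);
        struct arasan_cf_dev *acdev = host->ports[0]->private_data;

        if (acdev->dma_chan)
            dmaengine_terminate_all(acdev->dma_chan);

        cf_exit(acdev);
        ata_host_suspend(host, PMSG_SUSPEND);   /* assumed */
        return 0;
    }

    static int arasan_cf_resume(struct device *dev)
    {
        struct ata_host *host = dev_get_drvdata(dev);
        struct arasan_cf_dev *acdev = host->ports[0]->private_data;

        cf_init(acdev);
        ata_host_resume(host);                  /* assumed */
        return 0;
    }

    static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume);
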