Lines matching defs:acdev (struct arasan_cf_dev references in the Arasan CompactFlash PATA controller driver)

225 static void cf_dumpregs(struct arasan_cf_dev *acdev)
227 struct device *dev = acdev->host->dev;
230 dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
231 dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
232 dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
233 dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
234 dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
235 dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
236 dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
237 dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
238 dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
239 dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
244 static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable)
247 writel(enable, acdev->vbase + GIRQ_STS_EN);
248 writel(enable, acdev->vbase + GIRQ_SGN_EN);
253 cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
255 u32 val = readl(acdev->vbase + IRQ_EN);
258 writel(mask, acdev->vbase + IRQ_STS);
259 writel(val | mask, acdev->vbase + IRQ_EN);
261 writel(val & ~mask, acdev->vbase + IRQ_EN);
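
Taken together, the cf_interrupt_enable() lines above are a read-modify-write update of the controller's IRQ mask, acking any stale status bits before unmasking them. A hedged reconstruction of the whole helper (only the numbered lines come from the listing; the if/else framing is inferred):

static void
cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
{
	u32 val = readl(acdev->vbase + IRQ_EN);

	if (enable) {
		/* clear any pending status bits before unmasking them */
		writel(mask, acdev->vbase + IRQ_STS);
		writel(val | mask, acdev->vbase + IRQ_EN);
	} else {
		writel(val & ~mask, acdev->vbase + IRQ_EN);
	}
}
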
264 static inline void cf_card_reset(struct arasan_cf_dev *acdev)
266 u32 val = readl(acdev->vbase + OP_MODE);
268 writel(val | CARD_RESET, acdev->vbase + OP_MODE);
270 writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
273 static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev)
275 writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
276 acdev->vbase + OP_MODE);
277 writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
278 acdev->vbase + OP_MODE);
281 static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
283 struct ata_port *ap = acdev->host->ports[0];
285 u32 val = readl(acdev->vbase + CFI_STS);
289 if (acdev->card_present)
291 acdev->card_present = 1;
292 cf_card_reset(acdev);
294 if (!acdev->card_present)
296 acdev->card_present = 0;
305 static int cf_init(struct arasan_cf_dev *acdev)
307 struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
312 ret = clk_prepare_enable(acdev->clk);
314 dev_dbg(acdev->host->dev, "clock enable failed");
318 ret = clk_set_rate(acdev->clk, 166000000);
320 dev_warn(acdev->host->dev, "clock set rate failed");
321 clk_disable_unprepare(acdev->clk);
325 spin_lock_irqsave(&acdev->host->lock, flags);
332 writel(if_clk, acdev->vbase + CLK_CFG);
334 writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
335 cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
336 cf_ginterrupt_enable(acdev, 1);
337 spin_unlock_irqrestore(&acdev->host->lock, flags);
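
The cf_init() lines above bring up the interface clock before programming CLK_CFG and OP_MODE under the host lock. A sketch of the clock bring-up with the error paths filled in (the early returns are inferred, not part of the listing):

	ret = clk_prepare_enable(acdev->clk);
	if (ret) {
		dev_dbg(acdev->host->dev, "clock enable failed");
		return ret;
	}

	ret = clk_set_rate(acdev->clk, 166000000);
	if (ret) {
		dev_warn(acdev->host->dev, "clock set rate failed");
		clk_disable_unprepare(acdev->clk);
		return ret;
	}
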
342 static void cf_exit(struct arasan_cf_dev *acdev)
346 spin_lock_irqsave(&acdev->host->lock, flags);
347 cf_ginterrupt_enable(acdev, 0);
348 cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
349 cf_card_reset(acdev);
350 writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
351 acdev->vbase + OP_MODE);
352 spin_unlock_irqrestore(&acdev->host->lock, flags);
353 clk_disable_unprepare(acdev->clk);
358 struct arasan_cf_dev *acdev = dev;
360 complete(&acdev->dma_completion);
363 static inline void dma_complete(struct arasan_cf_dev *acdev)
365 struct ata_queued_cmd *qc = acdev->qc;
368 acdev->qc = NULL;
369 ata_sff_interrupt(acdev->irq, acdev->host);
371 spin_lock_irqsave(&acdev->host->lock, flags);
374 spin_unlock_irqrestore(&acdev->host->lock, flags);
377 static inline int wait4buf(struct arasan_cf_dev *acdev)
379 if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
380 u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
382 dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
387 if (acdev->dma_status & ATA_DMA_ERR)
394 dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
397 struct dma_chan *chan = acdev->dma_chan;
404 dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
409 tx->callback_param = acdev;
414 dev_err(acdev->host->dev, "dma_submit_error\n");
421 if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
423 dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
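
dma_xfer() drives a plain dmaengine memcpy: prepare a descriptor on the channel, submit it, kick the engine and block on acdev->dma_completion, which the callback at source lines 358-360 completes. A hedged sketch of that sequence (the descriptor flags, return codes and the dma_callback name are assumptions inferred from the fragments):

	struct dma_chan *chan = acdev->dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
						   DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
		return -EAGAIN;
	}

	tx->callback = dma_callback;	/* completes acdev->dma_completion */
	tx->callback_param = acdev;
	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(acdev->host->dev, "dma_submit_error\n");
		return -EAGAIN;
	}

	dma_async_issue_pending(chan);

	if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
		dmaengine_terminate_all(chan);
		dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
		return -ETIMEDOUT;
	}
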
430 static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
434 u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
441 dest = acdev->pbase + EXT_WRITE_PORT;
444 src = acdev->pbase + EXT_READ_PORT;
456 spin_lock_irqsave(&acdev->host->lock, flags);
457 xfer_ctr = readl(acdev->vbase + XFER_CTR) &
460 acdev->vbase + XFER_CTR);
461 spin_unlock_irqrestore(&acdev->host->lock, flags);
467 ret = wait4buf(acdev);
474 ret = dma_xfer(acdev, src, dest, dma_len);
476 dev_err(acdev->host->dev, "dma failed");
490 ret = wait4buf(acdev);
498 spin_lock_irqsave(&acdev->host->lock, flags);
499 writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
500 acdev->vbase + XFER_CTR);
501 spin_unlock_irqrestore(&acdev->host->lock, flags);
519 struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
521 struct ata_queued_cmd *qc = acdev->qc;
529 acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
530 if (IS_ERR(acdev->dma_chan)) {
531 dev_err_probe(acdev->host->dev, PTR_ERR(acdev->dma_chan),
533 acdev->dma_chan = NULL;
538 ret = sg_xfer(acdev, sg);
543 dma_release_channel(acdev->dma_chan);
544 acdev->dma_chan = NULL;
550 spin_lock_irqsave(&acdev->host->lock, flags);
552 spin_unlock_irqrestore(&acdev->host->lock, flags);
554 ata_sff_queue_delayed_work(&acdev->dwork, 1);
561 cf_dumpregs(acdev);
564 spin_lock_irqsave(&acdev->host->lock, flags);
569 cf_ctrl_reset(acdev);
570 spin_unlock_irqrestore(&acdev->host->lock, flags);
572 dma_complete(acdev);
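
data_xfer() requests a named memcpy channel per command, walks the queued command's scatterlist through sg_xfer(), and drops the channel again before completing the command. A minimal sketch of that pattern (loop variables and the error message are illustrative, not from the listing):

	struct scatterlist *sg;
	u32 temp;
	int ret = 0;

	acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
	if (IS_ERR(acdev->dma_chan)) {
		dev_err_probe(acdev->host->dev, PTR_ERR(acdev->dma_chan),
			      "dma channel unavailable\n");
		acdev->dma_chan = NULL;
		return;		/* work function: give up on this command */
	}

	for_each_sg(qc->sg, sg, qc->n_elem, temp) {
		ret = sg_xfer(acdev, sg);
		if (ret)
			break;
	}

	dma_release_channel(acdev->dma_chan);
	acdev->dma_chan = NULL;
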
577 struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
579 struct ata_queued_cmd *qc = acdev->qc;
583 spin_lock_irqsave(&acdev->host->lock, flags);
585 spin_unlock_irqrestore(&acdev->host->lock, flags);
588 ata_sff_queue_delayed_work(&acdev->dwork, 1);
590 dma_complete(acdev);
595 struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
599 irqsts = readl(acdev->vbase + GIRQ_STS);
603 spin_lock_irqsave(&acdev->host->lock, flags);
604 irqsts = readl(acdev->vbase + IRQ_STS);
605 writel(irqsts, acdev->vbase + IRQ_STS); /* clear irqs */
606 writel(GIRQ_CF, acdev->vbase + GIRQ_STS); /* clear girqs */
612 cf_card_detect(acdev, 1);
613 spin_unlock_irqrestore(&acdev->host->lock, flags);
618 acdev->dma_status = ATA_DMA_ERR;
619 writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
620 acdev->vbase + XFER_CTR);
621 spin_unlock_irqrestore(&acdev->host->lock, flags);
622 complete(&acdev->cf_completion);
623 dev_err(acdev->host->dev, "pio xfer err irq\n");
627 spin_unlock_irqrestore(&acdev->host->lock, flags);
630 complete(&acdev->cf_completion);
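
The interrupt handler follows an ack-before-handle flow: check the global GIRQ_CF bit, then latch and clear the per-event status under the host lock before servicing card-detect, PIO-error and buffer-available events. A hedged reconstruction of the entry path (the early IRQ_NONE exit is inferred from the GIRQ_STS read):

	irqsts = readl(acdev->vbase + GIRQ_STS);
	if (!(irqsts & GIRQ_CF))
		return IRQ_NONE;	/* not this controller's interrupt */

	spin_lock_irqsave(&acdev->host->lock, flags);
	irqsts = readl(acdev->vbase + IRQ_STS);
	writel(irqsts, acdev->vbase + IRQ_STS);		/* clear irqs */
	writel(GIRQ_CF, acdev->vbase + GIRQ_STS);	/* clear girqs */
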
635 struct ata_queued_cmd *qc = acdev->qc;
639 complete(&acdev->cf_completion);
647 struct arasan_cf_dev *acdev = ap->host->private_data;
650 writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
651 acdev->vbase + XFER_CTR);
652 cf_ctrl_reset(acdev);
653 acdev->dma_status = ATA_DMA_ERR;
661 struct arasan_cf_dev *acdev = ap->host->private_data;
668 cancel_work_sync(&acdev->work);
669 cancel_delayed_work_sync(&acdev->dwork);
673 static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
675 struct ata_queued_cmd *qc = acdev->qc;
678 u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
682 writel(xfer_ctr, acdev->vbase + XFER_CTR);
685 ata_sff_queue_work(&acdev->work);
691 struct arasan_cf_dev *acdev = ap->host->private_data;
708 acdev->dma_status = 0;
709 acdev->qc = qc;
710 arasan_cf_dma_start(acdev);
724 struct arasan_cf_dev *acdev = ap->host->private_data;
735 spin_lock_irqsave(&acdev->host->lock, flags);
736 val = readl(acdev->vbase + OP_MODE) &
738 writel(val, acdev->vbase + OP_MODE);
739 val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
741 writel(val, acdev->vbase + TM_CFG);
743 cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0);
744 cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1);
745 spin_unlock_irqrestore(&acdev->host->lock, flags);
750 struct arasan_cf_dev *acdev = ap->host->private_data;
754 spin_lock_irqsave(&acdev->host->lock, flags);
755 opmode = readl(acdev->vbase + OP_MODE) &
757 tmcfg = readl(acdev->vbase + TM_CFG);
770 spin_unlock_irqrestore(&acdev->host->lock, flags);
774 writel(opmode, acdev->vbase + OP_MODE);
775 writel(tmcfg, acdev->vbase + TM_CFG);
776 writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);
778 cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0);
779 cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1);
780 spin_unlock_irqrestore(&acdev->host->lock, flags);
794 struct arasan_cf_dev *acdev;
813 acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
814 if (!acdev)
828 acdev->irq = ret;
836 acdev->pbase = res->start;
837 acdev->vbase = devm_ioremap(&pdev->dev, res->start,
839 if (!acdev->vbase) {
844 acdev->clk = devm_clk_get(&pdev->dev, NULL);
845 if (IS_ERR(acdev->clk)) {
847 return PTR_ERR(acdev->clk);
858 host->private_data = acdev;
859 acdev->host = host;
865 init_completion(&acdev->cf_completion);
866 init_completion(&acdev->dma_completion);
867 INIT_WORK(&acdev->work, data_xfer);
868 INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
869 dma_cap_set(DMA_MEMCPY, acdev->mask);
884 ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
885 ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
886 ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
887 ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
888 ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
889 ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
890 ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
891 ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
892 ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
893 ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
894 ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
895 ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
896 ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;
899 (unsigned long long) res->start, acdev->vbase);
901 ret = cf_init(acdev);
905 cf_card_detect(acdev, 0);
907 ret = ata_host_activate(host, acdev->irq, irq_handler, 0,
912 cf_exit(acdev);
920 struct arasan_cf_dev *acdev = host->ports[0]->private_data;
923 cf_exit(acdev);
932 struct arasan_cf_dev *acdev = host->ports[0]->private_data;
934 if (acdev->dma_chan)
935 dmaengine_terminate_all(acdev->dma_chan);
937 cf_exit(acdev);
944 struct arasan_cf_dev *acdev = host->ports[0]->private_data;
946 cf_init(acdev);