Lines Matching defs:flctl

114 static void empty_fifo(struct sh_flctl *flctl)
116 writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
117 writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
120 static void start_translation(struct sh_flctl *flctl)
122 writeb(TRSTRT, FLTRCR(flctl));
125 static void timeout_error(struct sh_flctl *flctl, const char *str)
127 dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
130 static void wait_completion(struct sh_flctl *flctl)
135 if (readb(FLTRCR(flctl)) & TREND) {
136 writeb(0x0, FLTRCR(flctl));
142 timeout_error(flctl, __func__);
143 writeb(0x0, FLTRCR(flctl));
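
Only the flctl accesses of wait_completion() show up above; the polling loop around them does not match. A plausible reconstruction of the full helper, assuming a LOOP_TIMEOUT_MAX iteration bound and a 1 us pause per poll (neither appears in the matches):

        static void wait_completion(struct sh_flctl *flctl)
        {
                uint32_t timeout = LOOP_TIMEOUT_MAX;    /* assumed poll bound */

                while (timeout--) {
                        /* TREND in FLTRCR flags the end of the started translation */
                        if (readb(FLTRCR(flctl)) & TREND) {
                                writeb(0x0, FLTRCR(flctl));     /* clear TRSTRT/TREND */
                                return;
                        }
                        udelay(1);
                }

                timeout_error(flctl, __func__);
                writeb(0x0, FLTRCR(flctl));
        }
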
148 struct sh_flctl *flctl = param;
150 complete(&flctl->dma_complete);
153 static void flctl_release_dma(struct sh_flctl *flctl)
155 if (flctl->chan_fifo0_rx) {
156 dma_release_channel(flctl->chan_fifo0_rx);
157 flctl->chan_fifo0_rx = NULL;
159 if (flctl->chan_fifo0_tx) {
160 dma_release_channel(flctl->chan_fifo0_tx);
161 flctl->chan_fifo0_tx = NULL;
165 static void flctl_setup_dma(struct sh_flctl *flctl)
169 struct platform_device *pdev = flctl->pdev;
183 flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
186 flctl->chan_fifo0_tx);
188 if (!flctl->chan_fifo0_tx)
193 cfg.dst_addr = flctl->fifo;
195 ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
199 flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
202 flctl->chan_fifo0_rx);
204 if (!flctl->chan_fifo0_rx)
209 cfg.src_addr = flctl->fifo;
210 ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
214 init_completion(&flctl->dma_complete);
219 flctl_release_dma(flctl);
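
The flctl_setup_dma() matches above skip the dmaengine boilerplate between them. A minimal sketch of the whole setup, assuming the channel filter arguments come from slave_id_fifo0_tx/rx in the board's sh_flctl_platform_data and that any failure simply tears the channels down again so the driver stays on PIO:

        static void flctl_setup_dma(struct sh_flctl *flctl)
        {
                dma_cap_mask_t mask;
                struct dma_slave_config cfg;
                struct platform_device *pdev = flctl->pdev;
                struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
                int ret;

                if (!pdata || !pdata->slave_id_fifo0_tx || !pdata->slave_id_fifo0_rx)
                        return;         /* no DMA described for this board, keep PIO */

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
                                (void *)(unsigned long)pdata->slave_id_fifo0_tx);
                if (!flctl->chan_fifo0_tx)
                        return;

                memset(&cfg, 0, sizeof(cfg));
                cfg.direction = DMA_MEM_TO_DEV;
                cfg.dst_addr = flctl->fifo;     /* physical address of FLDTFIFO */
                cfg.src_addr = 0;
                ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
                if (ret < 0)
                        goto err;

                flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
                                (void *)(unsigned long)pdata->slave_id_fifo0_rx);
                if (!flctl->chan_fifo0_rx)
                        goto err;

                cfg.direction = DMA_DEV_TO_MEM;
                cfg.dst_addr = 0;
                cfg.src_addr = flctl->fifo;
                ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
                if (ret < 0)
                        goto err;

                init_completion(&flctl->dma_complete);
                return;

        err:
                flctl_release_dma(flctl);
        }
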
224 struct sh_flctl *flctl = mtd_to_flctl(mtd);
231 if (flctl->chip.options & NAND_BUSWIDTH_16)
233 if (flctl->page_size) {
238 if (flctl->rw_ADRCNT == ADRCNT2_E) {
241 writel(addr2, FLADR2(flctl));
250 writel(addr, FLADR(flctl));
253 static void wait_rfifo_ready(struct sh_flctl *flctl)
260 val = readl(FLDTCNTR(flctl)) >> 16;
265 timeout_error(flctl, __func__);
268 static void wait_wfifo_ready(struct sh_flctl *flctl)
274 len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
279 timeout_error(flctl, __func__);
283 (struct sh_flctl *flctl, int sector_number)
300 size = readl(FLDTCNTR(flctl)) >> 24;
305 if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
315 if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
318 if (flctl->done_buff[i] != 0xff) {
325 dev_dbg(&flctl->pdev->dev,
329 writel(0, FL4ECCCR(flctl));
334 ecc_reg[0] = FL4ECCRESULT0(flctl);
335 ecc_reg[1] = FL4ECCRESULT1(flctl);
336 ecc_reg[2] = FL4ECCRESULT2(flctl);
337 ecc_reg[3] = FL4ECCRESULT3(flctl);
345 if (flctl->page_size)
351 org = flctl->done_buff[index];
352 flctl->done_buff[index] = org ^ (data & 0xFF);
355 writel(0, FL4ECCCR(flctl));
358 timeout_error(flctl, __func__);
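
The FL4ECCRESULTn reads just above belong to the 4-symbol ECC repair inside wait_recfifo_ready(). As a sketch (the register layout is an assumption inferred from the matched lines): each result word carries the error's byte offset in its upper half and the error bit pattern in its low byte, and the fix is XORed directly into done_buff, offset by the sector currently being read on large-page devices:

        /* Correction step, assuming FL4ECCRESULTn = (error offset << 16) | pattern. */
        for (i = 0; i < 3; i++) {
                uint32_t data = readl(ecc_reg[i]);
                unsigned int index;
                uint8_t org;

                if (flctl->page_size)           /* 2 KiB pages: 512-byte sectors */
                        index = (512 * sector_number) + (data >> 16);
                else
                        index = data >> 16;

                org = flctl->done_buff[index];
                flctl->done_buff[index] = org ^ (data & 0xFF);
        }
        writel(0, FL4ECCCR(flctl));             /* acknowledge the correction result */
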
362 static void wait_wecfifo_ready(struct sh_flctl *flctl)
369 len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
374 timeout_error(flctl, __func__);
377 static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
390 chan = flctl->chan_fifo0_rx;
393 chan = flctl->chan_fifo0_tx;
404 reg = readl(FLINTDMACR(flctl));
406 writel(reg, FLINTDMACR(flctl));
409 desc->callback_param = flctl;
413 dev_warn(&flctl->pdev->dev,
421 flctl_release_dma(flctl);
422 dev_warn(&flctl->pdev->dev,
429 wait_for_completion_timeout(&flctl->dma_complete,
434 dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
439 reg = readl(FLINTDMACR(flctl));
441 writel(reg, FLINTDMACR(flctl));
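
flctl_dma_fifo0_transfer() is the standard dmaengine slave sequence: map the staging buffer, prepare a single slave transfer to or from the FIFO, submit it, and block on dma_complete with a timeout. A condensed sketch, assuming the callback is the small completion helper seen near the top of the matches, a 3 s timeout, and leaving out the DREQ enable/disable writes to FLINTDMACR that bracket the transfer:

        static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
                                            int len, enum dma_data_direction dir)
        {
                struct dma_async_tx_descriptor *desc;
                enum dma_transfer_direction tr_dir;
                struct dma_chan *chan;
                dma_addr_t dma_addr;
                dma_cookie_t cookie;
                unsigned long time_left;
                int ret = 0;

                if (dir == DMA_FROM_DEVICE) {
                        chan = flctl->chan_fifo0_rx;
                        tr_dir = DMA_DEV_TO_MEM;
                } else {
                        chan = flctl->chan_fifo0_tx;
                        tr_dir = DMA_MEM_TO_DEV;
                }

                dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
                if (dma_mapping_error(chan->device->dev, dma_addr))
                        return -EIO;                    /* caller falls back to PIO */

                desc = dmaengine_prep_slave_single(chan, dma_addr, len, tr_dir,
                                                   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (!desc) {
                        ret = -EIO;
                        goto unmap;
                }

                desc->callback = flctl_dma_complete;    /* does complete(&flctl->dma_complete) */
                desc->callback_param = flctl;
                cookie = dmaengine_submit(desc);
                if (dma_submit_error(cookie)) {
                        ret = dma_submit_error(cookie);
                        goto unmap;
                }
                dma_async_issue_pending(chan);

                time_left = wait_for_completion_timeout(&flctl->dma_complete,
                                                        msecs_to_jiffies(3000));
                if (time_left == 0) {
                        dmaengine_terminate_all(chan);
                        dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
                        ret = -ETIMEDOUT;
                }

        unmap:
                dma_unmap_single(chan->device->dev, dma_addr, len, dir);
                return ret;                             /* 0: FIFO was moved by DMA */
        }
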
449 static void read_datareg(struct sh_flctl *flctl, int offset)
452 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
454 wait_completion(flctl);
456 data = readl(FLDATAR(flctl));
460 static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
463 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
468 if (flctl->chan_fifo0_rx && rlen >= 32 &&
469 !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
474 wait_rfifo_ready(flctl);
475 buf[i] = readl(FLDTFIFO(flctl));
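
read_fiforeg() only tries DMA when a channel exists and the transfer is at least 32 bytes; otherwise, or when the DMA helper fails, it drains the FIFO word by word. The write-side helpers further down mirror the same structure. A compressed sketch (the word-count rounding and the endianness fix-up are assumptions consistent with the cpu_to_be32() visible on the write path):

        static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
        {
                unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
                int i, len_4align = (rlen + 3) / 4;

                /* DMA first; the helper returns 0 on success */
                if (flctl->chan_fifo0_rx && rlen >= 32 &&
                    !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
                        goto convert;

                /* PIO fallback: wait for data, then pop the FIFO one word at a time */
                for (i = 0; i < len_4align; i++) {
                        wait_rfifo_ready(flctl);
                        buf[i] = readl(FLDTFIFO(flctl));
                }

        convert:
                /* FIFO data is big-endian, matching the cpu_to_be32() on writes */
                for (i = 0; i < len_4align; i++)
                        buf[i] = be32_to_cpu(buf[i]);
        }
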
484 (struct sh_flctl *flctl, uint8_t *buff, int sector)
490 res = wait_recfifo_ready(flctl, sector);
494 ecc_buf[i] = readl(FLECFIFO(flctl));
502 static void write_fiforeg(struct sh_flctl *flctl, int rlen,
506 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
510 wait_wfifo_ready(flctl);
511 writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
515 static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
519 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
527 if (flctl->chan_fifo0_tx && rlen >= 32 &&
528 !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
533 wait_wecfifo_ready(flctl);
534 writel(buf[i], FLECFIFO(flctl));
540 struct sh_flctl *flctl = mtd_to_flctl(mtd);
541 uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
545 if (flctl->page_size)
556 addr_len_bytes = flctl->erase_ADRCNT;
562 addr_len_bytes = flctl->rw_ADRCNT;
564 if (flctl->chip.options & NAND_BUSWIDTH_16)
572 addr_len_bytes = flctl->rw_ADRCNT;
574 if (flctl->chip.options & NAND_BUSWIDTH_16)
595 writel(flcmncr_val, FLCMNCR(flctl));
596 writel(flcmdcr_val, FLCMDCR(flctl));
597 writel(flcmcdr_val, FLCMCDR(flctl));
623 struct sh_flctl *flctl = mtd_to_flctl(mtd);
627 page_sectors = flctl->page_size ? 4 : 1;
632 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
633 FLCMNCR(flctl));
634 writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
635 writel(page_addr << 2, FLADR(flctl));
637 empty_fifo(flctl);
638 start_translation(flctl);
641 read_fiforeg(flctl, 512, 512 * sector);
643 ecc_result = read_ecfiforeg(flctl,
644 &flctl->done_buff[mtd->writesize + 16 * sector],
649 dev_info(&flctl->pdev->dev,
654 dev_warn(&flctl->pdev->dev,
664 wait_completion(flctl);
666 writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
667 FLCMNCR(flctl));
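
The lines above outline the whole hardware-ECC page read: put FLCMNCR into sector access mode with 4-symbol correction, program the sector count and the sector-granular address, then per 512-byte sector pull the data FIFO and the ECC FIFO, and account for repaired or failed sectors according to what read_ecfiforeg() reports. A shortened sketch of that loop (the loop framing and the statistics handling are filled in here, not part of the matches):

        /* Sketch of the per-sector loop in the ECC-assisted page read. */
        writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT, FLCMNCR(flctl));
        writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
        writel(page_addr << 2, FLADR(flctl));           /* address in 512-byte sectors */

        empty_fifo(flctl);
        start_translation(flctl);

        for (sector = 0; sector < page_sectors; sector++) {
                read_fiforeg(flctl, 512, 512 * sector);         /* payload */
                ecc_result = read_ecfiforeg(flctl,
                                &flctl->done_buff[mtd->writesize + 16 * sector],
                                sector);                        /* 16 ECC/OOB bytes */
                /* repairable -> ecc_stats.corrected++, uncorrectable -> ecc_stats.failed++ */
        }

        wait_completion(flctl);
        writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT), FLCMNCR(flctl));
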
672 struct sh_flctl *flctl = mtd_to_flctl(mtd);
673 int page_sectors = flctl->page_size ? 4 : 1;
679 empty_fifo(flctl);
683 writel(16, FLDTCNTR(flctl));
685 start_translation(flctl);
686 read_fiforeg(flctl, 16, 16 * i);
687 wait_completion(flctl);
693 struct sh_flctl *flctl = mtd_to_flctl(mtd);
694 int page_addr = flctl->seqin_page_addr;
697 page_sectors = flctl->page_size ? 4 : 1;
702 empty_fifo(flctl);
703 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
704 writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
705 writel(page_addr << 2, FLADR(flctl));
706 start_translation(flctl);
709 write_fiforeg(flctl, 512, 512 * sector);
710 write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
713 wait_completion(flctl);
714 writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
719 struct sh_flctl *flctl = mtd_to_flctl(mtd);
720 int page_addr = flctl->seqin_page_addr;
723 page_sectors = flctl->page_size ? 4 : 1;
729 empty_fifo(flctl);
731 writel(16, FLDTCNTR(flctl)); /* set read size */
733 start_translation(flctl);
734 write_fiforeg(flctl, 16, 16 * sector);
735 wait_completion(flctl);
743 struct sh_flctl *flctl = mtd_to_flctl(mtd);
746 pm_runtime_get_sync(&flctl->pdev->dev);
748 flctl->read_bytes = 0;
750 flctl->index = 0;
755 if (flctl->hwecc) {
760 if (flctl->page_size)
768 flctl->read_bytes = mtd->writesize + mtd->oobsize;
769 if (flctl->chip.options & NAND_BUSWIDTH_16)
771 flctl->index += column;
775 if (flctl->hwecc) {
781 if (flctl->page_size) {
789 flctl->read_bytes = mtd->oobsize;
793 if (flctl->hwecc)
796 if (flctl->page_size)
804 flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
811 if (flctl->chip.options & NAND_BUSWIDTH_16)
815 flctl->read_bytes = 8;
816 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
817 empty_fifo(flctl);
818 start_translation(flctl);
819 read_fiforeg(flctl, flctl->read_bytes, 0);
820 wait_completion(flctl);
824 flctl->erase1_page_addr = page_addr;
830 set_addr(mtd, -1, flctl->erase1_page_addr);
831 start_translation(flctl);
832 wait_completion(flctl);
836 if (!flctl->page_size) {
848 flctl->seqin_column = column;
849 flctl->seqin_page_addr = page_addr;
850 flctl->seqin_read_cmd = read_cmd;
854 empty_fifo(flctl);
855 if (!flctl->page_size) {
857 flctl->seqin_read_cmd);
859 writel(0, FLDTCNTR(flctl)); /* set 0 size */
860 start_translation(flctl);
861 wait_completion(flctl);
863 if (flctl->hwecc) {
865 if (flctl->seqin_column == mtd->writesize)
867 else if (!flctl->seqin_column)
874 set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
875 writel(flctl->index, FLDTCNTR(flctl)); /* set write size */
876 start_translation(flctl);
877 write_fiforeg(flctl, flctl->index, 0);
878 wait_completion(flctl);
885 flctl->read_bytes = 1;
886 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
887 start_translation(flctl);
888 read_datareg(flctl, 0); /* read and end */
895 writel(0, FLDTCNTR(flctl)); /* set 0 size */
896 start_translation(flctl);
897 wait_completion(flctl);
906 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
907 empty_fifo(flctl);
908 start_translation(flctl);
909 read_fiforeg(flctl, flctl->read_bytes, 0);
910 wait_completion(flctl);
912 pm_runtime_put_sync(&flctl->pdev->dev);
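
Everything from the pm_runtime_get_sync() above down to here is the body of the driver's command dispatcher. Stripped of its per-command cases, its envelope looks roughly like this (the function signature and the PAGEPROG special case are assumptions; the rest mirrors the matches):

        static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
                                  int column, int page_addr)
        {
                struct sh_flctl *flctl = mtd_to_flctl(mtd);

                pm_runtime_get_sync(&flctl->pdev->dev);

                flctl->read_bytes = 0;
                if (command != NAND_CMD_PAGEPROG)
                        flctl->index = 0;       /* PAGEPROG keeps the staged write data */

                /*
                 * Per-command cases: program FLCMNCR/FLCMDCR/FLCMCDR and FLADR,
                 * then run empty_fifo()/start_translation(), move data through
                 * read_fiforeg()/write_fiforeg() into done_buff and finish with
                 * wait_completion(), as the matched lines above show.
                 */

                pm_runtime_put_sync(&flctl->pdev->dev);
        }
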
918 struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
923 flctl->flcmncr_base &= ~CE0_ENABLE;
925 pm_runtime_get_sync(&flctl->pdev->dev);
926 writel(flctl->flcmncr_base, FLCMNCR(flctl));
928 if (flctl->qos_request) {
929 dev_pm_qos_remove_request(&flctl->pm_qos);
930 flctl->qos_request = 0;
933 pm_runtime_put_sync(&flctl->pdev->dev);
936 flctl->flcmncr_base |= CE0_ENABLE;
938 if (!flctl->qos_request) {
939 ret = dev_pm_qos_add_request(&flctl->pdev->dev,
940 &flctl->pm_qos,
944 dev_err(&flctl->pdev->dev,
946 flctl->qos_request = 1;
949 if (flctl->holden) {
950 pm_runtime_get_sync(&flctl->pdev->dev);
951 writel(HOLDEN, FLHOLDCR(flctl));
952 pm_runtime_put_sync(&flctl->pdev->dev);
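
On chip select the driver sets CE0_ENABLE in the cached FLCMNCR value, pins a PM QoS request so runtime PM cannot add latency in the middle of a NAND transaction, and asserts HOLDEN when the board requested it; deselect reverses this, as the lines above show. A trimmed sketch of the select branch (the QoS request type and the 100 us value are assumptions):

        /* chipnr == 0: chip selected (sketch of that switch case) */
        flctl->flcmncr_base |= CE0_ENABLE;

        if (!flctl->qos_request) {
                ret = dev_pm_qos_add_request(&flctl->pdev->dev, &flctl->pm_qos,
                                             DEV_PM_QOS_RESUME_LATENCY, 100);
                if (ret < 0)
                        dev_err(&flctl->pdev->dev, "PM QoS request failed: %d\n", ret);
                flctl->qos_request = 1;
        }

        if (flctl->holden) {
                /* keep the controller powered while HOLDEN is programmed */
                pm_runtime_get_sync(&flctl->pdev->dev);
                writel(HOLDEN, FLHOLDCR(flctl));
                pm_runtime_put_sync(&flctl->pdev->dev);
        }
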
962 struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
964 memcpy(&flctl->done_buff[flctl->index], buf, len);
965 flctl->index += len;
970 struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
973 data = flctl->done_buff[flctl->index];
974 flctl->index++;
980 struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
982 memcpy(buf, &flctl->done_buff[flctl->index], len);
983 flctl->index += len;
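
These accessors show the driver's buffering model: every command leaves its result in done_buff and the NAND core then consumes it byte-wise through an index, while outgoing data is staged the same way before the program command. A sketch of the three helpers (the function names are assumed, only their bodies appear in the matches):

        static void flctl_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
        {
                struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

                memcpy(&flctl->done_buff[flctl->index], buf, len);      /* stage for later */
                flctl->index += len;
        }

        static uint8_t flctl_read_byte(struct nand_chip *chip)
        {
                struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

                return flctl->done_buff[flctl->index++];
        }

        static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
        {
                struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

                memcpy(buf, &flctl->done_buff[flctl->index], len);
                flctl->index += len;
        }
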
990 struct sh_flctl *flctl = mtd_to_flctl(mtd);
994 * Add the SEL_16BIT flag in flctl->flcmncr_base.
997 flctl->flcmncr_base |= SEL_16BIT;
1000 flctl->page_size = 0;
1003 flctl->rw_ADRCNT = ADRCNT_4;
1004 flctl->erase_ADRCNT = ADRCNT_3;
1007 flctl->rw_ADRCNT = ADRCNT_3;
1008 flctl->erase_ADRCNT = ADRCNT_2;
1010 flctl->rw_ADRCNT = ADRCNT_2;
1011 flctl->erase_ADRCNT = ADRCNT_1;
1014 flctl->page_size = 1;
1017 flctl->rw_ADRCNT = ADRCNT2_E;
1018 flctl->erase_ADRCNT = ADRCNT_3;
1021 flctl->rw_ADRCNT = ADRCNT_4;
1022 flctl->erase_ADRCNT = ADRCNT_2;
1024 flctl->rw_ADRCNT = ADRCNT_3;
1025 flctl->erase_ADRCNT = ADRCNT_1;
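
This block derives the number of address cycles per command from page size and chip size: bigger devices need more row-address bytes, and 2 KiB-page devices beyond 128 MiB switch to the extended ADRCNT2_E encoding, which is what makes set_addr() write the extra byte to FLADR2 further up. A sketch of the large-page half (the chipsize variable and the inner size boundary are assumptions):

        if (mtd->writesize == 512) {
                flctl->page_size = 0;           /* small-page device */
                /* ADRCNT_2/3/4 and erase ADRCNT_1/2/3 picked from the chip size */
        } else {
                flctl->page_size = 1;           /* 2 KiB page */
                if (chipsize > (128 << 20)) {           /* above 128 MiB: extra row byte */
                        flctl->rw_ADRCNT = ADRCNT2_E;
                        flctl->erase_ADRCNT = ADRCNT_3;
                } else if (chipsize > (512 << 10)) {    /* assumed mid-size boundary */
                        flctl->rw_ADRCNT = ADRCNT_4;
                        flctl->erase_ADRCNT = ADRCNT_2;
                } else {
                        flctl->rw_ADRCNT = ADRCNT_3;
                        flctl->erase_ADRCNT = ADRCNT_1;
                }
        }
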
1029 if (flctl->hwecc) {
1046 flctl->flcmncr_base |= _4ECCEN;
1061 struct sh_flctl *flctl = dev_id;
1063 dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
1064 writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
1082 { .compatible = "renesas,shmobile-flctl-sh7372",
1115 struct sh_flctl *flctl;
1122 flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
1123 if (!flctl)
1126 flctl->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1127 if (IS_ERR(flctl->reg))
1128 return PTR_ERR(flctl->reg);
1129 flctl->fifo = res->start + 0x24; /* FLDTFIFO */
1136 "flste", flctl);
1152 platform_set_drvdata(pdev, flctl);
1153 nand = &flctl->chip;
1157 flctl->pdev = pdev;
1158 flctl->hwecc = pdata->has_hwecc;
1159 flctl->holden = pdata->use_holden;
1160 flctl->flcmncr_base = pdata->flcmncr_val;
1161 flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
1183 flctl_setup_dma(flctl);
1199 flctl_release_dma(flctl);
1206 struct sh_flctl *flctl = platform_get_drvdata(pdev);
1207 struct nand_chip *chip = &flctl->chip;
1210 flctl_release_dma(flctl);
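
Finally, the probe/remove matches give the DMA lifecycle: the controller is mapped, stored as drvdata and wired to its "flste" error interrupt, then flctl_setup_dma() runs before the NAND scan, and flctl_release_dma() is called both on the probe error path and from remove. A compressed sketch (signatures and the elided steps are assumptions; the names follow the matches):

        static int flctl_probe(struct platform_device *pdev)
        {
                struct resource *res;
                struct sh_flctl *flctl;

                flctl = devm_kzalloc(&pdev->dev, sizeof(*flctl), GFP_KERNEL);
                if (!flctl)
                        return -ENOMEM;

                flctl->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
                if (IS_ERR(flctl->reg))
                        return PTR_ERR(flctl->reg);
                flctl->fifo = res->start + 0x24;        /* physical FLDTFIFO, used for DMA */

                /* ... platform data, "flste" error IRQ, runtime PM enable ... */

                platform_set_drvdata(pdev, flctl);
                flctl->pdev = pdev;

                flctl_setup_dma(flctl);         /* optional; PIO still works without it */

                /*
                 * ... nand_scan() and mtd_device_register(); the error path calls
                 * flctl_release_dma(flctl) before disabling runtime PM ...
                 */
                return 0;
        }

        static int flctl_remove(struct platform_device *pdev)
        {
                struct sh_flctl *flctl = platform_get_drvdata(pdev);

                /* ... unregister and clean up the NAND chip ... */
                flctl_release_dma(flctl);
                /* ... pm_runtime_disable(&pdev->dev) ... */
                return 0;
        }
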