Lines Matching defs:flctl

115 static void empty_fifo(struct sh_flctl *flctl)
117 writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
118 writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
121 static void start_translation(struct sh_flctl *flctl)
123 writeb(TRSTRT, FLTRCR(flctl));
126 static void timeout_error(struct sh_flctl *flctl, const char *str)
128 dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
131 static void wait_completion(struct sh_flctl *flctl)
136 if (readb(FLTRCR(flctl)) & TREND) {
137 writeb(0x0, FLTRCR(flctl));
143 timeout_error(flctl, __func__);
144 writeb(0x0, FLTRCR(flctl));
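
The helper listed at lines 131-144 is a plain busy-wait on the transfer-end flag in FLTRCR. A minimal sketch of that pattern, assuming a LOOP_TIMEOUT_MAX poll bound and a udelay() back-off (both assumptions, not taken from the listing):

    static void wait_completion(struct sh_flctl *flctl)
    {
        uint32_t timeout = LOOP_TIMEOUT_MAX;    /* assumed poll bound */

        while (timeout--) {
            if (readb(FLTRCR(flctl)) & TREND) {
                /* transfer ended: clear the trigger/status register */
                writeb(0x0, FLTRCR(flctl));
                return;
            }
            udelay(1);
        }

        timeout_error(flctl, __func__);
        writeb(0x0, FLTRCR(flctl));     /* clear even on timeout */
    }
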
149 struct sh_flctl *flctl = param;
151 complete(&flctl->dma_complete);
154 static void flctl_release_dma(struct sh_flctl *flctl)
156 if (flctl->chan_fifo0_rx) {
157 dma_release_channel(flctl->chan_fifo0_rx);
158 flctl->chan_fifo0_rx = NULL;
160 if (flctl->chan_fifo0_tx) {
161 dma_release_channel(flctl->chan_fifo0_tx);
162 flctl->chan_fifo0_tx = NULL;
166 static void flctl_setup_dma(struct sh_flctl *flctl)
170 struct platform_device *pdev = flctl->pdev;
184 flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
187 flctl->chan_fifo0_tx);
189 if (!flctl->chan_fifo0_tx)
194 cfg.dst_addr = flctl->fifo;
196 ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
200 flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
203 flctl->chan_fifo0_rx);
205 if (!flctl->chan_fifo0_rx)
210 cfg.src_addr = flctl->fifo;
211 ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
215 init_completion(&flctl->dma_complete);
220 flctl_release_dma(flctl);
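
Lines 166-220 request a TX and an RX slave channel and configure each against the FIFO address, releasing everything if any step fails. A condensed sketch of the TX half, assuming the filter argument comes from platform data (shown as a placeholder):

    static void flctl_setup_dma(struct sh_flctl *flctl)
    {
        dma_cap_mask_t mask;
        struct dma_slave_config cfg;
        void *tx_param = NULL;  /* placeholder: the driver passes a slave id from pdata */
        int ret;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter, tx_param);
        if (!flctl->chan_fifo0_tx)
            return;

        memset(&cfg, 0, sizeof(cfg));
        cfg.direction = DMA_MEM_TO_DEV;
        cfg.dst_addr = flctl->fifo;     /* physical FLDTFIFO address */
        ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
        if (ret < 0)
            goto err;

        /*
         * The RX channel (chan_fifo0_rx) is requested and configured
         * symmetrically, with cfg.src_addr = flctl->fifo and DMA_DEV_TO_MEM.
         */

        init_completion(&flctl->dma_complete);
        return;

    err:
        flctl_release_dma(flctl);
    }
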
225 struct sh_flctl *flctl = mtd_to_flctl(mtd);
232 if (flctl->chip.options & NAND_BUSWIDTH_16)
234 if (flctl->page_size) {
239 if (flctl->rw_ADRCNT == ADRCNT2_E) {
242 writel(addr2, FLADR2(flctl));
251 writel(addr, FLADR(flctl));
254 static void wait_rfifo_ready(struct sh_flctl *flctl)
261 val = readl(FLDTCNTR(flctl)) >> 16;
266 timeout_error(flctl, __func__);
269 static void wait_wfifo_ready(struct sh_flctl *flctl)
275 len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
280 timeout_error(flctl, __func__);
284 (struct sh_flctl *flctl, int sector_number)
301 size = readl(FLDTCNTR(flctl)) >> 24;
306 if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
316 if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
319 if (flctl->done_buff[i] != 0xff) {
326 dev_dbg(&flctl->pdev->dev,
330 writel(0, FL4ECCCR(flctl));
335 ecc_reg[0] = FL4ECCRESULT0(flctl);
336 ecc_reg[1] = FL4ECCRESULT1(flctl);
337 ecc_reg[2] = FL4ECCRESULT2(flctl);
338 ecc_reg[3] = FL4ECCRESULT3(flctl);
346 if (flctl->page_size)
352 org = flctl->done_buff[index];
353 flctl->done_buff[index] = org ^ (data & 0xFF);
356 writel(0, FL4ECCCR(flctl));
359 timeout_error(flctl, __func__);
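
In the correctable-error branch of lines 284-359, each FL4ECCRESULTn register encodes the byte position of a bad symbol in its upper half and an XOR correction mask in its low byte, which is applied to done_buff. A minimal sketch of that correction loop (the loop bound over the result registers is illustrative):

    void __iomem *ecc_reg[4];
    int i;

    ecc_reg[0] = FL4ECCRESULT0(flctl);
    ecc_reg[1] = FL4ECCRESULT1(flctl);
    ecc_reg[2] = FL4ECCRESULT2(flctl);
    ecc_reg[3] = FL4ECCRESULT3(flctl);

    for (i = 0; i < ARRAY_SIZE(ecc_reg); i++) {
        uint32_t data = readl(ecc_reg[i]);
        unsigned int index = data >> 16;        /* byte offset of the bad symbol */
        uint8_t org;

        if (flctl->page_size)                   /* large page: offset is per sector */
            index += 512 * sector_number;

        org = flctl->done_buff[index];
        flctl->done_buff[index] = org ^ (data & 0xFF);  /* apply correction mask */
    }
    writel(0, FL4ECCCR(flctl));                 /* clear the ECC status for the next sector */
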
363 static void wait_wecfifo_ready(struct sh_flctl *flctl)
370 len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
375 timeout_error(flctl, __func__);
378 static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
391 chan = flctl->chan_fifo0_rx;
394 chan = flctl->chan_fifo0_tx;
405 reg = readl(FLINTDMACR(flctl));
407 writel(reg, FLINTDMACR(flctl));
410 desc->callback_param = flctl;
414 dev_warn(&flctl->pdev->dev,
422 flctl_release_dma(flctl);
423 dev_warn(&flctl->pdev->dev,
430 wait_for_completion_timeout(&flctl->dma_complete,
435 dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
440 reg = readl(FLINTDMACR(flctl));
442 writel(reg, FLINTDMACR(flctl));
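
flctl_dma_fifo0_transfer() (lines 378-442) follows the standard dmaengine slave-transfer sequence: map the buffer, prepare a descriptor, attach the completion callback shown at lines 149-151, submit, and wait with a timeout; on any failure the caller falls back to PIO. A compressed sketch using dmaengine_prep_slave_single() for brevity (the driver's own descriptor setup and its FLINTDMACR interrupt-enable bookkeeping at lines 405-442 are omitted; the callback name and the 3-second timeout are assumptions):

    static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
                                        int len, enum dma_data_direction dir)
    {
        struct dma_chan *chan = (dir == DMA_FROM_DEVICE) ?
                                flctl->chan_fifo0_rx : flctl->chan_fifo0_tx;
        struct dma_async_tx_descriptor *desc;
        dma_addr_t dma_addr;
        int ret = 0;

        dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
        if (dma_mapping_error(chan->device->dev, dma_addr))
            return -EIO;

        desc = dmaengine_prep_slave_single(chan, dma_addr, len,
                    (dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
                    DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
            ret = -EIO;                 /* caller falls back to the PIO loop */
            goto out_unmap;
        }

        desc->callback = flctl_dma_complete;    /* does complete(&flctl->dma_complete) */
        desc->callback_param = flctl;
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);

        if (!wait_for_completion_timeout(&flctl->dma_complete,
                                         msecs_to_jiffies(3000)))
            ret = -ETIMEDOUT;

    out_unmap:
        dma_unmap_single(chan->device->dev, dma_addr, len, dir);
        return ret;
    }
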
450 static void read_datareg(struct sh_flctl *flctl, int offset)
453 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
455 wait_completion(flctl);
457 data = readl(FLDATAR(flctl));
461 static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
464 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
469 if (flctl->chan_fifo0_rx && rlen >= 32 &&
470 !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
475 wait_rfifo_ready(flctl);
476 buf[i] = readl(FLDTFIFO(flctl));
485 (struct sh_flctl *flctl, uint8_t *buff, int sector)
491 res = wait_recfifo_ready(flctl, sector);
495 ecc_buf[i] = readl(FLECFIFO(flctl));
503 static void write_fiforeg(struct sh_flctl *flctl, int rlen,
507 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
511 wait_wfifo_ready(flctl);
512 writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
516 static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
520 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
528 if (flctl->chan_fifo0_tx && rlen >= 32 &&
529 !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
534 wait_wecfifo_ready(flctl);
535 writel(buf[i], FLECFIFO(flctl));
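
write_ec_fiforeg() (lines 516-535) and its read counterpart try the DMA path first for transfers of 32 bytes or more and otherwise fall back to a word-at-a-time PIO loop gated by the FIFO-ready poll. A minimal sketch of that shape (the driver's byte-order conversion around the buffer is omitted):

    static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
    {
        int i, len_4align = (rlen + 3) / 4;     /* number of 32-bit words */
        unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

        /* fast path: hand large transfers to the DMA engine */
        if (flctl->chan_fifo0_tx && rlen >= 32 &&
            !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
            return;

        /* PIO fallback: wait for FIFO space, then push one word at a time */
        for (i = 0; i < len_4align; i++) {
            wait_wecfifo_ready(flctl);
            writel(buf[i], FLECFIFO(flctl));
        }
    }
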
541 struct sh_flctl *flctl = mtd_to_flctl(mtd);
542 uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
546 if (flctl->page_size)
557 addr_len_bytes = flctl->erase_ADRCNT;
563 addr_len_bytes = flctl->rw_ADRCNT;
565 if (flctl->chip.options & NAND_BUSWIDTH_16)
573 addr_len_bytes = flctl->rw_ADRCNT;
575 if (flctl->chip.options & NAND_BUSWIDTH_16)
596 writel(flcmncr_val, FLCMNCR(flctl));
597 writel(flcmdcr_val, FLCMDCR(flctl));
598 writel(flcmcdr_val, FLCMCDR(flctl));
624 struct sh_flctl *flctl = mtd_to_flctl(mtd);
628 page_sectors = flctl->page_size ? 4 : 1;
633 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
634 FLCMNCR(flctl));
635 writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
636 writel(page_addr << 2, FLADR(flctl));
638 empty_fifo(flctl);
639 start_translation(flctl);
642 read_fiforeg(flctl, 512, 512 * sector);
644 ecc_result = read_ecfiforeg(flctl,
645 &flctl->done_buff[mtd->writesize + 16 * sector],
650 dev_info(&flctl->pdev->dev,
655 dev_warn(&flctl->pdev->dev,
665 wait_completion(flctl);
667 writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
668 FLCMNCR(flctl));
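
Lines 624-668 are the hardware-ECC sector read helper (execmd_read_page_sector() in the driver source): it enables sector-access mode and 4-symbol correction, programs the sector count and page address, then drains 512 data bytes plus 16 ECC/OOB bytes per sector before restoring FLCMNCR. The body, abridged (page_addr and mtd come from the enclosing function; ECC statistics handling is left out):

    int sector, ecc_result;
    int page_sectors = flctl->page_size ? 4 : 1;    /* 2 KiB page = 4 x 512 B sectors */

    writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT, FLCMNCR(flctl));
    writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
    writel(page_addr << 2, FLADR(flctl));           /* sector-granular address */

    empty_fifo(flctl);
    start_translation(flctl);

    for (sector = 0; sector < page_sectors; sector++) {
        read_fiforeg(flctl, 512, 512 * sector);     /* data bytes */
        ecc_result = read_ecfiforeg(flctl,
                        &flctl->done_buff[mtd->writesize + 16 * sector],
                        sector);                    /* ECC/OOB; feeds mtd->ecc_stats */
    }

    wait_completion(flctl);
    writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT), FLCMNCR(flctl));
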
673 struct sh_flctl *flctl = mtd_to_flctl(mtd);
674 int page_sectors = flctl->page_size ? 4 : 1;
680 empty_fifo(flctl);
684 writel(16, FLDTCNTR(flctl));
686 start_translation(flctl);
687 read_fiforeg(flctl, 16, 16 * i);
688 wait_completion(flctl);
694 struct sh_flctl *flctl = mtd_to_flctl(mtd);
695 int page_addr = flctl->seqin_page_addr;
698 page_sectors = flctl->page_size ? 4 : 1;
703 empty_fifo(flctl);
704 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
705 writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
706 writel(page_addr << 2, FLADR(flctl));
707 start_translation(flctl);
710 write_fiforeg(flctl, 512, 512 * sector);
711 write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
714 wait_completion(flctl);
715 writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
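
Lines 694-715 are the write-side mirror: sector-access mode is enabled without the correction bit, and each sector pushes 512 data bytes through FLDTFIFO followed by 16 ECC/OOB bytes through FLECFIFO. Abridged body (mtd comes from the enclosing function):

    int sector;
    int page_sectors = flctl->page_size ? 4 : 1;
    int page_addr = flctl->seqin_page_addr;         /* latched earlier by SEQIN */

    empty_fifo(flctl);
    writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
    writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
    writel(page_addr << 2, FLADR(flctl));
    start_translation(flctl);

    for (sector = 0; sector < page_sectors; sector++) {
        write_fiforeg(flctl, 512, 512 * sector);                    /* data via FLDTFIFO */
        write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);  /* ECC/OOB via FLECFIFO */
    }

    wait_completion(flctl);
    writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
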
720 struct sh_flctl *flctl = mtd_to_flctl(mtd);
721 int page_addr = flctl->seqin_page_addr;
724 page_sectors = flctl->page_size ? 4 : 1;
730 empty_fifo(flctl);
732 writel(16, FLDTCNTR(flctl)); /* set read size */
734 start_translation(flctl);
735 write_fiforeg(flctl, 16, 16 * sector);
736 wait_completion(flctl);
744 struct sh_flctl *flctl = mtd_to_flctl(mtd);
747 pm_runtime_get_sync(&flctl->pdev->dev);
749 flctl->read_bytes = 0;
751 flctl->index = 0;
756 if (flctl->hwecc) {
761 if (flctl->page_size)
769 flctl->read_bytes = mtd->writesize + mtd->oobsize;
770 if (flctl->chip.options & NAND_BUSWIDTH_16)
772 flctl->index += column;
776 if (flctl->hwecc) {
782 if (flctl->page_size) {
790 flctl->read_bytes = mtd->oobsize;
794 if (flctl->hwecc)
797 if (flctl->page_size)
805 flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
812 if (flctl->chip.options & NAND_BUSWIDTH_16)
816 flctl->read_bytes = 8;
817 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
818 empty_fifo(flctl);
819 start_translation(flctl);
820 read_fiforeg(flctl, flctl->read_bytes, 0);
821 wait_completion(flctl);
825 flctl->erase1_page_addr = page_addr;
831 set_addr(mtd, -1, flctl->erase1_page_addr);
832 start_translation(flctl);
833 wait_completion(flctl);
837 if (!flctl->page_size) {
849 flctl->seqin_column = column;
850 flctl->seqin_page_addr = page_addr;
851 flctl->seqin_read_cmd = read_cmd;
855 empty_fifo(flctl);
856 if (!flctl->page_size) {
858 flctl->seqin_read_cmd);
860 writel(0, FLDTCNTR(flctl)); /* set 0 size */
861 start_translation(flctl);
862 wait_completion(flctl);
864 if (flctl->hwecc) {
866 if (flctl->seqin_column == mtd->writesize)
868 else if (!flctl->seqin_column)
875 set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
876 writel(flctl->index, FLDTCNTR(flctl)); /* set write size */
877 start_translation(flctl);
878 write_fiforeg(flctl, flctl->index, 0);
879 wait_completion(flctl);
886 flctl->read_bytes = 1;
887 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
888 start_translation(flctl);
889 read_datareg(flctl, 0); /* read and end */
896 writel(0, FLDTCNTR(flctl)); /* set 0 size */
897 start_translation(flctl);
898 wait_completion(flctl);
907 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
908 empty_fifo(flctl);
909 start_translation(flctl);
910 read_fiforeg(flctl, flctl->read_bytes, 0);
911 wait_completion(flctl);
913 pm_runtime_put_sync(&flctl->pdev->dev);
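
For most simple commands (READID, STATUS, small reads) the command dispatcher at lines 744-913 ends in the same tail: program the byte count, clear the FIFO, start the translation, drain the FIFO into done_buff, and wait for the transfer-end flag, all bracketed by runtime PM. A sketch of that common tail ('rlen' stands for the command-specific byte count and is a placeholder; command, column and page_addr are the dispatcher's arguments):

    pm_runtime_get_sync(&flctl->pdev->dev);

    flctl->index = 0;
    flctl->read_bytes = rlen;

    set_cmd_regs(mtd, command, command);    /* second argument varies per command */
    set_addr(mtd, column, page_addr);

    writel(flctl->read_bytes, FLDTCNTR(flctl));     /* set read size */
    empty_fifo(flctl);
    start_translation(flctl);
    read_fiforeg(flctl, flctl->read_bytes, 0);
    wait_completion(flctl);

    pm_runtime_put_sync(&flctl->pdev->dev);
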
919 struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
924 flctl->flcmncr_base &= ~CE0_ENABLE;
926 pm_runtime_get_sync(&flctl->pdev->dev);
927 writel(flctl->flcmncr_base, FLCMNCR(flctl));
929 if (flctl->qos_request) {
930 dev_pm_qos_remove_request(&flctl->pm_qos);
931 flctl->qos_request = 0;
934 pm_runtime_put_sync(&flctl->pdev->dev);
937 flctl->flcmncr_base |= CE0_ENABLE;
939 if (!flctl->qos_request) {
940 ret = dev_pm_qos_add_request(&flctl->pdev->dev,
941 &flctl->pm_qos,
945 dev_err(&flctl->pdev->dev,
947 flctl->qos_request = 1;
950 if (flctl->holden) {
951 pm_runtime_get_sync(&flctl->pdev->dev);
952 writel(HOLDEN, FLHOLDCR(flctl));
953 pm_runtime_put_sync(&flctl->pdev->dev);
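
The chip-select hook at lines 919-953 toggles CE0_ENABLE in the cached FLCMNCR value and, while the chip is selected, pins a PM QoS latency request and asserts HOLDEN. A sketch of the select path (the deselect path at lines 924-934 mirrors it; the QoS request type and value are assumptions here):

    int ret;

    flctl->flcmncr_base |= CE0_ENABLE;

    if (!flctl->qos_request) {
        ret = dev_pm_qos_add_request(&flctl->pdev->dev, &flctl->pm_qos,
                                     DEV_PM_QOS_RESUME_LATENCY, 100);
        if (ret < 0)
            dev_err(&flctl->pdev->dev,
                    "PM QoS request failed: %d\n", ret);
        flctl->qos_request = 1;
    }

    if (flctl->holden) {
        pm_runtime_get_sync(&flctl->pdev->dev);
        writel(HOLDEN, FLHOLDCR(flctl));    /* assert HOLD while selected */
        pm_runtime_put_sync(&flctl->pdev->dev);
    }
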
963 struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
965 memcpy(&flctl->done_buff[flctl->index], buf, len);
966 flctl->index += len;
971 struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
974 data = flctl->done_buff[flctl->index];
975 flctl->index++;
981 struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
983 memcpy(buf, &flctl->done_buff[flctl->index], len);
984 flctl->index += len;
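
Because every transfer is staged through flctl->done_buff, the nand_chip buffer hooks at lines 963-984 reduce to copies against flctl->index. A sketch of the read-side pair (function names here are illustrative):

    static uint8_t flctl_read_byte(struct nand_chip *chip)
    {
        struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

        return flctl->done_buff[flctl->index++];
    }

    static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
    {
        struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

        memcpy(buf, &flctl->done_buff[flctl->index], len);
        flctl->index += len;
    }
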
991 struct sh_flctl *flctl = mtd_to_flctl(mtd);
995 * Add the SEL_16BIT flag in flctl->flcmncr_base.
998 flctl->flcmncr_base |= SEL_16BIT;
1001 flctl->page_size = 0;
1004 flctl->rw_ADRCNT = ADRCNT_4;
1005 flctl->erase_ADRCNT = ADRCNT_3;
1008 flctl->rw_ADRCNT = ADRCNT_3;
1009 flctl->erase_ADRCNT = ADRCNT_2;
1011 flctl->rw_ADRCNT = ADRCNT_2;
1012 flctl->erase_ADRCNT = ADRCNT_1;
1015 flctl->page_size = 1;
1018 flctl->rw_ADRCNT = ADRCNT2_E;
1019 flctl->erase_ADRCNT = ADRCNT_3;
1022 flctl->rw_ADRCNT = ADRCNT_4;
1023 flctl->erase_ADRCNT = ADRCNT_2;
1025 flctl->rw_ADRCNT = ADRCNT_3;
1026 flctl->erase_ADRCNT = ADRCNT_1;
1030 if (flctl->hwecc) {
1047 flctl->flcmncr_base |= _4ECCEN;
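
The init-tail code at lines 991-1047 derives the FLCMDCR address-cycle counts from the detected page size and chip size, with large-page (2 KiB) parts using the extended ADRCNT2_E encoding for the biggest devices, and finally enables _4ECCEN when hardware ECC is used. The selection logic, lifted into a hypothetical helper (the exact size thresholds are illustrative):

    static void flctl_pick_adrcnt(struct sh_flctl *flctl, struct mtd_info *mtd,
                                  uint64_t chipsize)
    {
        if (mtd->writesize == 512) {            /* small-page device */
            flctl->page_size = 0;
            if (chipsize > (32 << 20)) {        /* > 32 MiB */
                flctl->rw_ADRCNT = ADRCNT_4;
                flctl->erase_ADRCNT = ADRCNT_3;
            } else if (chipsize > (2 << 16)) {  /* > 128 KiB */
                flctl->rw_ADRCNT = ADRCNT_3;
                flctl->erase_ADRCNT = ADRCNT_2;
            } else {
                flctl->rw_ADRCNT = ADRCNT_2;
                flctl->erase_ADRCNT = ADRCNT_1;
            }
        } else {                                /* 2 KiB large-page device */
            flctl->page_size = 1;
            if (chipsize > (128 << 20)) {       /* > 128 MiB */
                flctl->rw_ADRCNT = ADRCNT2_E;
                flctl->erase_ADRCNT = ADRCNT_3;
            } else if (chipsize > (8 << 16)) {  /* > 512 KiB */
                flctl->rw_ADRCNT = ADRCNT_4;
                flctl->erase_ADRCNT = ADRCNT_2;
            } else {
                flctl->rw_ADRCNT = ADRCNT_3;
                flctl->erase_ADRCNT = ADRCNT_1;
            }
        }
    }
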
1062 struct sh_flctl *flctl = dev_id;
1064 dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
1065 writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
1083 { .compatible = "renesas,shmobile-flctl-sh7372",
1116 struct sh_flctl *flctl;
1123 flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
1124 if (!flctl)
1128 flctl->reg = devm_ioremap_resource(&pdev->dev, res);
1129 if (IS_ERR(flctl->reg))
1130 return PTR_ERR(flctl->reg);
1131 flctl->fifo = res->start + 0x24; /* FLDTFIFO */
1138 "flste", flctl);
1154 platform_set_drvdata(pdev, flctl);
1155 nand = &flctl->chip;
1159 flctl->pdev = pdev;
1160 flctl->hwecc = pdata->has_hwecc;
1161 flctl->holden = pdata->use_holden;
1162 flctl->flcmncr_base = pdata->flcmncr_val;
1163 flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
1185 flctl_setup_dma(flctl);
1201 flctl_release_dma(flctl);
1208 struct sh_flctl *flctl = platform_get_drvdata(pdev);
1209 struct nand_chip *chip = &flctl->chip;
1212 flctl_release_dma(flctl);