Lines matching refs: ctrl (source line numbers preserved; only lines containing the identifier are listed, so some multi-line statements appear truncated)
313 struct brcmnand_controller *ctrl;
624 static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
629 static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
631 if (brcmnand_non_mmio_ops(ctrl))
632 return brcmnand_soc_read(ctrl->soc, offs);
633 return brcmnand_readl(ctrl->nand_base + offs);
636 static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
639 if (brcmnand_non_mmio_ops(ctrl))
640 brcmnand_soc_write(ctrl->soc, val, offs);
642 brcmnand_writel(val, ctrl->nand_base + offs);
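
The else branches of these paired accessors fall outside the match. Reconstructed from context (an assumption, not verbatim source), the write helper dispatches between SoC-specific and plain MMIO I/O like so:

static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
				 u32 val)
{
	if (brcmnand_non_mmio_ops(ctrl))
		brcmnand_soc_write(ctrl->soc, val, offs);
	else	/* assumed elided branch: plain MMIO register write */
		brcmnand_writel(val, ctrl->nand_base + offs);
}
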
645 static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
655 ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
658 if (ctrl->nand_version < 0x0201) {
659 dev_err(ctrl->dev, "version %#x not supported\n",
660 ctrl->nand_version);
665 if (ctrl->nand_version >= 0x0702)
666 ctrl->reg_offsets = brcmnand_regs_v72;
667 else if (ctrl->nand_version == 0x0701)
668 ctrl->reg_offsets = brcmnand_regs_v71;
669 else if (ctrl->nand_version >= 0x0600)
670 ctrl->reg_offsets = brcmnand_regs_v60;
671 else if (ctrl->nand_version >= 0x0500)
672 ctrl->reg_offsets = brcmnand_regs_v50;
673 else if (ctrl->nand_version >= 0x0303)
674 ctrl->reg_offsets = brcmnand_regs_v33;
675 else if (ctrl->nand_version >= 0x0201)
676 ctrl->reg_offsets = brcmnand_regs_v21;
679 if (ctrl->nand_version >= 0x0701)
680 ctrl->reg_spacing = 0x14;
682 ctrl->reg_spacing = 0x10;
685 if (ctrl->nand_version >= 0x0701) {
686 ctrl->cs_offsets = brcmnand_cs_offsets_v71;
688 ctrl->cs_offsets = brcmnand_cs_offsets;
691 if (ctrl->nand_version >= 0x0303 &&
692 ctrl->nand_version <= 0x0500)
693 ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
697 if (ctrl->nand_version >= 0x0701) {
699 ctrl->max_page_size = 16 * 1024;
700 ctrl->max_block_size = 2 * 1024 * 1024;
702 if (ctrl->nand_version >= 0x0304)
703 ctrl->page_sizes = page_sizes_v3_4;
704 else if (ctrl->nand_version >= 0x0202)
705 ctrl->page_sizes = page_sizes_v2_2;
707 ctrl->page_sizes = page_sizes_v2_1;
709 if (ctrl->nand_version >= 0x0202)
710 ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT;
712 ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT_v2_1;
714 if (ctrl->nand_version >= 0x0600)
715 ctrl->block_sizes = block_sizes_v6;
716 else if (ctrl->nand_version >= 0x0400)
717 ctrl->block_sizes = block_sizes_v4;
718 else if (ctrl->nand_version >= 0x0202)
719 ctrl->block_sizes = block_sizes_v2_2;
721 ctrl->block_sizes = block_sizes_v2_1;
723 if (ctrl->nand_version < 0x0400) {
724 if (ctrl->nand_version < 0x0202)
725 ctrl->max_page_size = 2048;
727 ctrl->max_page_size = 4096;
728 ctrl->max_block_size = 512 * 1024;
733 if (ctrl->nand_version == 0x0702)
734 ctrl->max_oob = 128;
735 else if (ctrl->nand_version >= 0x0600)
736 ctrl->max_oob = 64;
737 else if (ctrl->nand_version >= 0x0500)
738 ctrl->max_oob = 32;
740 ctrl->max_oob = 16;
743 if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
744 ctrl->features |= BRCMNAND_HAS_PREFETCH;
750 if (ctrl->nand_version >= 0x0700)
751 ctrl->features |= BRCMNAND_HAS_CACHE_MODE;
753 if (ctrl->nand_version >= 0x0500)
754 ctrl->features |= BRCMNAND_HAS_1K_SECTORS;
756 if (ctrl->nand_version >= 0x0700)
757 ctrl->features |= BRCMNAND_HAS_WP;
758 else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
759 ctrl->features |= BRCMNAND_HAS_WP;
762 if (ctrl->nand_version == 0x0702)
763 ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT;
765 ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT;
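
Every layout decision above keys off nand_version, which packs the controller revision as major in bits 15:8 and minor in bits 7:0, so 0x0702 reads as v7.2 and each if/else ladder picks the newest layout at or below the detected revision. A hypothetical helper, for illustration only, that makes the decoding explicit:

/* Hypothetical, not part of the driver: decode the packed revision. */
static void brcmnand_print_version(struct brcmnand_controller *ctrl)
{
	dev_info(ctrl->dev, "controller v%u.%u\n",
		 ctrl->nand_version >> 8, ctrl->nand_version & 0xff);
}
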
770 static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
773 if (ctrl->nand_version >= 0x0703)
774 ctrl->flash_dma_offsets = flash_dma_regs_v4;
775 else if (ctrl->nand_version == 0x0602)
776 ctrl->flash_dma_offsets = flash_dma_regs_v0;
778 ctrl->flash_dma_offsets = flash_dma_regs_v1;
781 static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
784 u16 offs = ctrl->reg_offsets[reg];
787 return nand_readreg(ctrl, offs);
792 static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
795 u16 offs = ctrl->reg_offsets[reg];
798 nand_writereg(ctrl, offs, val);
801 static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
805 u32 tmp = brcmnand_read_reg(ctrl, reg);
809 brcmnand_write_reg(ctrl, reg, tmp);
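
Only the read and the write-back of this read-modify-write helper match. Judging from callers that pass an already-shifted mask together with a shift for the value (e.g. the corrected-error threshold write further down), the elided middle is presumably:

static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
				    enum brcmnand_reg reg, u32 mask,
				    unsigned int shift, u32 val)
{
	u32 tmp = brcmnand_read_reg(ctrl, reg);

	tmp &= ~mask;		/* assumed: mask arrives pre-shifted */
	tmp |= val << shift;
	brcmnand_write_reg(ctrl, reg, tmp);
}
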
812 static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
814 if (brcmnand_non_mmio_ops(ctrl))
815 return brcmnand_soc_read(ctrl->soc, BRCMNAND_NON_MMIO_FC_ADDR);
816 return __raw_readl(ctrl->nand_fc + word * 4);
819 static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
822 if (brcmnand_non_mmio_ops(ctrl))
823 brcmnand_soc_write(ctrl->soc, val, BRCMNAND_NON_MMIO_FC_ADDR);
825 __raw_writel(val, ctrl->nand_fc + word * 4);
828 static inline void edu_writel(struct brcmnand_controller *ctrl,
831 u16 offs = ctrl->edu_offsets[reg];
833 brcmnand_writel(val, ctrl->edu_base + offs);
836 static inline u32 edu_readl(struct brcmnand_controller *ctrl,
839 u16 offs = ctrl->edu_offsets[reg];
841 return brcmnand_readl(ctrl->edu_base + offs);
844 static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
848 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
849 brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
850 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
851 brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
854 static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
858 err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR);
859 err_addr |= ((u64)(brcmnand_read_reg(ctrl,
866 static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl)
870 err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
871 err_addr |= ((u64)(brcmnand_read_reg(ctrl,
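
Both error-address getters are cut mid-expression by the match. Assuming the UNCORR_EXT_ADDR/CORR_EXT_ADDR registers cleared in brcmnand_clear_ecc_addr() hold the address bits above 32, the corrected-error getter presumably completes as:

	err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
	err_addr |= ((u64)(brcmnand_read_reg(ctrl, BRCMNAND_CORR_EXT_ADDR) &
			   0xffff)) << 32;	/* assumed continuation */
	return err_addr;
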
882 struct brcmnand_controller *ctrl = host->ctrl;
884 brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
886 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
887 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
889 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
892 static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
895 u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
896 u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
899 if (cs == 0 && ctrl->cs0_offsets)
900 cs_offs = ctrl->cs0_offsets[reg];
902 cs_offs = ctrl->cs_offsets[reg];
905 return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;
907 return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
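
A worked example of the chip-select offset math, assuming the v7.1+ spacing of 0x14 chosen above and a nonzero CS1 base (the guard between the two returns doesn't match):

/* cs == 0: offs_cs0 + cs_offs   (cs0_offsets[] if the quirky
 *          v3.3-v5.0 CS0 layout is in effect, else cs_offsets[])
 * cs == 1: offs_cs1 + 0 * 0x14 + cs_offs
 * cs == 3: offs_cs1 + 2 * 0x14 + cs_offs
 * i.e. CS0 has its own base while CS1..N are evenly strided. */
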
910 static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
912 if (ctrl->nand_version < 0x0600)
914 return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
919 struct brcmnand_controller *ctrl = host->ctrl;
924 if (!ctrl->reg_offsets[reg])
927 if (ctrl->nand_version == 0x0702)
929 else if (ctrl->nand_version >= 0x0600)
931 else if (ctrl->nand_version >= 0x0500)
936 if (ctrl->nand_version >= 0x0702) {
940 } else if (ctrl->nand_version >= 0x0600) {
945 brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
948 static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
950 if (ctrl->nand_version < 0x0602)
955 static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
957 if (ctrl->nand_version == 0x0702)
959 else if (ctrl->nand_version >= 0x0600)
961 else if (ctrl->nand_version >= 0x0303)
967 static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
969 u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
974 if (ctrl->nand_version == 0x0702)
982 struct brcmnand_controller *ctrl = host->ctrl;
983 u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
984 u32 acc_control = nand_readreg(ctrl, offs);
989 acc_control &= ~brcmnand_ecc_level_mask(ctrl);
990 acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift;
993 acc_control &= ~brcmnand_ecc_level_mask(ctrl);
996 nand_writereg(ctrl, offs, acc_control);
999 static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
1001 if (ctrl->nand_version >= 0x0702)
1003 else if (ctrl->nand_version >= 0x0600)
1005 else if (ctrl->nand_version >= 0x0500)
1013 struct brcmnand_controller *ctrl = host->ctrl;
1014 int shift = brcmnand_sector_1k_shift(ctrl);
1015 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
1021 return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
1026 struct brcmnand_controller *ctrl = host->ctrl;
1027 int shift = brcmnand_sector_1k_shift(ctrl);
1028 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
1035 tmp = nand_readreg(ctrl, acc_control_offs);
1038 nand_writereg(ctrl, acc_control_offs, tmp);
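
The middle of this 1KB-sector setter doesn't match; the usual clear-then-set pattern (an assumption) would be:

	tmp = nand_readreg(ctrl, acc_control_offs);
	tmp &= ~(1 << shift);		/* assumed elided lines */
	tmp |= (!!val) << shift;
	nand_writereg(ctrl, acc_control_offs, tmp);
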
1050 static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
1062 val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
1073 val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
1077 dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
1083 static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
1087 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
1094 static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
1096 return ctrl->flash_dma_base;
1099 static inline bool has_edu(struct brcmnand_controller *ctrl)
1101 return ctrl->edu_base;
1104 static inline bool use_dma(struct brcmnand_controller *ctrl)
1106 return has_flash_dma(ctrl) || has_edu(ctrl);
1109 static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
1111 if (ctrl->pio_poll_mode)
1114 if (has_flash_dma(ctrl)) {
1115 ctrl->flash_dma_base = NULL;
1116 disable_irq(ctrl->dma_irq);
1119 disable_irq(ctrl->irq);
1120 ctrl->pio_poll_mode = true;
1129 static inline void flash_dma_writel(struct brcmnand_controller *ctrl,
1132 u16 offs = ctrl->flash_dma_offsets[dma_reg];
1134 brcmnand_writel(val, ctrl->flash_dma_base + offs);
1137 static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl,
1140 u16 offs = ctrl->flash_dma_offsets[dma_reg];
1142 return brcmnand_readl(ctrl->flash_dma_base + offs);
1157 static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
1160 if (ctrl->nand_version <= 0x0701)
1321 if (is_hamming_ecc(host->ctrl, p)) {
1353 struct brcmnand_controller *ctrl = host->ctrl;
1355 if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
1360 dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
1365 * make sure ctrl/flash ready before and after
1368 ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
1375 brcmnand_set_wp(ctrl, wp);
1378 ret = bcmnand_ctrl_poll_status(ctrl,
1394 static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
1398 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
1399 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];
1401 if (offs >= ctrl->max_oob)
1409 return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
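
OOB bytes are packed four per 32-bit register, most significant byte first; the shift expression picks the byte lane and the u8 return type truncates the rest. Worked example (my arithmetic, not source):

/* offs = 5:  word  = 5 & ~0x03 = 4               -> second OOB register
 *            shift = 24 - ((5 & 0x03) << 3) = 16
 *            so byte 5 comes from bits 23:16 of that register. */
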
1412 static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
1417 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
1418 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];
1420 if (offs >= ctrl->max_oob)
1428 nand_writereg(ctrl, reg_offs, data);
1433 * @ctrl: NAND controller
1439 static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
1447 tbytes = max(0, tbytes - (int)ctrl->max_oob);
1448 tbytes = min_t(int, tbytes, ctrl->max_oob);
1451 oob[j] = oob_reg_read(ctrl, j);
1462 static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
1472 tbytes = max(0, tbytes - (int)ctrl->max_oob);
1473 tbytes = min_t(int, tbytes, ctrl->max_oob);
1480 oob_reg_write(ctrl, j,
1491 oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last));
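
The elided body of write_oob_to_regs presumably mirrors the read side: whole words are assembled MSB-first, then any 1-3 trailing bytes are padded with 0xff and written as a single big-endian store, matching the cpu_to_be32 tail above. A sketch of the assumed loop:

	for (j = 0; (j + 3) < tbytes; j += 4)	/* assumed elided loop */
		oob_reg_write(ctrl, j, (oob[j + 0] << 24) |
				       (oob[j + 1] << 16) |
				       (oob[j + 2] << 8) |
				       (oob[j + 3] << 0));
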
1496 static void brcmnand_edu_init(struct brcmnand_controller *ctrl)
1499 edu_writel(ctrl, EDU_ERR_STATUS, 0);
1500 edu_readl(ctrl, EDU_ERR_STATUS);
1501 edu_writel(ctrl, EDU_DONE, 0);
1502 edu_writel(ctrl, EDU_DONE, 0);
1503 edu_writel(ctrl, EDU_DONE, 0);
1504 edu_writel(ctrl, EDU_DONE, 0);
1505 edu_readl(ctrl, EDU_DONE);
1511 struct brcmnand_controller *ctrl = data;
1513 if (ctrl->edu_count) {
1514 ctrl->edu_count--;
1515 while (!(edu_readl(ctrl, EDU_DONE) & EDU_DONE_MASK))
1517 edu_writel(ctrl, EDU_DONE, 0);
1518 edu_readl(ctrl, EDU_DONE);
1521 if (ctrl->edu_count) {
1522 ctrl->edu_dram_addr += FC_BYTES;
1523 ctrl->edu_ext_addr += FC_BYTES;
1525 edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
1526 edu_readl(ctrl, EDU_DRAM_ADDR);
1527 edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
1528 edu_readl(ctrl, EDU_EXT_ADDR);
1531 edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
1532 edu_readl(ctrl, EDU_CMD);
1537 complete(&ctrl->edu_done);
1544 struct brcmnand_controller *ctrl = data;
1547 if (ctrl->dma_pending)
1551 if (ctrl->edu_pending) {
1552 if (irq == ctrl->irq && ((int)ctrl->edu_irq >= 0))
1560 complete(&ctrl->done);
1567 struct brcmnand_controller *ctrl = data;
1569 if (ctrl->soc->ctlrdy_ack(ctrl->soc))
1577 struct brcmnand_controller *ctrl = data;
1579 complete(&ctrl->dma_done);
1586 struct brcmnand_controller *ctrl = host->ctrl;
1590 cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
1592 dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
1600 if (ctrl->cmd_pending &&
1601 bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
1604 BUG_ON(ctrl->cmd_pending != 0);
1605 ctrl->cmd_pending = cmd;
1607 ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
1611 brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
1612 cmd << brcmnand_cmd_shift(ctrl));
1620 unsigned int ctrl)
1628 struct brcmnand_controller *ctrl = host->ctrl;
1635 disable_ctrl_irqs(ctrl);
1636 sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
1643 sts = wait_for_completion_timeout(&ctrl->done, timeo);
1653 struct brcmnand_controller *ctrl = host->ctrl;
1656 dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
1657 if (ctrl->cmd_pending)
1661 u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
1662 >> brcmnand_cmd_shift(ctrl);
1664 dev_err_ratelimited(ctrl->dev,
1666 dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
1667 brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
1669 ctrl->cmd_pending = 0;
1670 return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
1689 struct brcmnand_controller *ctrl = host->ctrl;
1714 dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);
1716 brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
1717 (void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);
1728 struct brcmnand_controller *ctrl = host->ctrl;
1739 dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
1796 u32 *flash_cache = (u32 *)ctrl->flash_cache;
1799 brcmnand_soc_data_bus_prepare(ctrl->soc, true);
1810 flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));
1812 brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
1828 struct brcmnand_controller *ctrl = host->ctrl;
1835 ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
1838 ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
1843 ret = oob_reg_read(ctrl, host->last_byte);
1847 ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
1862 ret = ctrl->flash_cache[offs];
1871 ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
1875 dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
1913 struct brcmnand_controller *ctrl = host->ctrl;
1921 pa = dma_map_single(ctrl->dev, buf, len, dir);
1922 if (dma_mapping_error(ctrl->dev, pa)) {
1923 dev_err(ctrl->dev, "unable to map buffer for EDU DMA\n");
1927 ctrl->edu_pending = true;
1928 ctrl->edu_dram_addr = pa;
1929 ctrl->edu_ext_addr = addr;
1930 ctrl->edu_cmd = edu_cmd;
1931 ctrl->edu_count = trans;
1933 edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
1934 edu_readl(ctrl, EDU_DRAM_ADDR);
1935 edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
1936 edu_readl(ctrl, EDU_EXT_ADDR);
1937 edu_writel(ctrl, EDU_LENGTH, FC_BYTES);
1938 edu_readl(ctrl, EDU_LENGTH);
1942 edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
1943 edu_readl(ctrl, EDU_CMD);
1945 if (wait_for_completion_timeout(&ctrl->edu_done, timeo) <= 0) {
1946 dev_err(ctrl->dev,
1948 edu_readl(ctrl, EDU_STATUS),
1949 edu_readl(ctrl, EDU_ERR_STATUS));
1952 dma_unmap_single(ctrl->dev, pa, len, dir);
1955 if (((brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
1958 dev_info(ctrl->dev, "program failed at %llx\n",
1964 if (edu_readl(ctrl, EDU_STATUS) & EDU_STATUS_ACTIVE)
1965 dev_warn(ctrl->dev, "EDU still active: %#x\n",
1966 edu_readl(ctrl, EDU_STATUS));
1968 if (unlikely(edu_readl(ctrl, EDU_ERR_STATUS) & EDU_ERR_STATUS_ERRACK)) {
1969 dev_warn(ctrl->dev, "EDU RBUS error at addr %llx\n",
1974 ctrl->edu_pending = false;
1975 brcmnand_edu_init(ctrl);
1976 edu_writel(ctrl, EDU_STOP, 0); /* force stop */
1977 edu_readl(ctrl, EDU_STOP);
1986 err_addr = brcmnand_get_uncorrecc_addr(ctrl);
1988 err_addr = brcmnand_get_correcc_addr(ctrl);
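
Read together with the DONE interrupt handler above, the EDU path is a chained transfer; a condensed summary (structure inferred from the matching lines, not verbatim):

/* 1. dma_map_single() the buffer, then program EDU_DRAM_ADDR,
 *    EDU_EXT_ADDR and EDU_LENGTH (one FC_BYTES chunk) and issue
 *    EDU_CMD;
 * 2. each DONE interrupt acks EDU_DONE, advances both addresses
 *    by FC_BYTES and re-issues EDU_CMD until edu_count hits 0;
 * 3. the last interrupt completes ctrl->edu_done; the caller
 *    unmaps the buffer and checks INTFC/EDU status, re-initing
 *    and force-stopping EDU on error. */
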
2036 struct brcmnand_controller *ctrl = host->ctrl;
2039 flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
2040 (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
2041 if (ctrl->nand_version > 0x0602) {
2042 flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT,
2044 (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
2048 ctrl->dma_pending = true;
2050 flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */
2052 if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
2053 dev_err(ctrl->dev,
2055 flash_dma_readl(ctrl, FLASH_DMA_STATUS),
2056 flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
2058 ctrl->dma_pending = false;
2059 flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
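
The descriptor setup above is truncated where the value spills onto the next source line; on controllers newer than v6.2 the upper half of the 64-bit descriptor address presumably goes to the EXT register:

	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT,
			 upper_32_bits(desc));	/* assumed continuation */
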
2065 struct brcmnand_controller *ctrl = host->ctrl;
2069 buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
2070 if (dma_mapping_error(ctrl->dev, buf_pa)) {
2071 dev_err(ctrl->dev, "unable to map buffer for DMA\n");
2075 brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
2078 brcmnand_dma_run(host, ctrl->dma_pa);
2080 dma_unmap_single(ctrl->dev, buf_pa, len, dir);
2082 if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
2084 else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
2098 struct brcmnand_controller *ctrl = host->ctrl;
2101 brcmnand_clear_ecc_addr(ctrl);
2110 brcmnand_soc_data_bus_prepare(ctrl->soc, false);
2113 *buf = brcmnand_read_fc(ctrl, j);
2115 brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
2119 oob += read_oob_from_regs(ctrl, i, oob,
2124 *err_addr = brcmnand_get_uncorrecc_addr(ctrl);
2131 *err_addr = brcmnand_get_correcc_addr(ctrl);
2197 struct brcmnand_controller *ctrl = host->ctrl;
2203 dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
2206 brcmnand_clear_ecc_addr(ctrl);
2208 if (ctrl->dma_trans && !oob && flash_dma_buf_ok(buf)) {
2209 err = ctrl->dma_trans(host, addr, buf,
2220 if (has_edu(ctrl) && err_addr)
2240 if ((ctrl->nand_version == 0x0700) ||
2241 (ctrl->nand_version == 0x0701)) {
2252 if (ctrl->nand_version < 0x0702) {
2260 dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
2268 unsigned int corrected = brcmnand_count_corrected(ctrl);
2275 dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
2341 struct brcmnand_controller *ctrl = host->ctrl;
2345 dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);
2348 dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
2354 for (i = 0; i < ctrl->max_oob; i += 4)
2355 oob_reg_write(ctrl, i, 0xffffffff);
2357 if (use_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
2358 if (ctrl->dma_trans(host, addr, (u32 *)buf, mtd->writesize,
2371 brcmnand_soc_data_bus_prepare(ctrl->soc, false);
2374 brcmnand_write_fc(ctrl, j, *buf);
2376 brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
2379 brcmnand_write_fc(ctrl, j, 0xffffffff);
2383 oob += write_oob_to_regs(ctrl, i, oob,
2393 dev_info(ctrl->dev, "program failed at %llx\n",
2460 struct brcmnand_controller *ctrl = host->ctrl;
2462 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
2463 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
2465 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
2470 if (ctrl->block_sizes) {
2473 for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
2474 if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
2479 dev_warn(ctrl->dev, "invalid block size %u\n",
2487 if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
2488 cfg->block_size > ctrl->max_block_size)) {
2489 dev_warn(ctrl->dev, "invalid block size %u\n",
2494 if (ctrl->page_sizes) {
2497 for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
2498 if (ctrl->page_sizes[i] == cfg->page_size) {
2503 dev_warn(ctrl->dev, "invalid page size %u\n",
2511 if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
2512 cfg->page_size > ctrl->max_page_size)) {
2513 dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
2518 dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
2530 tmp |= (page_size << ctrl->page_size_shift) |
2532 nand_writereg(ctrl, cfg_offs, tmp);
2534 nand_writereg(ctrl, cfg_offs, tmp);
2537 nand_writereg(ctrl, cfg_ext_offs, tmp);
2540 tmp = nand_readreg(ctrl, acc_control_offs);
2541 tmp &= ~brcmnand_ecc_level_mask(ctrl);
2542 tmp &= ~brcmnand_spare_area_mask(ctrl);
2543 if (ctrl->nand_version >= 0x0302) {
2544 tmp |= cfg->ecc_level << ctrl->ecc_level_shift;
2547 nand_writereg(ctrl, acc_control_offs, tmp);
2569 if (is_hamming_ecc(host->ctrl, cfg))
2597 struct brcmnand_controller *ctrl = host->ctrl;
2615 if (cfg->spare_area_size > ctrl->max_oob)
2616 cfg->spare_area_size = ctrl->max_oob;
2632 dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
2648 dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
2659 dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
2673 if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
2674 dev_err(ctrl->dev, "1KB sectors not supported\n");
2678 dev_err(ctrl->dev,
2687 dev_err(ctrl->dev, "unsupported ECC size: %d\n",
2705 dev_info(ctrl->dev, "detected %s\n", msg);
2708 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
2709 tmp = nand_readreg(ctrl, offs);
2714 if (ctrl->nand_version >= 0x0702)
2717 if (ctrl->features & BRCMNAND_HAS_PREFETCH)
2720 nand_writereg(ctrl, offs, tmp);
2753 if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
2767 struct brcmnand_controller *ctrl = host->ctrl;
2810 chip->controller = &ctrl->controller;
2817 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
2818 nand_writereg(ctrl, cfg_offs,
2819 nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
2835 struct brcmnand_controller *ctrl = host->ctrl;
2836 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
2837 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
2839 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
2841 u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
2842 u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
2845 nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
2847 nand_writereg(ctrl, cfg_ext_offs,
2849 nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
2850 nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
2851 nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
2853 host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
2856 nand_readreg(ctrl, cfg_ext_offs);
2857 host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
2858 host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
2859 host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
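
This block holds both directions of a per-chip-select save/restore helper; the branch selecting between them doesn't match. Presumably it is keyed on a restore flag:

/* assumed control flow:
 *   if (restore) -> write host->hwcfg.{config, config_ext,
 *                   acc_control, timing_1, timing_2} back to the
 *                   CS registers (the nand_writereg block);
 *   else         -> snapshot the same registers into host->hwcfg
 *                   (the nand_readreg block). */
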
2865 struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
2868 list_for_each_entry(host, &ctrl->host_list, node)
2871 ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
2872 ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
2873 ctrl->corr_stat_threshold =
2874 brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);
2876 if (has_flash_dma(ctrl))
2877 ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
2878 else if (has_edu(ctrl))
2879 ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
2886 struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
2889 if (has_flash_dma(ctrl)) {
2890 flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
2891 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
2894 if (has_edu(ctrl)) {
2895 ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
2896 edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config);
2897 edu_readl(ctrl, EDU_CONFIG);
2898 brcmnand_edu_init(ctrl);
2901 brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
2902 brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
2903 brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
2904 ctrl->corr_stat_threshold);
2905 if (ctrl->soc) {
2907 ctrl->soc->ctlrdy_ack(ctrl->soc);
2908 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
2911 list_for_each_entry(host, &ctrl->host_list, node) {
2951 struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
2957 ctrl->edu_base = devm_ioremap_resource(dev, res);
2958 if (IS_ERR(ctrl->edu_base))
2959 return PTR_ERR(ctrl->edu_base);
2961 ctrl->edu_offsets = edu_regs;
2963 edu_writel(ctrl, EDU_CONFIG, EDU_CONFIG_MODE_NAND |
2965 edu_readl(ctrl, EDU_CONFIG);
2968 brcmnand_edu_init(ctrl);
2970 ctrl->edu_irq = platform_get_irq_optional(pdev, 1);
2971 if (ctrl->edu_irq < 0) {
2975 ret = devm_request_irq(dev, ctrl->edu_irq,
2977 "brcmnand-edu", ctrl);
2979 dev_err(ctrl->dev, "can't allocate IRQ %d: error %d\n",
2980 ctrl->edu_irq, ret);
2985 ctrl->edu_irq);
2996 struct brcmnand_controller *ctrl;
3007 ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
3008 if (!ctrl)
3011 dev_set_drvdata(dev, ctrl);
3012 ctrl->dev = dev;
3017 if (brcmnand_soc_has_ops(ctrl->soc))
3020 init_completion(&ctrl->done);
3021 init_completion(&ctrl->dma_done);
3022 init_completion(&ctrl->edu_done);
3023 nand_controller_init(&ctrl->controller);
3024 ctrl->controller.ops = &brcmnand_controller_ops;
3025 INIT_LIST_HEAD(&ctrl->host_list);
3029 ctrl->nand_base = devm_ioremap_resource(dev, res);
3030 if (IS_ERR(ctrl->nand_base))
3031 return PTR_ERR(ctrl->nand_base);
3034 ctrl->clk = devm_clk_get(dev, "nand");
3035 if (!IS_ERR(ctrl->clk)) {
3036 ret = clk_prepare_enable(ctrl->clk);
3040 ret = PTR_ERR(ctrl->clk);
3044 ctrl->clk = NULL;
3048 ret = brcmnand_revision_init(ctrl);
3058 ctrl->nand_fc = devm_ioremap_resource(dev, res);
3059 if (IS_ERR(ctrl->nand_fc)) {
3060 ret = PTR_ERR(ctrl->nand_fc);
3064 ctrl->nand_fc = ctrl->nand_base +
3065 ctrl->reg_offsets[BRCMNAND_FC_BASE];
3071 ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
3072 if (IS_ERR(ctrl->flash_dma_base)) {
3073 ret = PTR_ERR(ctrl->flash_dma_base);
3078 brcmnand_flash_dma_revision_init(ctrl);
3081 if (ctrl->nand_version >= 0x0700)
3091 flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
3092 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
3095 ctrl->dma_desc = dmam_alloc_coherent(dev,
3096 sizeof(*ctrl->dma_desc),
3097 &ctrl->dma_pa, GFP_KERNEL);
3098 if (!ctrl->dma_desc) {
3103 ctrl->dma_irq = platform_get_irq(pdev, 1);
3104 if ((int)ctrl->dma_irq < 0) {
3110 ret = devm_request_irq(dev, ctrl->dma_irq,
3112 ctrl);
3115 ctrl->dma_irq, ret);
3121 ctrl->dma_trans = brcmnand_dma_trans;
3127 if (has_edu(ctrl))
3129 ctrl->dma_trans = brcmnand_edu_trans;
3133 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
3136 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
3138 if (ctrl->features & BRCMNAND_HAS_WP) {
3141 brcmnand_set_wp(ctrl, false);
3147 ctrl->irq = platform_get_irq(pdev, 0);
3148 if ((int)ctrl->irq < 0) {
3159 ctrl->soc = soc;
3161 ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
3162 DRV_NAME, ctrl);
3165 ctrl->soc->ctlrdy_ack(ctrl->soc);
3166 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
3169 ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
3170 DRV_NAME, ctrl);
3174 ctrl->irq, ret);
3189 host->ctrl = ctrl;
3197 list_add_tail(&host->node, &ctrl->host_list);
3202 if (list_empty(&ctrl->host_list)) {
3210 clk_disable_unprepare(ctrl->clk);
3218 struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
3223 list_for_each_entry(host, &ctrl->host_list, node) {
3230 clk_disable_unprepare(ctrl->clk);