Lines matching refs: ctrl (Broadcom brcmnand NAND controller driver)
317 struct brcmnand_controller *ctrl;
628 static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
637 static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
639 if (brcmnand_non_mmio_ops(ctrl))
640 return brcmnand_soc_read(ctrl->soc, offs);
641 return brcmnand_readl(ctrl->nand_base + offs);
644 static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
647 if (brcmnand_non_mmio_ops(ctrl))
648 brcmnand_soc_write(ctrl->soc, val, offs);
650 brcmnand_writel(val, ctrl->nand_base + offs);
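
Every register access above funnels through one dispatch: controllers reached over a SoC-specific bus go through the brcmnand_soc_read()/brcmnand_soc_write() callbacks, everything else is plain MMIO against nand_base. A minimal standalone model of that split, with the struct reduced to the two fields the pattern needs (an assumption, not the driver's real layout):

#include <stdint.h>

struct ctrl_model {
	uint32_t (*soc_read)(void *soc, uint32_t offs); /* non-NULL => non-MMIO ops */
	void *soc;
	volatile uint32_t *nand_base;                   /* direct register window */
};

static uint32_t reg_read(struct ctrl_model *c, uint32_t offs)
{
	if (c->soc_read)                 /* brcmnand_non_mmio_ops() analogue */
		return c->soc_read(c->soc, offs);
	return c->nand_base[offs / 4];   /* brcmnand_readl() analogue */
}
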
653 static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
663 ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
666 if (ctrl->nand_version < 0x0201) {
667 dev_err(ctrl->dev, "version %#x not supported\n",
668 ctrl->nand_version);
673 if (ctrl->nand_version >= 0x0702)
674 ctrl->reg_offsets = brcmnand_regs_v72;
675 else if (ctrl->nand_version == 0x0701)
676 ctrl->reg_offsets = brcmnand_regs_v71;
677 else if (ctrl->nand_version >= 0x0600)
678 ctrl->reg_offsets = brcmnand_regs_v60;
679 else if (ctrl->nand_version >= 0x0500)
680 ctrl->reg_offsets = brcmnand_regs_v50;
681 else if (ctrl->nand_version >= 0x0303)
682 ctrl->reg_offsets = brcmnand_regs_v33;
683 else if (ctrl->nand_version >= 0x0201)
684 ctrl->reg_offsets = brcmnand_regs_v21;
687 if (ctrl->nand_version >= 0x0701)
688 ctrl->reg_spacing = 0x14;
690 ctrl->reg_spacing = 0x10;
693 if (ctrl->nand_version >= 0x0701) {
694 ctrl->cs_offsets = brcmnand_cs_offsets_v71;
696 ctrl->cs_offsets = brcmnand_cs_offsets;
699 if (ctrl->nand_version >= 0x0303 &&
700 ctrl->nand_version <= 0x0500)
701 ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
705 if (ctrl->nand_version >= 0x0701) {
707 ctrl->max_page_size = 16 * 1024;
708 ctrl->max_block_size = 2 * 1024 * 1024;
710 if (ctrl->nand_version >= 0x0304)
711 ctrl->page_sizes = page_sizes_v3_4;
712 else if (ctrl->nand_version >= 0x0202)
713 ctrl->page_sizes = page_sizes_v2_2;
715 ctrl->page_sizes = page_sizes_v2_1;
717 if (ctrl->nand_version >= 0x0202)
718 ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT;
720 ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT_v2_1;
722 if (ctrl->nand_version >= 0x0600)
723 ctrl->block_sizes = block_sizes_v6;
724 else if (ctrl->nand_version >= 0x0400)
725 ctrl->block_sizes = block_sizes_v4;
726 else if (ctrl->nand_version >= 0x0202)
727 ctrl->block_sizes = block_sizes_v2_2;
729 ctrl->block_sizes = block_sizes_v2_1;
731 if (ctrl->nand_version < 0x0400) {
732 if (ctrl->nand_version < 0x0202)
733 ctrl->max_page_size = 2048;
735 ctrl->max_page_size = 4096;
736 ctrl->max_block_size = 512 * 1024;
741 if (ctrl->nand_version == 0x0702)
742 ctrl->max_oob = 128;
743 else if (ctrl->nand_version >= 0x0600)
744 ctrl->max_oob = 64;
745 else if (ctrl->nand_version >= 0x0500)
746 ctrl->max_oob = 32;
748 ctrl->max_oob = 16;
751 if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
752 ctrl->features |= BRCMNAND_HAS_PREFETCH;
758 if (ctrl->nand_version >= 0x0700)
759 ctrl->features |= BRCMNAND_HAS_CACHE_MODE;
761 if (ctrl->nand_version >= 0x0500)
762 ctrl->features |= BRCMNAND_HAS_1K_SECTORS;
764 if (ctrl->nand_version >= 0x0700)
765 ctrl->features |= BRCMNAND_HAS_WP;
766 else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
767 ctrl->features |= BRCMNAND_HAS_WP;
770 if (ctrl->nand_version == 0x0702)
771 ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT;
773 ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT;
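
brcmnand_revision_init() keys every layout decision off one version word: the low 16 bits of register 0, encoded 0xMMmm (so 0x0701 is v7.1). Each feature is then selected with a descending ladder of >= comparisons, so a newer controller falls through to the newest table or limit that applies. A compilable sketch of the idiom, with placeholder table names:

#include <stdint.h>
#include <stdio.h>

/* Version word is 0xMMmm: major in the high byte, minor in the low byte. */
static const char *pick_reg_layout(uint16_t nand_version)
{
	if (nand_version >= 0x0702) return "regs_v72";
	if (nand_version == 0x0701) return "regs_v71"; /* v7.1 is a one-off */
	if (nand_version >= 0x0600) return "regs_v60";
	if (nand_version >= 0x0500) return "regs_v50";
	if (nand_version >= 0x0303) return "regs_v33";
	return "regs_v21"; /* anything below 0x0201 was rejected earlier */
}

int main(void)
{
	printf("%s\n", pick_reg_layout(0x0703)); /* -> regs_v72 */
	return 0;
}
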
778 static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
781 if (ctrl->nand_version >= 0x0703)
782 ctrl->flash_dma_offsets = flash_dma_regs_v4;
783 else if (ctrl->nand_version == 0x0602)
784 ctrl->flash_dma_offsets = flash_dma_regs_v0;
786 ctrl->flash_dma_offsets = flash_dma_regs_v1;
789 static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
792 u16 offs = ctrl->reg_offsets[reg];
795 return nand_readreg(ctrl, offs);
800 static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
803 u16 offs = ctrl->reg_offsets[reg];
806 nand_writereg(ctrl, offs, val);
809 static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
813 u32 tmp = brcmnand_read_reg(ctrl, reg);
817 brcmnand_write_reg(ctrl, reg, tmp);
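
brcmnand_rmw_reg() is the standard read-modify-write field update; note from the call at line 953 that callers pass the mask already shifted into position, while the value is shifted inside the helper. The body elided by the listing is presumably the usual clear-then-set; a pure-C equivalent:

#include <stdint.h>

/* Clear the (pre-shifted) mask, then install val at shift, matching the
 * caller at line 953, which passes (bits - 1) << shift as the mask. */
static uint32_t rmw(uint32_t tmp, uint32_t mask, unsigned int shift,
		    uint32_t val)
{
	tmp &= ~mask;
	tmp |= val << shift;
	return tmp;
}
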
820 static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
822 if (brcmnand_non_mmio_ops(ctrl))
823 return brcmnand_soc_read(ctrl->soc, BRCMNAND_NON_MMIO_FC_ADDR);
824 return __raw_readl(ctrl->nand_fc + word * 4);
827 static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
830 if (brcmnand_non_mmio_ops(ctrl))
831 brcmnand_soc_write(ctrl->soc, val, BRCMNAND_NON_MMIO_FC_ADDR);
833 __raw_writel(val, ctrl->nand_fc + word * 4);
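
The flash cache (FC) words deliberately use the __raw_ accessors, which skip the byte swapping that readl()/writel() would apply on a big-endian kernel; where byte order matters, the PIO read path converts explicitly (be32_to_cpu() at line 1842, before storing through the u32 view of flash_cache taken at line 1828). As I read it, the FC presents parameter-page data big-endian, so converting to CPU order before the word store leaves flash byte k at buffer byte k on either host endianness:

#include <stdint.h>

/* Byte-exact effect of flash_cache[i] = be32_to_cpu(fc_word), written
 * without relying on host order: flash byte k of the big-endian FC word
 * lands at buf[k]. */
static void store_fc_word(uint8_t buf[4], uint32_t fc_word_be)
{
	buf[0] = fc_word_be >> 24;
	buf[1] = fc_word_be >> 16;
	buf[2] = fc_word_be >> 8;
	buf[3] = fc_word_be;
}
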
836 static inline void edu_writel(struct brcmnand_controller *ctrl,
839 u16 offs = ctrl->edu_offsets[reg];
841 brcmnand_writel(val, ctrl->edu_base + offs);
844 static inline u32 edu_readl(struct brcmnand_controller *ctrl,
847 u16 offs = ctrl->edu_offsets[reg];
849 return brcmnand_readl(ctrl->edu_base + offs);
852 static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
856 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
857 brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
858 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
859 brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
862 static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
866 err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR);
867 err_addr |= ((u64)(brcmnand_read_reg(ctrl,
874 static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl)
878 err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
879 err_addr |= ((u64)(brcmnand_read_reg(ctrl,
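
Corrected and uncorrected ECC error addresses are each wider than one register, so the driver stitches a low ADDR word and an EXT_ADDR word into a u64. The listing truncates both expressions; the width mask on the extension word below is my assumption:

#include <stdint.h>

static uint64_t ecc_err_addr(uint32_t lo, uint32_t ext)
{
	return (uint64_t)lo | ((uint64_t)(ext & 0xffff) << 32);
}
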
890 struct brcmnand_controller *ctrl = host->ctrl;
892 brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
894 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
895 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
897 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
900 static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
903 u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
904 u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
907 if (cs == 0 && ctrl->cs0_offsets)
908 cs_offs = ctrl->cs0_offsets[reg];
910 cs_offs = ctrl->cs_offsets[reg];
913 return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;
915 return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
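
Per-chip-select registers are addressed in two banks: CS0 at its own base (with an alternate offset table on v3.3 to v5.0 parts, lines 699-701), and CS1..N strided from a second base by reg_spacing (0x14 on v7.1+, otherwise 0x10, lines 687-690). The guard between the two returns is elided by the listing; cs >= 1 is my reconstruction:

#include <stdint.h>

static uint16_t cs_reg_offset(uint16_t base_cs0, uint16_t base_cs1,
			      uint16_t spacing, int cs, uint16_t cs_offs)
{
	if (cs >= 1)	/* assumed guard */
		return base_cs1 + (cs - 1) * spacing + cs_offs;
	return base_cs0 + cs * spacing + cs_offs;
}
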
918 static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
920 if (ctrl->nand_version < 0x0600)
922 return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
927 struct brcmnand_controller *ctrl = host->ctrl;
932 if (!ctrl->reg_offsets[reg])
935 if (ctrl->nand_version == 0x0702)
937 else if (ctrl->nand_version >= 0x0600)
939 else if (ctrl->nand_version >= 0x0500)
944 if (ctrl->nand_version >= 0x0702) {
948 } else if (ctrl->nand_version >= 0x0600) {
953 brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
956 static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
961 if (ctrl->nand_version == 0x0304 && brcmnand_non_mmio_ops(ctrl))
964 if (ctrl->nand_version < 0x0602)
969 static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
971 if (ctrl->nand_version == 0x0702)
973 else if (ctrl->nand_version >= 0x0600)
975 else if (ctrl->nand_version >= 0x0303)
981 static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
983 u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
988 if (ctrl->nand_version == 0x0702)
996 struct brcmnand_controller *ctrl = host->ctrl;
997 u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
998 u32 acc_control = nand_readreg(ctrl, offs);
1003 acc_control &= ~brcmnand_ecc_level_mask(ctrl);
1004 acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift;
1007 acc_control &= ~brcmnand_ecc_level_mask(ctrl);
1010 nand_writereg(ctrl, offs, acc_control);
1013 static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
1015 if (ctrl->nand_version >= 0x0702)
1017 else if (ctrl->nand_version >= 0x0600)
1019 else if (ctrl->nand_version >= 0x0500)
1027 struct brcmnand_controller *ctrl = host->ctrl;
1028 int shift = brcmnand_sector_1k_shift(ctrl);
1029 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
1035 return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
1040 struct brcmnand_controller *ctrl = host->ctrl;
1041 int shift = brcmnand_sector_1k_shift(ctrl);
1042 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
1049 tmp = nand_readreg(ctrl, acc_control_offs);
1052 nand_writereg(ctrl, acc_control_offs, tmp);
1064 static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
1076 val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
1087 val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
1091 dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
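
bcmnand_ctrl_poll_status() (spelled bcmnand_, without the 'r', in the driver) samples INTFC_STATUS, spins until the masked bits match or a deadline passes, and warns with expected-versus-observed bits on timeout. A standalone model, with an iteration budget standing in for the driver's time-based deadline:

#include <stdint.h>
#include <stdbool.h>

static bool poll_status(uint32_t (*rd)(void *ctx), void *ctx,
			uint32_t mask, uint32_t expected, long budget)
{
	uint32_t val = rd(ctx);

	while ((val & mask) != expected && budget-- > 0)
		val = rd(ctx);

	return (val & mask) == expected; /* false => "timeout on status poll" */
}
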
1097 static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
1101 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
1108 static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
1110 return ctrl->flash_dma_base;
1113 static inline bool has_edu(struct brcmnand_controller *ctrl)
1115 return ctrl->edu_base;
1118 static inline bool use_dma(struct brcmnand_controller *ctrl)
1120 return has_flash_dma(ctrl) || has_edu(ctrl);
1123 static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
1125 if (ctrl->pio_poll_mode)
1128 if (has_flash_dma(ctrl)) {
1129 ctrl->flash_dma_base = NULL;
1130 disable_irq(ctrl->dma_irq);
1133 disable_irq(ctrl->irq);
1134 ctrl->pio_poll_mode = true;
1143 static inline void flash_dma_writel(struct brcmnand_controller *ctrl,
1146 u16 offs = ctrl->flash_dma_offsets[dma_reg];
1148 brcmnand_writel(val, ctrl->flash_dma_base + offs);
1151 static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl,
1154 u16 offs = ctrl->flash_dma_offsets[dma_reg];
1156 return brcmnand_readl(ctrl->flash_dma_base + offs);
1171 static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
1174 if (ctrl->nand_version <= 0x0701)
1335 if (is_hamming_ecc(host->ctrl, p)) {
1367 struct brcmnand_controller *ctrl = host->ctrl;
1369 if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
1374 dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
1379 * make sure ctrl/flash ready before and after
1382 ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
1389 brcmnand_set_wp(ctrl, wp);
1392 ret = bcmnand_ctrl_poll_status(ctrl,
1408 static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
1412 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
1413 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];
1415 if (offs >= ctrl->max_oob)
1423 return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
1426 static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
1431 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
1432 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];
1434 if (offs >= ctrl->max_oob)
1442 nand_writereg(ctrl, reg_offs, data);
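
OOB bytes are packed four to a 32-bit register, big-endian within the word: byte 0 occupies bits 31:24. The shift expression at line 1423 selects the lane; before it, out-of-range offsets (>= max_oob) and the secondary register window for OOB bytes 16 and up (the _10_BASE offsets) are handled. A standalone reproduction of the lane math:

#include <stdint.h>

/* offs & 3 == 0 -> shift 24 (MSB lane), offs & 3 == 3 -> shift 0 (LSB). */
static uint8_t oob_lane(uint32_t reg_val, uint32_t offs)
{
	return (uint8_t)(reg_val >> (24 - ((offs & 0x03) << 3)));
}
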
1447 * @ctrl: NAND controller
1453 static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
1461 tbytes = max(0, tbytes - (int)ctrl->max_oob);
1462 tbytes = min_t(int, tbytes, ctrl->max_oob);
1465 oob[j] = oob_reg_read(ctrl, j);
1476 static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
1486 tbytes = max(0, tbytes - (int)ctrl->max_oob);
1487 tbytes = min_t(int, tbytes, ctrl->max_oob);
1494 oob_reg_write(ctrl, j,
1505 oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last));
1510 static void brcmnand_edu_init(struct brcmnand_controller *ctrl)
1513 edu_writel(ctrl, EDU_ERR_STATUS, 0);
1514 edu_readl(ctrl, EDU_ERR_STATUS);
1515 edu_writel(ctrl, EDU_DONE, 0);
1516 edu_writel(ctrl, EDU_DONE, 0);
1517 edu_writel(ctrl, EDU_DONE, 0);
1518 edu_writel(ctrl, EDU_DONE, 0);
1519 edu_readl(ctrl, EDU_DONE);
1525 struct brcmnand_controller *ctrl = data;
1527 if (ctrl->edu_count) {
1528 ctrl->edu_count--;
1529 while (!(edu_readl(ctrl, EDU_DONE) & EDU_DONE_MASK))
1531 edu_writel(ctrl, EDU_DONE, 0);
1532 edu_readl(ctrl, EDU_DONE);
1535 if (ctrl->edu_count) {
1536 ctrl->edu_dram_addr += FC_BYTES;
1537 ctrl->edu_ext_addr += FC_BYTES;
1539 edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
1540 edu_readl(ctrl, EDU_DRAM_ADDR);
1541 edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
1542 edu_readl(ctrl, EDU_EXT_ADDR);
1544 if (ctrl->oob) {
1545 if (ctrl->edu_cmd == EDU_CMD_READ) {
1546 ctrl->oob += read_oob_from_regs(ctrl,
1547 ctrl->edu_count + 1,
1548 ctrl->oob, ctrl->sas,
1549 ctrl->sector_size_1k);
1551 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
1552 ctrl->edu_ext_addr);
1553 brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
1554 ctrl->oob += write_oob_to_regs(ctrl,
1555 ctrl->edu_count,
1556 ctrl->oob, ctrl->sas,
1557 ctrl->sector_size_1k);
1562 edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
1563 edu_readl(ctrl, EDU_CMD);
1568 complete(&ctrl->edu_done);
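
The EDU engine moves one flash-cache chunk (FC_BYTES) per command, so a multi-chunk transfer is driven from the completion interrupt: acknowledge the EDU_DONE count, decrement edu_count, advance both the DRAM-side and flash-side addresses by FC_BYTES, shuttle that chunk's OOB through the controller registers, re-arm EDU_CMD, and only complete(&ctrl->edu_done) once the count drains. Reduced to its state machine (field names follow the listing; locking and the read-back-to-flush register accesses are elided):

#include <stdint.h>
#include <stdbool.h>

#define FC_BYTES 512 /* flash-cache chunk size; value assumed, not in the listing */

struct edu_state {
	unsigned int count;   /* chunks still outstanding */
	uint64_t dram_addr;   /* DMA-side address         */
	uint64_t ext_addr;    /* flash-side address       */
};

/* One completion step; returns true when the transfer is finished. */
static bool edu_step(struct edu_state *s)
{
	if (s->count == 0 || --s->count == 0)
		return true;          /* -> complete(&edu_done) */
	s->dram_addr += FC_BYTES;
	s->ext_addr += FC_BYTES;
	/* ...reprogram EDU_DRAM_ADDR/EDU_EXT_ADDR, copy OOB, rewrite EDU_CMD */
	return false;
}
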
1575 struct brcmnand_controller *ctrl = data;
1578 if (ctrl->dma_pending)
1582 if (ctrl->edu_pending) {
1583 if (irq == ctrl->irq && ((int)ctrl->edu_irq >= 0))
1591 complete(&ctrl->done);
1598 struct brcmnand_controller *ctrl = data;
1600 if (ctrl->soc->ctlrdy_ack(ctrl->soc))
1608 struct brcmnand_controller *ctrl = data;
1610 complete(&ctrl->dma_done);
1617 struct brcmnand_controller *ctrl = host->ctrl;
1621 cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
1623 dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
1631 if (ctrl->cmd_pending &&
1632 bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
1635 BUG_ON(ctrl->cmd_pending != 0);
1636 ctrl->cmd_pending = cmd;
1638 ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
1642 brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
1643 cmd << brcmnand_cmd_shift(ctrl));
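
brcmnand_send_cmd() enforces a strict issue order: never overlap commands (cmd_pending plus the BUG_ON at line 1635), confirm NAND_CTRL_RDY, record the pending opcode, then write CMD_START with the version-dependent shift from brcmnand_cmd_shift() (24 bits below v6.02, lines 956-964, so opcode 0x01 becomes 0x01000000). The shape of that sequence, with the helpers reduced to callbacks on a hypothetical model type, not the driver's structs:

#include <stdint.h>

struct issue_model {
	int cmd_pending;
	int cmd_shift;                              /* 24 or 0      */
	int  (*poll_ready)(struct issue_model *m);  /* 0 when ready */
	void (*write_cmd_start)(struct issue_model *m, uint32_t val);
};

static int issue_cmd(struct issue_model *m, int cmd)
{
	if (m->cmd_pending && m->poll_ready(m))
		return -1;          /* a previous command never completed */
	m->cmd_pending = cmd;
	if (m->poll_ready(m))
		return -1;
	m->write_cmd_start(m, (uint32_t)cmd << m->cmd_shift);
	return 0;
}
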
1651 unsigned int ctrl)
1659 struct brcmnand_controller *ctrl = host->ctrl;
1664 if (mtd->oops_panic_write || ctrl->irq < 0) {
1666 disable_ctrl_irqs(ctrl);
1667 sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
1674 sts = wait_for_completion_timeout(&ctrl->done, timeo);
1684 struct brcmnand_controller *ctrl = host->ctrl;
1687 dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
1688 if (ctrl->cmd_pending)
1691 ctrl->cmd_pending = 0;
1693 u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
1694 >> brcmnand_cmd_shift(ctrl);
1696 dev_err_ratelimited(ctrl->dev,
1698 dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
1699 brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
1702 return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
1721 struct brcmnand_controller *ctrl = host->ctrl;
1746 dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);
1748 brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
1749 (void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);
1760 struct brcmnand_controller *ctrl = host->ctrl;
1771 dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
1828 u32 *flash_cache = (u32 *)ctrl->flash_cache;
1831 brcmnand_soc_data_bus_prepare(ctrl->soc, true);
1842 flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));
1844 brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
1860 struct brcmnand_controller *ctrl = host->ctrl;
1867 ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
1870 ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
1875 ret = oob_reg_read(ctrl, host->last_byte);
1879 ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
1894 ret = ctrl->flash_cache[offs];
1903 ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
1907 dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
1945 struct brcmnand_controller *ctrl = host->ctrl;
1954 dev_dbg(ctrl->dev, "EDU %s %p:%p\n", ((edu_cmd == EDU_CMD_READ) ?
1957 pa = dma_map_single(ctrl->dev, buf, len, dir);
1958 if (dma_mapping_error(ctrl->dev, pa)) {
1959 dev_err(ctrl->dev, "unable to map buffer for EDU DMA\n");
1963 ctrl->edu_pending = true;
1964 ctrl->edu_dram_addr = pa;
1965 ctrl->edu_ext_addr = addr;
1966 ctrl->edu_cmd = edu_cmd;
1967 ctrl->edu_count = trans;
1968 ctrl->sas = cfg->spare_area_size;
1969 ctrl->oob = oob;
1971 edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
1972 edu_readl(ctrl, EDU_DRAM_ADDR);
1973 edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
1974 edu_readl(ctrl, EDU_EXT_ADDR);
1975 edu_writel(ctrl, EDU_LENGTH, FC_BYTES);
1976 edu_readl(ctrl, EDU_LENGTH);
1978 if (ctrl->oob && (ctrl->edu_cmd == EDU_CMD_WRITE)) {
1979 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
1980 ctrl->edu_ext_addr);
1981 brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
1982 ctrl->oob += write_oob_to_regs(ctrl,
1984 ctrl->oob, ctrl->sas,
1985 ctrl->sector_size_1k);
1990 edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
1991 edu_readl(ctrl, EDU_CMD);
1993 if (wait_for_completion_timeout(&ctrl->edu_done, timeo) <= 0) {
1994 dev_err(ctrl->dev,
1996 edu_readl(ctrl, EDU_STATUS),
1997 edu_readl(ctrl, EDU_ERR_STATUS));
2000 dma_unmap_single(ctrl->dev, pa, len, dir);
2003 if (ctrl->oob && (ctrl->edu_cmd == EDU_CMD_READ)) {
2004 ctrl->oob += read_oob_from_regs(ctrl,
2006 ctrl->oob, ctrl->sas,
2007 ctrl->sector_size_1k);
2011 if (((brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
2014 dev_info(ctrl->dev, "program failed at %llx\n",
2020 if (edu_readl(ctrl, EDU_STATUS) & EDU_STATUS_ACTIVE)
2021 dev_warn(ctrl->dev, "EDU still active: %#x\n",
2022 edu_readl(ctrl, EDU_STATUS));
2024 if (unlikely(edu_readl(ctrl, EDU_ERR_STATUS) & EDU_ERR_STATUS_ERRACK)) {
2025 dev_warn(ctrl->dev, "EDU RBUS error at addr %llx\n",
2030 ctrl->edu_pending = false;
2031 brcmnand_edu_init(ctrl);
2032 edu_writel(ctrl, EDU_STOP, 0); /* force stop */
2033 edu_readl(ctrl, EDU_STOP);
2042 err_addr = brcmnand_get_uncorrecc_addr(ctrl);
2044 err_addr = brcmnand_get_correcc_addr(ctrl);
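
The EDU transfer wraps its engine programming in the canonical streaming-DMA bracket (lines 1957-2000): map the buffer, program address and length, kick EDU_CMD, wait on edu_done with a timeout, and unmap unconditionally; only afterwards does it harvest the ECC error addresses (lines 2042-2044) and check EDU_STATUS for a stuck engine. A skeleton of that bracket using the driver's own calls, with the error funneling elided:

	pa = dma_map_single(ctrl->dev, buf, len, dir);
	if (dma_mapping_error(ctrl->dev, pa))
		return -ENOMEM;
	/* ...program EDU_DRAM_ADDR / EDU_EXT_ADDR / EDU_LENGTH, write EDU_CMD... */
	if (wait_for_completion_timeout(&ctrl->edu_done, timeo) <= 0)
		dev_err(ctrl->dev, "timeout waiting for EDU; status %#x\n",
			edu_readl(ctrl, EDU_STATUS));
	dma_unmap_single(ctrl->dev, pa, len, dir); /* unmap even on timeout */
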
2092 struct brcmnand_controller *ctrl = host->ctrl;
2095 flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
2096 (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
2097 if (ctrl->nand_version > 0x0602) {
2098 flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT,
2100 (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
2104 ctrl->dma_pending = true;
2106 flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */
2108 if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
2109 dev_err(ctrl->dev,
2111 flash_dma_readl(ctrl, FLASH_DMA_STATUS),
2112 flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
2114 ctrl->dma_pending = false;
2115 flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
2121 struct brcmnand_controller *ctrl = host->ctrl;
2125 buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
2126 if (dma_mapping_error(ctrl->dev, buf_pa)) {
2127 dev_err(ctrl->dev, "unable to map buffer for DMA\n");
2131 brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
2134 brcmnand_dma_run(host, ctrl->dma_pa);
2136 dma_unmap_single(ctrl->dev, buf_pa, len, dir);
2138 if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
2140 else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
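
The older FLASH_DMA engine takes its work from a descriptor in coherent memory instead (dma_desc, allocated with dmam_alloc_coherent() at probe, lines 3149-3151): fill the descriptor, write its physical address to FIRST_DESC (and FIRST_DESC_EXT for the upper bits on controllers newer than v6.02, lines 2097-2100), set wake|run in FLASH_DMA_CTRL, and wait on dma_done. The ECC outcome then comes back in the descriptor rather than a register (lines 2138-2140); decoding it plausibly follows the usual MTD convention, though the listing truncates the assignments and the flag values below are assumed:

#include <errno.h>
#include <stdint.h>

#define FLASH_DMA_ECC_ERROR  (1 << 8) /* bit positions assumed for the sketch */
#define FLASH_DMA_CORR_ERROR (1 << 9)

static int dma_ecc_result(uint32_t status_valid)
{
	if (status_valid & FLASH_DMA_ECC_ERROR)
		return -EBADMSG; /* uncorrectable */
	if (status_valid & FLASH_DMA_CORR_ERROR)
		return -EUCLEAN; /* corrected     */
	return 0;
}
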
2154 struct brcmnand_controller *ctrl = host->ctrl;
2157 brcmnand_clear_ecc_addr(ctrl);
2166 brcmnand_soc_data_bus_prepare(ctrl->soc, false);
2169 *buf = brcmnand_read_fc(ctrl, j);
2171 brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
2175 oob += read_oob_from_regs(ctrl, i, oob,
2180 *err_addr = brcmnand_get_uncorrecc_addr(ctrl);
2187 *err_addr = brcmnand_get_correcc_addr(ctrl);
2253 struct brcmnand_controller *ctrl = host->ctrl;
2259 dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
2262 brcmnand_clear_ecc_addr(ctrl);
2264 if (ctrl->dma_trans && (has_edu(ctrl) || !oob) &&
2266 err = ctrl->dma_trans(host, addr, buf, oob,
2277 if (has_edu(ctrl) && err_addr)
2297 if ((ctrl->nand_version == 0x0700) ||
2298 (ctrl->nand_version == 0x0701)) {
2309 if (ctrl->nand_version < 0x0702) {
2317 dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
2325 unsigned int corrected = brcmnand_count_corrected(ctrl);
2332 dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
2398 struct brcmnand_controller *ctrl = host->ctrl;
2402 dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);
2405 dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
2411 for (i = 0; i < ctrl->max_oob; i += 4)
2412 oob_reg_write(ctrl, i, 0xffffffff);
2416 disable_ctrl_irqs(ctrl);
2418 if (use_dma(ctrl) && (has_edu(ctrl) || !oob) && flash_dma_buf_ok(buf)) {
2419 if (ctrl->dma_trans(host, addr, (u32 *)buf, oob, mtd->writesize,
2432 brcmnand_soc_data_bus_prepare(ctrl->soc, false);
2435 brcmnand_write_fc(ctrl, j, *buf);
2437 brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
2440 brcmnand_write_fc(ctrl, j, 0xffffffff);
2444 oob += write_oob_to_regs(ctrl, i, oob,
2454 dev_info(ctrl->dev, "program failed at %llx\n",
2521 struct brcmnand_controller *ctrl = host->ctrl;
2523 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
2524 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
2526 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
2531 if (ctrl->block_sizes) {
2534 for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
2535 if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
2540 dev_warn(ctrl->dev, "invalid block size %u\n",
2548 if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
2549 cfg->block_size > ctrl->max_block_size)) {
2550 dev_warn(ctrl->dev, "invalid block size %u\n",
2555 if (ctrl->page_sizes) {
2558 for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
2559 if (ctrl->page_sizes[i] == cfg->page_size) {
2564 dev_warn(ctrl->dev, "invalid page size %u\n",
2572 if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
2573 cfg->page_size > ctrl->max_page_size)) {
2574 dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
2579 dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
2591 tmp |= (page_size << ctrl->page_size_shift) |
2593 nand_writereg(ctrl, cfg_offs, tmp);
2595 nand_writereg(ctrl, cfg_offs, tmp);
2598 nand_writereg(ctrl, cfg_ext_offs, tmp);
2601 tmp = nand_readreg(ctrl, acc_control_offs);
2602 tmp &= ~brcmnand_ecc_level_mask(ctrl);
2603 tmp &= ~brcmnand_spare_area_mask(ctrl);
2604 if (ctrl->nand_version >= 0x0302) {
2605 tmp |= cfg->ecc_level << ctrl->ecc_level_shift;
2608 nand_writereg(ctrl, acc_control_offs, tmp);
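
brcmnand_set_cfg() translates the generic geometry into the packed CONFIG/CONFIG_EXT/ACC_CONTROL words. Block and page sizes are validated against the version's zero-terminated size table when one exists (otherwise against min/max bounds) and encoded as the table index shifted into place; ECC level and spare-area size are masked out of ACC_CONTROL and rewritten using the version-dependent ecc_level_shift picked at lines 770-773. The index-encoding step, standalone:

#include <stdint.h>

/* Find `size` in a zero-terminated table and OR its index into the config
 * word at `shift`; -1 maps to the driver's "invalid ... size" warning. */
static int encode_size(const unsigned int *table, unsigned int size,
		       int shift, uint32_t *cfg)
{
	int i;

	for (i = 0; table[i]; i++) {
		if (table[i] == size) {
			*cfg |= (uint32_t)i << shift;
			return 0;
		}
	}
	return -1;
}
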
2630 if (is_hamming_ecc(host->ctrl, cfg))
2658 struct brcmnand_controller *ctrl = host->ctrl;
2676 if (cfg->spare_area_size > ctrl->max_oob)
2677 cfg->spare_area_size = ctrl->max_oob;
2693 dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
2709 dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
2720 dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
2734 if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
2735 dev_err(ctrl->dev, "1KB sectors not supported\n");
2739 dev_err(ctrl->dev,
2748 dev_err(ctrl->dev, "unsupported ECC size: %d\n",
2766 dev_info(ctrl->dev, "detected %s\n", msg);
2769 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
2770 tmp = nand_readreg(ctrl, offs);
2775 if (ctrl->nand_version >= 0x0702)
2778 if (ctrl->features & BRCMNAND_HAS_PREFETCH)
2781 nand_writereg(ctrl, offs, tmp);
2814 if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
2829 struct brcmnand_controller *ctrl = host->ctrl;
2830 struct device *dev = ctrl->dev;
2865 chip->controller = &ctrl->controller;
2872 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
2873 nand_writereg(ctrl, cfg_offs,
2874 nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
2890 struct brcmnand_controller *ctrl = host->ctrl;
2891 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
2892 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
2894 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
2896 u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
2897 u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
2900 nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
2902 nand_writereg(ctrl, cfg_ext_offs,
2904 nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
2905 nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
2906 nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
2908 host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
2911 nand_readreg(ctrl, cfg_ext_offs);
2912 host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
2913 host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
2914 host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
2920 struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
2923 list_for_each_entry(host, &ctrl->host_list, node)
2926 ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
2927 ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
2928 ctrl->corr_stat_threshold =
2929 brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);
2931 if (has_flash_dma(ctrl))
2932 ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
2933 else if (has_edu(ctrl))
2934 ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
2941 struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
2944 if (has_flash_dma(ctrl)) {
2945 flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
2946 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
2949 if (has_edu(ctrl)) {
2950 ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
2951 edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config);
2952 edu_readl(ctrl, EDU_CONFIG);
2953 brcmnand_edu_init(ctrl);
2956 brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
2957 brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
2958 brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
2959 ctrl->corr_stat_threshold);
2960 if (ctrl->soc) {
2962 ctrl->soc->ctlrdy_ack(ctrl->soc);
2963 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
2966 list_for_each_entry(host, &ctrl->host_list, node) {
3006 struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
3012 ctrl->edu_base = devm_ioremap_resource(dev, res);
3013 if (IS_ERR(ctrl->edu_base))
3014 return PTR_ERR(ctrl->edu_base);
3016 ctrl->edu_offsets = edu_regs;
3018 edu_writel(ctrl, EDU_CONFIG, EDU_CONFIG_MODE_NAND |
3020 edu_readl(ctrl, EDU_CONFIG);
3023 brcmnand_edu_init(ctrl);
3025 ctrl->edu_irq = platform_get_irq_optional(pdev, 1);
3026 if (ctrl->edu_irq < 0) {
3030 ret = devm_request_irq(dev, ctrl->edu_irq,
3032 "brcmnand-edu", ctrl);
3034 dev_err(ctrl->dev, "can't allocate IRQ %d: error %d\n",
3035 ctrl->edu_irq, ret);
3040 ctrl->edu_irq);
3052 struct brcmnand_controller *ctrl;
3060 ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
3061 if (!ctrl)
3064 dev_set_drvdata(dev, ctrl);
3065 ctrl->dev = dev;
3066 ctrl->soc = soc;
3071 if (brcmnand_soc_has_ops(ctrl->soc))
3074 init_completion(&ctrl->done);
3075 init_completion(&ctrl->dma_done);
3076 init_completion(&ctrl->edu_done);
3077 nand_controller_init(&ctrl->controller);
3078 ctrl->controller.ops = &brcmnand_controller_ops;
3079 INIT_LIST_HEAD(&ctrl->host_list);
3083 ctrl->nand_base = devm_ioremap_resource(dev, res);
3084 if (IS_ERR(ctrl->nand_base) && !brcmnand_soc_has_ops(soc))
3085 return PTR_ERR(ctrl->nand_base);
3088 ctrl->clk = devm_clk_get(dev, "nand");
3089 if (!IS_ERR(ctrl->clk)) {
3090 ret = clk_prepare_enable(ctrl->clk);
3094 ret = PTR_ERR(ctrl->clk);
3098 ctrl->clk = NULL;
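
The clock is treated as optional here (lines 3088-3098): only -EPROBE_DEFER propagates out of probe, while any other devm_clk_get() failure means "no clock" and ctrl->clk is cleared to NULL, which the clk API accepts as a no-op in clk_prepare_enable()/clk_disable_unprepare(). The idiom in isolation (newer kernels spell it devm_clk_get_optional()):

	clk = devm_clk_get(dev, "nand");
	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER; /* provider not ready yet */
		clk = NULL;                   /* clock is genuinely optional */
	}
	ret = clk_prepare_enable(clk);        /* NULL clk: returns 0 */
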
3102 ret = brcmnand_revision_init(ctrl);
3112 ctrl->nand_fc = devm_ioremap_resource(dev, res);
3113 if (IS_ERR(ctrl->nand_fc)) {
3114 ret = PTR_ERR(ctrl->nand_fc);
3118 ctrl->nand_fc = ctrl->nand_base +
3119 ctrl->reg_offsets[BRCMNAND_FC_BASE];
3125 ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
3126 if (IS_ERR(ctrl->flash_dma_base)) {
3127 ret = PTR_ERR(ctrl->flash_dma_base);
3132 brcmnand_flash_dma_revision_init(ctrl);
3135 if (ctrl->nand_version >= 0x0700)
3145 flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
3146 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
3149 ctrl->dma_desc = dmam_alloc_coherent(dev,
3150 sizeof(*ctrl->dma_desc),
3151 &ctrl->dma_pa, GFP_KERNEL);
3152 if (!ctrl->dma_desc) {
3157 ctrl->dma_irq = platform_get_irq(pdev, 1);
3158 if ((int)ctrl->dma_irq < 0) {
3164 ret = devm_request_irq(dev, ctrl->dma_irq,
3166 ctrl);
3169 ctrl->dma_irq, ret);
3175 ctrl->dma_trans = brcmnand_dma_trans;
3181 if (has_edu(ctrl))
3183 ctrl->dma_trans = brcmnand_edu_trans;
3187 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
3190 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
3192 if (ctrl->features & BRCMNAND_HAS_WP) {
3195 brcmnand_set_wp(ctrl, false);
3201 ctrl->irq = platform_get_irq_optional(pdev, 0);
3202 if (ctrl->irq > 0) {
3208 ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
3209 DRV_NAME, ctrl);
3212 ctrl->soc->ctlrdy_ack(ctrl->soc);
3213 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
3216 ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
3217 DRV_NAME, ctrl);
3221 ctrl->irq, ret);
3236 host->ctrl = ctrl;
3257 list_add_tail(&host->node, &ctrl->host_list);
3261 if (!list_empty(&ctrl->host_list))
3276 host->ctrl = ctrl;
3285 list_add_tail(&host->node, &ctrl->host_list);
3288 if (list_empty(&ctrl->host_list)) {
3296 clk_disable_unprepare(ctrl->clk);
3304 struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
3309 list_for_each_entry(host, &ctrl->host_list, node) {
3316 clk_disable_unprepare(ctrl->clk);