Lines matching refs: umc

1116 static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
1136 ctx.inst_id = umc;
1139 if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp))
1153 if (df_indirect_read_instance(nid, 0, 0x110 + (8 * base), umc, &ctx.tmp))
1176 if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp))
1229 * umc/channel# as instance id of the coherent slave
1232 if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp))
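The matches at 1139-1232 trace umc_normaddr_to_sysaddr() walking Data Fabric registers through df_indirect_read_instance(): the DRAM offset at 0x1B4, one DramBaseAddress/DramLimitAddress pair (spaced 8 bytes apart, hence 0x110/0x114 + 8 * base), and the coherent slave's fabric ID at 0x50, read with the UMC/channel number as the instance ID. A minimal sketch of that walk, with df_indirect_read_instance() stubbed as an extern and the kernel's context struct replaced by plain out-parameters:

#include <stdint.h>

/* Provided by the kernel; declared here so the sketch is self-contained. */
extern int df_indirect_read_instance(uint16_t nid, uint8_t func, uint16_t reg,
				     uint8_t instance_id, uint32_t *val);

/* Hypothetical helper mirroring the register walk shown above. */
static int read_df_map_regs(uint16_t nid, uint8_t umc, unsigned int base,
			    uint32_t *dram_offset, uint32_t *dram_base,
			    uint32_t *dram_limit, uint32_t *fab_id)
{
	if (df_indirect_read_instance(nid, 0, 0x1B4, umc, dram_offset))
		return -1;

	/* DramBaseAddress/DramLimitAddress pairs are 8 bytes apart. */
	if (df_indirect_read_instance(nid, 0, 0x110 + (8 * base), umc, dram_base))
		return -1;
	if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, dram_limit))
		return -1;

	/* The UMC/channel number doubles as the coherent slave's instance ID. */
	if (df_indirect_read_instance(nid, 0, 0x50, umc, fab_id))
		return -1;

	return 0;
}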
1348 if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
1354 if (pvt->umc[i].umc_cfg & BIT(12))
1533 static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1577 addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
1579 addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
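umc_addr_mask_to_cs_size() (1533-1579) selects the secondary mask for the odd chip select of an asymmetric dual-rank DIMM and the primary mask otherwise; on pre-zn_regs_v2 parts two chip selects share one mask (hence the m_cnt of 2 at 1708), so the mask index is the chip select number halved. The chosen mask is then converted to a size. A userspace sketch of that conversion, modeled on the kernel's helper but omitting the 3R-interleave correction (an assumption made for brevity); mask register bits [31:1] cover address bits [39:9], so the deinterleaved mask shifted right by 2, plus 1, is the size in kB:

#include <stdint.h>
#include <stdio.h>

static unsigned int cs_size_mb(uint32_t addr_mask)
{
	if (!addr_mask)
		return 0;

	unsigned int msb = 31 - __builtin_clz(addr_mask);	/* highest set bit */
	unsigned int weight = __builtin_popcount(addr_mask);
	unsigned int zero_bits = msb - weight;			/* interleave holes */
	/* Strip the interleave bits off the top: GENMASK(msb - zero_bits, 1). */
	uint64_t deinterleaved = (1ULL << (msb - zero_bits + 1)) - 2;
	uint64_t size_kb = (deinterleaved >> 2) + 1;

	return size_kb >> 10;					/* kB -> MB */
}

int main(void)
{
	printf("%u MB\n", cs_size_mb(0x1FFFFFE));	/* contiguous mask: 8192 MB */
	printf("%u MB\n", cs_size_mb(0x1FFFFFC));	/* one interleave bit: 4096 MB */
	return 0;
}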
1607 struct amd64_umc *umc;
1612 umc = &pvt->umc[i];
1614 edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
1615 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
1616 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
1617 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
1624 edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
1627 i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
1628 (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
1630 i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
1632 i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
1634 i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
1636 if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) {
1704 int umc;
1706 for_each_umc(umc) {
1707 pvt->csels[umc].b_cnt = 4;
1708 pvt->csels[umc].m_cnt = pvt->flags.zn_regs_v2 ? 4 : 2;
1720 int cs, umc;
1722 for_each_umc(umc) {
1723 umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
1724 umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
1726 for_each_chip_select(cs, umc, pvt) {
1727 base = &pvt->csels[umc].csbases[cs];
1728 base_sec = &pvt->csels[umc].csbases_sec[cs];
1735 umc, cs, *base, base_reg);
1739 umc, cs, *base_sec, base_reg_sec);
1742 umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
1743 umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC);
1745 for_each_chip_select_mask(cs, umc, pvt) {
1746 mask = &pvt->csels[umc].csmasks[cs];
1747 mask_sec = &pvt->csels[umc].csmasks_sec[cs];
1754 umc, cs, *mask, mask_reg);
1758 umc, cs, *mask_sec, mask_reg_sec);
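umc_read_base_mask() (1720-1758) forms each SMN address as get_umc_base(umc) plus a register offset, then steps 4 bytes per chip select. A sketch of that addressing, assuming get_umc_base(i) is 0x50000 + (i << 20) as defined in amd64_edac.h, with the register offsets kept symbolic:

#include <stdint.h>

/* Assumed layout: one 1 MB-spaced register block per UMC channel. */
static inline uint32_t umc_block_base(uint8_t umc)
{
	return 0x50000 + ((uint32_t)umc << 20);
}

/* Per-chip-select registers sit 4 bytes apart within the block. */
static inline uint32_t umc_cs_reg(uint8_t umc, int cs, uint32_t reg_offset)
{
	/* e.g. UMC 1, CS 2, reg_offset UMCCH_BASE_ADDR:
	 *   0x50000 + 0x100000 + UMCCH_BASE_ADDR + 8
	 */
	return umc_block_base(umc) + reg_offset + cs * 4;
}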
1811 struct amd64_umc *umc;
1815 umc = &pvt->umc[i];
1817 if (!(umc->sdp_ctrl & UMC_SDP_INIT)) {
1818 umc->dram_type = MEM_EMPTY;
1826 if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
1827 if (umc->dimm_cfg & BIT(5))
1828 umc->dram_type = MEM_LRDDR5;
1829 else if (umc->dimm_cfg & BIT(4))
1830 umc->dram_type = MEM_RDDR5;
1832 umc->dram_type = MEM_DDR5;
1834 if (umc->dimm_cfg & BIT(5))
1835 umc->dram_type = MEM_LRDDR4;
1836 else if (umc->dimm_cfg & BIT(4))
1837 umc->dram_type = MEM_RDDR4;
1839 umc->dram_type = MEM_DDR4;
1842 edac_dbg(1, " UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]);
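The branch structure at 1817-1839 is umc_determine_memory_type()'s whole decision tree: an uninitialized SDP interface means an empty slot; umc_cfg[2:0] == 1 on zn_regs_v2 parts selects the DDR5 family; and dimm_cfg bits 5 and 4 distinguish load-reduced and registered DIMMs from unbuffered ones. Restated as a standalone function (UMC_SDP_INIT as BIT(31) is my reading of amd64_edac.h):

#include <stdbool.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))
#define UMC_SDP_INIT	BIT(31)		/* assumed, per amd64_edac.h */

enum mem { EMPTY, DDR4, RDDR4, LRDDR4, DDR5, RDDR5, LRDDR5 };

enum mem dram_type(uint32_t umc_cfg, uint32_t dimm_cfg,
		   uint32_t sdp_ctrl, bool zn_regs_v2)
{
	if (!(sdp_ctrl & UMC_SDP_INIT))
		return EMPTY;				/* slot not populated */

	if (zn_regs_v2 && (umc_cfg & 0x7) == 0x1) {	/* GENMASK(2, 0) */
		if (dimm_cfg & BIT(5))
			return LRDDR5;
		if (dimm_cfg & BIT(4))
			return RDDR5;
		return DDR5;
	}

	if (dimm_cfg & BIT(5))
		return LRDDR4;
	if (dimm_cfg & BIT(4))
		return RDDR4;
	return DDR4;
}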
3170 struct amd64_umc *umc;
3177 umc = &pvt->umc[i];
3179 amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg);
3180 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
3181 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
3182 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
3183 amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
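umc_read_mc_regs() (3170-3183) reads each UMC's configuration registers over SMN; note that DIMM_CFG goes through get_umc_reg() at 3179, which remaps a couple of offsets on zn_regs_v2 parts whose DDR5 layout moved them (the same dispatch covers UMCCH_ADDR_MASK_SEC at 1743). A sketch of that dispatch; the offset values are my reading of amd64_edac.h, not a spec:

#include <stdbool.h>
#include <stdint.h>

/* Offsets assumed from amd64_edac.h; treat as illustrative. */
#define UMCCH_DIMM_CFG			0x80
#define UMCCH_DIMM_CFG_DDR5		0x90
#define UMCCH_ADDR_MASK_SEC		0x28
#define UMCCH_ADDR_MASK_SEC_DDR5	0x30

static inline uint32_t get_umc_reg_sketch(bool zn_regs_v2, uint32_t reg)
{
	if (!zn_regs_v2)
		return reg;

	/* DDR5 register layouts relocate these two registers. */
	switch (reg) {
	case UMCCH_ADDR_MASK_SEC:	return UMCCH_ADDR_MASK_SEC_DDR5;
	case UMCCH_DIMM_CFG:		return UMCCH_DIMM_CFG_DDR5;
	}

	return reg;
}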
3330 u8 umc, cs;
3345 for_each_umc(umc) {
3346 for_each_chip_select(cs, umc, pvt) {
3347 if (!csrow_enabled(cs, umc, pvt))
3350 dimm = mci->csrows[cs]->channels[umc]->dimm;
3355 dimm->nr_pages = umc_get_csrow_nr_pages(pvt, umc, cs);
3356 dimm->mtype = pvt->umc[umc].dram_type;
3623 struct amd64_umc *umc;
3627 umc = &pvt->umc[i];
3630 if (!(umc->sdp_ctrl & UMC_SDP_INIT))
3635 if (umc->umc_cap_hi & UMC_ECC_ENABLED)
3659 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3660 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3661 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3663 dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3664 dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
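The &= accumulation at 3659-3664 makes these node-wide flags a conjunction: ECC, chipkill, and the x4/x16 device-width bits are reported only if every initialized UMC agrees, while UMCs with SDP_INIT clear are skipped (the separate check at 3630/3635 appears to answer a different question: whether ECC is enabled on any one UMC). A sketch of the pattern for the ECC flag; the constants here are stand-ins for UMC_SDP_INIT and UMC_ECC_ENABLED:

#include <stdbool.h>
#include <stdint.h>

struct umc_regs {
	uint32_t sdp_ctrl;
	uint32_t umc_cap_hi;
};

bool node_ecc_enabled(const struct umc_regs *umc, int num_umcs,
		      uint32_t sdp_init, uint32_t ecc_enabled)
{
	bool ecc_en = true;

	for (int i = 0; i < num_umcs; i++) {
		if (!(umc[i].sdp_ctrl & sdp_init))
			continue;	/* uninitialized UMC: ignore it */
		/* One initialized UMC without ECC clears the node-wide flag. */
		ecc_en &= !!(umc[i].umc_cap_hi & ecc_enabled);
	}

	return ecc_en;
}

The kernel additionally tracks whether any UMC was enabled at all; this sketch leaves that check to the caller.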
3745 pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3746 if (!pvt->umc)
3783 static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
3786 u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr];
3807 struct amd64_umc *umc;
3811 umc = &pvt->umc[i];
3813 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
3814 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
3815 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
3840 u8 umc, cs;
3842 for_each_umc(umc) {
3843 for_each_chip_select(cs, umc, pvt) {
3844 if (!csrow_enabled(cs, umc, pvt))
3847 dimm = mci->csrows[umc]->channels[cs]->dimm;
3852 dimm->nr_pages = gpu_get_csrow_nr_pages(pvt, umc, cs);
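Note the index swap relative to the CPU path: 3350 fills mci->csrows[cs]->channels[umc], while 3847 fills mci->csrows[umc]->channels[cs]. GPU nodes appear to model each UMC as an EDAC csrow and its chip selects as channels, consistent with the 8-base/8-mask counts at 3954-3955:

/* CPU node (3350): chip select indexes the csrow, UMC the channel. */
dimm = mci->csrows[cs]->channels[umc]->dimm;

/* GPU node (3847): UMC indexes the csrow, chip select the channel. */
dimm = mci->csrows[umc]->channels[cs]->dimm;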
3883 static inline u32 gpu_get_umc_base(u8 umc, u8 channel)
3897 umc *= 2;
3900 umc++;
3902 return 0x50000 + (umc << 20) + ((channel % 4) << 12);
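The arithmetic at 3897-3902 packs a GPU channel into the SMN block address: the UMC number is doubled, bumped by one for the upper channel group (the guard on the increment at 3900 is not in the match output; per my reading of the source it is channel >= 4), and the channel's low two bits select a 4 KB-spaced sub-block. A worked restatement:

#include <stdint.h>
#include <stdio.h>

/* Restatement of gpu_get_umc_base() as shown in the matches above. */
static unsigned int gpu_umc_base(uint8_t umc, uint8_t channel)
{
	umc *= 2;		/* two register blocks per GPU UMC */
	if (channel >= 4)
		umc++;		/* channels 4-7 use the odd block (assumed guard) */
	return 0x50000 + ((unsigned int)umc << 20) + ((channel % 4) << 12);
}

int main(void)
{
	/* UMC 1, channel 5: umc -> 3, channel % 4 == 1 -> 0x351000 */
	printf("0x%x\n", gpu_umc_base(1, 5));
	return 0;
}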
3908 struct amd64_umc *umc;
3914 umc = &pvt->umc[i];
3916 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
3917 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
3918 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
3926 int umc, cs;
3928 for_each_umc(umc) {
3929 for_each_chip_select(cs, umc, pvt) {
3930 base_reg = gpu_get_umc_base(umc, cs) + UMCCH_BASE_ADDR;
3931 base = &pvt->csels[umc].csbases[cs];
3935 umc, cs, *base, base_reg);
3938 mask_reg = gpu_get_umc_base(umc, cs) + UMCCH_ADDR_MASK;
3939 mask = &pvt->csels[umc].csmasks[cs];
3943 umc, cs, *mask, mask_reg);
3951 int umc;
3953 for_each_umc(umc) {
3954 pvt->csels[umc].b_cnt = 8;
3955 pvt->csels[umc].m_cnt = 8;
3967 pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3968 if (!pvt->umc)
3982 kfree(pvt->umc);