Lines Matching refs:pvt
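
The matched lines below appear to be from the Linux kernel's amd64_edac EDAC driver, where pvt is the per-memory-controller private data: a struct amd64_pvt allocated with kzalloc at line 3566, stored in mci->pvt_info at line 3521, and fetched back by the handlers (lines 236, 2521, 3001). A minimal, hypothetical sketch of that access pattern follows; it assumes the driver's internal struct amd64_pvt definition and the kernel's edac_dbg() helper, and example_dump_node() is not a function in the matched file.

/*
 * Hypothetical sketch, not part of the matched file: fetch the per-node
 * private data from the EDAC core object, mirroring the pattern at
 * lines 236, 2521 and 3001. Assumes struct amd64_pvt and edac_dbg()
 * from the driver's internal definitions.
 */
static void example_dump_node(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	edac_dbg(1, "MC node %u: family 0x%x, model 0x%x, stepping 0x%x\n",
		 pvt->mc_node_id, pvt->fam, pvt->model, pvt->stepping);
}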

89 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
93 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
94 reg &= (pvt->model == 0x30) ? ~3 : ~1;
96 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
113 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
116 switch (pvt->fam) {
129 if (dct_ganging_enabled(pvt))
141 dct = (dct && pvt->model == 0x30) ? 3 : dct;
142 f15h_select_dct(pvt, dct);
153 return amd64_read_pci_cfg(pvt->F2, offset, val);
170 static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
179 pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
180 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
182 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
189 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
217 if (pvt->umc) {
218 __f17h_set_scrubval(pvt, scrubval);
219 } else if (pvt->fam == 0x15 && pvt->model == 0x60) {
220 f15h_select_dct(pvt, 0);
221 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
222 f15h_select_dct(pvt, 1);
223 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
225 pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
236 struct amd64_pvt *pvt = mci->pvt_info;
239 if (pvt->fam == 0xf)
242 if (pvt->fam == 0x15) {
244 if (pvt->model < 0x10)
245 f15h_select_dct(pvt, 0);
247 if (pvt->model == 0x60)
250 return __set_scrub_rate(pvt, bw, min_scrubrate);
255 struct amd64_pvt *pvt = mci->pvt_info;
259 if (pvt->umc) {
260 amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
262 amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
268 } else if (pvt->fam == 0x15) {
270 if (pvt->model < 0x10)
271 f15h_select_dct(pvt, 0);
273 if (pvt->model == 0x60)
274 amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
276 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
278 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
296 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
308 return ((addr >= get_dram_base(pvt, nid)) &&
309 (addr <= get_dram_limit(pvt, nid)));
321 struct amd64_pvt *pvt;
329 pvt = mci->pvt_info;
336 intlv_en = dram_intlv_en(pvt, 0);
340 if (base_limit_match(pvt, sys_addr, node_id))
356 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
364 if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
385 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
391 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
392 csbase = pvt->csels[dct].csbases[csrow];
393 csmask = pvt->csels[dct].csmasks[csrow];
402 } else if (pvt->fam == 0x16 ||
403 (pvt->fam == 0x15 && pvt->model >= 0x30)) {
404 csbase = pvt->csels[dct].csbases[csrow];
405 csmask = pvt->csels[dct].csmasks[csrow >> 1];
420 csbase = pvt->csels[dct].csbases[csrow];
421 csmask = pvt->csels[dct].csmasks[csrow >> 1];
424 if (pvt->fam == 0x15)
441 #define for_each_chip_select(i, dct, pvt) \
442 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
444 #define chip_select_base(i, dct, pvt) \
445 pvt->csels[dct].csbases[i]
447 #define for_each_chip_select_mask(i, dct, pvt) \
448 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
459 struct amd64_pvt *pvt;
463 pvt = mci->pvt_info;
465 for_each_chip_select(csrow, 0, pvt) {
466 if (!csrow_enabled(csrow, 0, pvt))
469 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
476 pvt->mc_node_id);
482 (unsigned long)input_addr, pvt->mc_node_id);
506 struct amd64_pvt *pvt = mci->pvt_info;
509 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
511 pvt->ext_model, pvt->mc_node_id);
516 if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
521 if (!dhar_valid(pvt)) {
523 pvt->mc_node_id);
545 *hole_base = dhar_base(pvt);
548 *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
549 : k8_dhar_offset(pvt);
552 pvt->mc_node_id, (unsigned long)*hole_base,
590 struct amd64_pvt *pvt = mci->pvt_info;
594 dram_base = get_dram_base(pvt, pvt->mc_node_id);
646 struct amd64_pvt *pvt;
650 pvt = mci->pvt_info;
656 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
718 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
723 if (pvt->umc) {
727 if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
733 if (pvt->umc[i].umc_cfg & BIT(12))
740 bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
744 if (pvt->dclr0 & BIT(bit))
753 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
757 if (pvt->dram_type == MEM_LRDDR3) {
758 u32 dcsm = pvt->csels[chan].csmasks[0];
774 if (pvt->fam == 0x10)
794 static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
799 if (csrow_enabled(2 * dimm, ctrl, pvt))
802 if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
806 if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
814 for_each_chip_select(base, ctrl, pvt)
815 count += csrow_enabled(base, ctrl, pvt);
818 pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
826 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
836 cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
838 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
839 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
847 static void __dump_misc_regs_df(struct amd64_pvt *pvt)
854 umc = &pvt->umc[i];
861 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
864 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
878 if (pvt->dram_type == MEM_LRDDR4) {
879 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
884 debug_display_dimm_sizes_df(pvt, i);
888 pvt->dhar, dhar_base(pvt));
892 static void __dump_misc_regs(struct amd64_pvt *pvt)
894 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
897 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
900 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
901 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
903 debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
905 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
908 pvt->dhar, dhar_base(pvt),
909 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
910 : f10_dhar_offset(pvt));
912 debug_display_dimm_sizes(pvt, 0);
915 if (pvt->fam == 0xf)
918 debug_display_dimm_sizes(pvt, 1);
921 if (!dct_ganging_enabled(pvt))
922 debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
926 static void dump_misc_regs(struct amd64_pvt *pvt)
928 if (pvt->umc)
929 __dump_misc_regs_df(pvt);
931 __dump_misc_regs(pvt);
933 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
935 amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
941 static void prep_chip_selects(struct amd64_pvt *pvt)
943 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
944 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
945 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
946 } else if (pvt->fam == 0x15 && pvt->model == 0x30) {
947 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
948 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
949 } else if (pvt->fam >= 0x17) {
953 pvt->csels[umc].b_cnt = 4;
954 pvt->csels[umc].m_cnt = 2;
958 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
959 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
963 static void read_umc_base_mask(struct amd64_pvt *pvt)
977 for_each_chip_select(cs, umc, pvt) {
978 base = &pvt->csels[umc].csbases[cs];
979 base_sec = &pvt->csels[umc].csbases_sec[cs];
984 if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
988 if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
996 for_each_chip_select_mask(cs, umc, pvt) {
997 mask = &pvt->csels[umc].csmasks[cs];
998 mask_sec = &pvt->csels[umc].csmasks_sec[cs];
1003 if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
1007 if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
1017 static void read_dct_base_mask(struct amd64_pvt *pvt)
1021 prep_chip_selects(pvt);
1023 if (pvt->umc)
1024 return read_umc_base_mask(pvt);
1026 for_each_chip_select(cs, 0, pvt) {
1029 u32 *base0 = &pvt->csels[0].csbases[cs];
1030 u32 *base1 = &pvt->csels[1].csbases[cs];
1032 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1036 if (pvt->fam == 0xf)
1039 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1041 cs, *base1, (pvt->fam == 0x10) ? reg1
1045 for_each_chip_select_mask(cs, 0, pvt) {
1048 u32 *mask0 = &pvt->csels[0].csmasks[cs];
1049 u32 *mask1 = &pvt->csels[1].csmasks[cs];
1051 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1055 if (pvt->fam == 0xf)
1058 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1060 cs, *mask1, (pvt->fam == 0x10) ? reg1
1065 static void determine_memory_type(struct amd64_pvt *pvt)
1069 if (pvt->umc) {
1070 if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
1071 pvt->dram_type = MEM_LRDDR4;
1072 else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
1073 pvt->dram_type = MEM_RDDR4;
1075 pvt->dram_type = MEM_DDR4;
1079 switch (pvt->fam) {
1081 if (pvt->ext_model >= K8_REV_F)
1084 pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1088 if (pvt->dchr0 & DDR3_MODE)
1091 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1095 if (pvt->model < 0x60)
1107 amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1108 dcsm = pvt->csels[0].csmasks[0];
1111 pvt->dram_type = MEM_DDR4;
1112 else if (pvt->dclr0 & BIT(16))
1113 pvt->dram_type = MEM_DDR3;
1115 pvt->dram_type = MEM_LRDDR3;
1117 pvt->dram_type = MEM_RDDR3;
1125 WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1126 pvt->dram_type = MEM_EMPTY;
1131 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1135 static int k8_early_channel_count(struct amd64_pvt *pvt)
1139 if (pvt->ext_model >= K8_REV_F)
1141 flag = pvt->dclr0 & WIDTH_128;
1144 flag = pvt->dclr0 & REVE_WIDTH_128;
1147 pvt->dclr1 = 0;
1153 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1165 pvt = mci->pvt_info;
1167 if (pvt->fam == 0xf) {
1177 if (pvt->fam == 0x15) {
1186 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1201 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1234 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1242 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
1243 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1245 if (pvt->fam == 0xf)
1248 if (!dram_rw(pvt, range))
1251 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
1252 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1255 if (pvt->fam != 0x15)
1258 nb = node_to_amd_nb(dram_dst_node(pvt, range));
1262 if (pvt->model == 0x60)
1264 else if (pvt->model == 0x30)
1275 pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1278 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1280 pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1283 pvt->ranges[range].lim.hi |= llim >> 13;
1291 struct amd64_pvt *pvt = mci->pvt_info;
1315 if (pvt->nbcfg & NBCFG_CHIPKILL) {
1356 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1359 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1361 if (pvt->ext_model >= K8_REV_F) {
1365 else if (pvt->ext_model >= K8_REV_D) {
1411 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1416 if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1435 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1453 static int f17_early_channel_count(struct amd64_pvt *pvt)
1459 channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
1523 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1526 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1530 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1539 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1548 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1552 u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1556 if (pvt->dram_type == MEM_DDR4) {
1561 } else if (pvt->dram_type == MEM_LRDDR3) {
1581 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1593 static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1621 addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
1623 addr_mask_orig = pvt->csels[umc].csmasks[dimm];
1654 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1657 if (pvt->fam == 0xf)
1660 if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1662 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1665 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1667 if (!dct_ganging_enabled(pvt))
1669 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1672 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1673 (dct_memory_cleared(pvt) ? "yes" : "no"));
1677 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1678 dct_sel_interleave_addr(pvt));
1681 amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
1688 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1702 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1719 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1722 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1724 if (dct_ganging_enabled(pvt))
1733 if (dct_interleave_enabled(pvt)) {
1734 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1756 if (dct_high_range_enabled(pvt))
1763 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1768 u64 dram_base = get_dram_base(pvt, range);
1769 u64 hole_off = f10_dhar_offset(pvt);
1770 u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1785 dct_sel_base_addr < dhar_base(pvt)) &&
1786 dhar_valid(pvt) &&
1801 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1814 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1818 if (online_spare_swap_done(pvt, dct) &&
1819 csrow == online_spare_bad_dramcs(pvt, dct)) {
1821 for_each_chip_select(tmp_cs, dct, pvt) {
1822 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1842 struct amd64_pvt *pvt;
1851 pvt = mci->pvt_info;
1855 for_each_chip_select(csrow, dct, pvt) {
1856 if (!csrow_enabled(csrow, dct, pvt))
1859 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1870 if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1874 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1888 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1892 if (pvt->fam == 0x10) {
1894 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1898 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
1918 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1927 u8 node_id = dram_dst_node(pvt, range);
1928 u8 intlv_en = dram_intlv_en(pvt, range);
1929 u32 intlv_sel = dram_intlv_sel(pvt, range);
1932 range, sys_addr, get_dram_limit(pvt, range));
1934 if (dhar_valid(pvt) &&
1935 dhar_base(pvt) <= sys_addr &&
1945 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1947 dct_sel_base = dct_sel_baseaddr(pvt);
1953 if (dct_high_range_enabled(pvt) &&
1954 !dct_ganging_enabled(pvt) &&
1958 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1960 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1969 if (dct_interleave_enabled(pvt) &&
1970 !dct_high_range_enabled(pvt) &&
1971 !dct_ganging_enabled(pvt)) {
1973 if (dct_sel_interleave_addr(pvt) != 1) {
1974 if (dct_sel_interleave_addr(pvt) == 0x3)
1998 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2008 u64 dhar_offset = f10_dhar_offset(pvt);
2009 u8 intlv_addr = dct_sel_interleave_addr(pvt);
2010 u8 node_id = dram_dst_node(pvt, range);
2011 u8 intlv_en = dram_intlv_en(pvt, range);
2013 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
2014 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
2020 range, sys_addr, get_dram_limit(pvt, range));
2022 if (!(get_dram_base(pvt, range) <= sys_addr) &&
2023 !(get_dram_limit(pvt, range) >= sys_addr))
2026 if (dhar_valid(pvt) &&
2027 dhar_base(pvt) <= sys_addr &&
2035 dct_base = (u64) dct_sel_baseaddr(pvt);
2049 if (pvt->model >= 0x60)
2050 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2052 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2092 amd64_read_pci_cfg(pvt->F1,
2098 f15h_select_dct(pvt, channel);
2107 * pvt->csels[1]. So we need to use '1' here to get correct info.
2120 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2128 if (!dram_rw(pvt, range))
2131 if (pvt->fam == 0x15 && pvt->model >= 0x30)
2132 cs_found = f15_m30h_match_to_this_node(pvt, range,
2136 else if ((get_dram_base(pvt, range) <= sys_addr) &&
2137 (get_dram_limit(pvt, range) >= sys_addr)) {
2138 cs_found = f1x_match_to_this_node(pvt, range,
2157 struct amd64_pvt *pvt = mci->pvt_info;
2161 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2172 if (dct_ganging_enabled(pvt))
2180 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2183 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2184 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
2186 if (pvt->fam == 0xf) {
2188 if (pvt->ext_model < K8_REV_F)
2194 if (pvt->fam == 0x10) {
2195 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2196 : pvt->dbam0;
2197 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2198 pvt->csels[1].csbases :
2199 pvt->csels[0].csbases;
2201 dbam = pvt->dbam0;
2202 dcsb = pvt->csels[1].csbases;
2220 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
2226 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
2521 struct amd64_pvt *pvt = mci->pvt_info;
2524 if (pvt->ecc_sym_sz == 8)
2527 pvt->ecc_sym_sz);
2528 else if (pvt->ecc_sym_sz == 4)
2531 pvt->ecc_sym_sz);
2533 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2537 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2590 struct amd64_pvt *pvt;
2601 pvt = mci->pvt_info;
2613 sys_addr = get_error_address(pvt, m);
2618 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2641 struct amd64_pvt *pvt;
2649 pvt = mci->pvt_info;
2674 if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
2686 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
2691 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2693 if (pvt->umc) {
2694 pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2695 if (!pvt->F0) {
2700 pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2701 if (!pvt->F6) {
2702 pci_dev_put(pvt->F0);
2703 pvt->F0 = NULL;
2710 pci_ctl_dev = &pvt->F0->dev;
2712 edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
2713 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2714 edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
2720 pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2721 if (!pvt->F1) {
2727 pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2728 if (!pvt->F2) {
2729 pci_dev_put(pvt->F1);
2730 pvt->F1 = NULL;
2737 pci_ctl_dev = &pvt->F2->dev;
2739 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2740 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2741 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2746 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2748 if (pvt->umc) {
2749 pci_dev_put(pvt->F0);
2750 pci_dev_put(pvt->F6);
2752 pci_dev_put(pvt->F1);
2753 pci_dev_put(pvt->F2);
2757 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
2759 pvt->ecc_sym_sz = 4;
2761 if (pvt->umc) {
2766 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
2767 if (pvt->umc[i].ecc_ctrl & BIT(9)) {
2768 pvt->ecc_sym_sz = 16;
2770 } else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
2771 pvt->ecc_sym_sz = 8;
2776 } else if (pvt->fam >= 0x10) {
2779 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2781 if (pvt->fam != 0x16)
2782 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2785 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2786 pvt->ecc_sym_sz = 8;
2793 static void __read_mc_regs_df(struct amd64_pvt *pvt)
2795 u8 nid = pvt->mc_node_id;
2803 umc = &pvt->umc[i];
2817 static void read_mc_regs(struct amd64_pvt *pvt)
2826 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2827 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2832 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2833 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2838 if (pvt->umc) {
2839 __read_mc_regs_df(pvt);
2840 amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
2845 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2847 read_dram_ctl_register(pvt);
2853 read_dram_base_limit_regs(pvt, range);
2855 rw = dram_rw(pvt, range);
2861 get_dram_base(pvt, range),
2862 get_dram_limit(pvt, range));
2865 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2868 dram_intlv_sel(pvt, range),
2869 dram_dst_node(pvt, range));
2872 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2873 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2875 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2877 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2878 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
2880 if (!dct_ganging_enabled(pvt)) {
2881 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2882 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
2886 read_dct_base_mask(pvt);
2888 determine_memory_type(pvt);
2889 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
2891 determine_ecc_sym_sz(pvt);
2928 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
2930 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2934 if (!pvt->umc) {
2938 cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
2941 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
2953 struct amd64_pvt *pvt = mci->pvt_info;
2974 for_each_chip_select(cs, umc, pvt) {
2975 if (!csrow_enabled(cs, umc, pvt))
2982 pvt->mc_node_id, cs);
2984 dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
2985 dimm->mtype = pvt->dram_type;
3001 struct amd64_pvt *pvt = mci->pvt_info;
3009 if (pvt->umc)
3012 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
3014 pvt->nbcfg = val;
3017 pvt->mc_node_id, val,
3023 for_each_chip_select(i, 0, pvt) {
3024 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
3027 if (pvt->fam != 0xf)
3028 row_dct1 = !!csrow_enabled(i, 1, pvt);
3037 pvt->mc_node_id, i);
3040 nr_pages = get_csrow_nr_pages(pvt, 0, i);
3045 if (pvt->fam != 0xf && row_dct1) {
3046 int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
3055 if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3056 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3061 for (j = 0; j < pvt->channel_count; j++) {
3063 dimm->mtype = pvt->dram_type;
3232 static bool ecc_enabled(struct amd64_pvt *pvt)
3234 u16 nid = pvt->mc_node_id;
3244 umc = &pvt->umc[i];
3265 amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
3285 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3290 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3291 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3292 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3294 dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3295 dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3317 struct amd64_pvt *pvt = mci->pvt_info;
3322 if (pvt->umc) {
3323 f17h_determine_edac_ctl_cap(mci, pvt);
3325 if (pvt->nbcap & NBCAP_SECDED)
3328 if (pvt->nbcap & NBCAP_CHIPKILL)
3332 mci->edac_cap = determine_edac_cap(pvt);
3335 mci->dev_name = pci_name(pvt->F3);
3346 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3348 pvt->ext_model = boot_cpu_data.x86_model >> 4;
3349 pvt->stepping = boot_cpu_data.x86_stepping;
3350 pvt->model = boot_cpu_data.x86_model;
3351 pvt->fam = boot_cpu_data.x86;
3353 switch (pvt->fam) {
3356 pvt->ops = &family_types[K8_CPUS].ops;
3361 pvt->ops = &family_types[F10_CPUS].ops;
3365 if (pvt->model == 0x30) {
3367 pvt->ops = &family_types[F15_M30H_CPUS].ops;
3369 } else if (pvt->model == 0x60) {
3371 pvt->ops = &family_types[F15_M60H_CPUS].ops;
3374 } else if (pvt->model == 0x13) {
3378 pvt->ops = &family_types[F15_CPUS].ops;
3383 if (pvt->model == 0x30) {
3385 pvt->ops = &family_types[F16_M30H_CPUS].ops;
3389 pvt->ops = &family_types[F16_CPUS].ops;
3393 if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
3395 pvt->ops = &family_types[F17_M10H_CPUS].ops;
3397 } else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
3399 pvt->ops = &family_types[F17_M30H_CPUS].ops;
3401 } else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
3403 pvt->ops = &family_types[F17_M60H_CPUS].ops;
3405 } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
3407 pvt->ops = &family_types[F17_M70H_CPUS].ops;
3413 pvt->ops = &family_types[F17_CPUS].ops;
3415 if (pvt->fam == 0x18)
3420 if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
3422 pvt->ops = &family_types[F17_M70H_CPUS].ops;
3427 pvt->ops = &family_types[F19_CPUS].ops;
3437 (pvt->fam == 0xf ?
3438 (pvt->ext_model >= K8_REV_F ? "revF or later "
3440 : ""), pvt->mc_node_id);
3454 static int hw_info_get(struct amd64_pvt *pvt)
3459 if (pvt->fam >= 0x17) {
3460 pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3461 if (!pvt->umc)
3471 ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
3475 read_mc_regs(pvt);
3480 static void hw_info_put(struct amd64_pvt *pvt)
3482 if (pvt->F0 || pvt->F1)
3483 free_mc_sibling_devs(pvt);
3485 kfree(pvt->umc);
3488 static int init_one_instance(struct amd64_pvt *pvt)
3499 pvt->channel_count = pvt->ops->early_channel_count(pvt);
3500 if (pvt->channel_count < 0)
3505 layers[0].size = pvt->csels[0].b_cnt;
3517 mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
3521 mci->pvt_info = pvt;
3522 mci->pdev = &pvt->F3->dev;
3539 static bool instance_has_memory(struct amd64_pvt *pvt)
3545 for_each_chip_select(cs, dct, pvt)
3546 cs_enabled |= csrow_enabled(cs, dct, pvt);
3555 struct amd64_pvt *pvt = NULL;
3566 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
3567 if (!pvt)
3570 pvt->mc_node_id = nid;
3571 pvt->F3 = F3;
3574 fam_type = per_family_init(pvt);
3578 ret = hw_info_get(pvt);
3583 if (!instance_has_memory(pvt)) {
3588 if (!ecc_enabled(pvt)) {
3604 ret = init_one_instance(pvt);
3614 dump_misc_regs(pvt);
3619 hw_info_put(pvt);
3620 kfree(pvt);
3635 struct amd64_pvt *pvt;
3642 pvt = mci->pvt_info;
3652 hw_info_put(pvt);
3653 kfree(pvt);
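
For reference, a short hypothetical sketch of how the chip-select helpers matched at lines 441-448 (for_each_chip_select, chip_select_base) combine with csrow_enabled(), the way the loops at lines 465-469 and 3023-3040 do. example_walk_csrows() is not a function in the matched file, and the sketch assumes the driver's internal definitions (struct amd64_pvt, csrow_enabled(), edac_dbg()).

/*
 * Hypothetical sketch, not part of the matched file: walk DCT 0 of one
 * node and report each enabled chip select, using the helpers shown at
 * lines 441-448 together with csrow_enabled() and edac_dbg().
 */
static void example_walk_csrows(struct amd64_pvt *pvt)
{
	int cs;

	for_each_chip_select(cs, 0, pvt) {
		if (!csrow_enabled(cs, 0, pvt))
			continue;

		edac_dbg(1, "MC%u: csrow %d enabled, DCSB=0x%08x\n",
			 pvt->mc_node_id, cs, chip_select_base(cs, 0, pvt));
	}
}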