Lines Matching refs:pvt (references to struct amd64_pvt in the amd64 EDAC driver; a small standalone sketch of the chip-select bookkeeping these references use follows the listing)

16 static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg)
18 if (!pvt->flags.zn_regs_v2)
102 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
106 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
107 reg &= (pvt->model == 0x30) ? ~3 : ~1;
109 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
126 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
129 switch (pvt->fam) {
142 if (dct_ganging_enabled(pvt))
154 dct = (dct && pvt->model == 0x30) ? 3 : dct;
155 f15h_select_dct(pvt, dct);
166 return amd64_read_pci_cfg(pvt->F2, offset, val);
187 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
215 if (pvt->fam == 0x15 && pvt->model == 0x60) {
216 f15h_select_dct(pvt, 0);
217 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
218 f15h_select_dct(pvt, 1);
219 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
221 pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
232 struct amd64_pvt *pvt = mci->pvt_info;
235 if (pvt->fam == 0xf)
238 if (pvt->fam == 0x15) {
240 if (pvt->model < 0x10)
241 f15h_select_dct(pvt, 0);
243 if (pvt->model == 0x60)
246 return __set_scrub_rate(pvt, bw, min_scrubrate);
251 struct amd64_pvt *pvt = mci->pvt_info;
255 if (pvt->fam == 0x15) {
257 if (pvt->model < 0x10)
258 f15h_select_dct(pvt, 0);
260 if (pvt->model == 0x60)
261 amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
263 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
265 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
283 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
295 return ((addr >= get_dram_base(pvt, nid)) &&
296 (addr <= get_dram_limit(pvt, nid)));
308 struct amd64_pvt *pvt;
316 pvt = mci->pvt_info;
323 intlv_en = dram_intlv_en(pvt, 0);
327 if (base_limit_match(pvt, sys_addr, node_id))
343 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
351 if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
372 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
378 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
379 csbase = pvt->csels[dct].csbases[csrow];
380 csmask = pvt->csels[dct].csmasks[csrow];
389 } else if (pvt->fam == 0x16 ||
390 (pvt->fam == 0x15 && pvt->model >= 0x30)) {
391 csbase = pvt->csels[dct].csbases[csrow];
392 csmask = pvt->csels[dct].csmasks[csrow >> 1];
407 csbase = pvt->csels[dct].csbases[csrow];
408 csmask = pvt->csels[dct].csmasks[csrow >> 1];
411 if (pvt->fam == 0x15)
428 #define for_each_chip_select(i, dct, pvt) \
429 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
431 #define chip_select_base(i, dct, pvt) \
432 pvt->csels[dct].csbases[i]
434 #define for_each_chip_select_mask(i, dct, pvt) \
435 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
438 for (i = 0; i < pvt->max_mcs; i++)
446 struct amd64_pvt *pvt;
450 pvt = mci->pvt_info;
452 for_each_chip_select(csrow, 0, pvt) {
453 if (!csrow_enabled(csrow, 0, pvt))
456 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
463 pvt->mc_node_id);
469 (unsigned long)input_addr, pvt->mc_node_id);
493 struct amd64_pvt *pvt = mci->pvt_info;
496 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
498 pvt->ext_model, pvt->mc_node_id);
503 if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
508 if (!dhar_valid(pvt)) {
510 pvt->mc_node_id);
532 *hole_base = dhar_base(pvt);
535 *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
536 : k8_dhar_offset(pvt);
539 pvt->mc_node_id, (unsigned long)*hole_base,
551 struct amd64_pvt *pvt = mci->pvt_info; \
553 return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
602 struct amd64_pvt *pvt = mci->pvt_info;
603 return sprintf(buf, "0x%x\n", pvt->injection.section);
617 struct amd64_pvt *pvt = mci->pvt_info;
630 pvt->injection.section = (u32) value;
638 struct amd64_pvt *pvt = mci->pvt_info;
639 return sprintf(buf, "0x%x\n", pvt->injection.word);
653 struct amd64_pvt *pvt = mci->pvt_info;
666 pvt->injection.word = (u32) value;
675 struct amd64_pvt *pvt = mci->pvt_info;
676 return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
689 struct amd64_pvt *pvt = mci->pvt_info;
702 pvt->injection.bit_map = (u32) value;
707 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
715 struct amd64_pvt *pvt = mci->pvt_info;
725 section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
727 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
729 word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);
732 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
740 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
748 struct amd64_pvt *pvt = mci->pvt_info;
758 section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
760 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
762 word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);
771 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
775 amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
811 struct amd64_pvt *pvt = mci->pvt_info;
814 if (pvt->fam >= 0x10 && pvt->fam <= 0x16)
857 struct amd64_pvt *pvt = mci->pvt_info;
861 dram_base = get_dram_base(pvt, pvt->mc_node_id);
912 struct amd64_pvt *pvt;
916 pvt = mci->pvt_info;
922 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
1327 static unsigned long dct_determine_edac_cap(struct amd64_pvt *pvt)
1332 bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
1336 if (pvt->dclr0 & BIT(bit))
1342 static unsigned long umc_determine_edac_cap(struct amd64_pvt *pvt)
1348 if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
1354 if (pvt->umc[i].umc_cfg & BIT(12))
1368 static void dct_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1370 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1371 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1374 if (pvt->fam == 0xf) {
1376 if (pvt->ext_model < K8_REV_F)
1382 if (pvt->fam == 0x10) {
1383 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
1384 : pvt->dbam0;
1385 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
1386 pvt->csels[1].csbases :
1387 pvt->csels[0].csbases;
1389 dbam = pvt->dbam0;
1390 dcsb = pvt->csels[1].csbases;
1407 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1413 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1424 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
1428 if (pvt->dram_type == MEM_LRDDR3) {
1429 u32 dcsm = pvt->csels[chan].csmasks[0];
1445 if (pvt->fam == 0x10)
1465 static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
1470 if (csrow_enabled(2 * dimm, ctrl, pvt))
1473 if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
1477 if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
1485 for_each_chip_select(base, ctrl, pvt)
1486 count += csrow_enabled(base, ctrl, pvt);
1489 pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
1533 static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1572 if (!pvt->flags.zn_regs_v2)
1577 addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
1579 addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
1584 static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1594 cs_mode = umc_get_cs_mode(dimm, ctrl, pvt);
1596 size0 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs0);
1597 size1 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs1);
1605 static void umc_dump_misc_regs(struct amd64_pvt *pvt)
1612 umc = &pvt->umc[i];
1619 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
1622 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
1637 amd_smn_read(pvt->mc_node_id,
1638 umc_base + get_umc_reg(pvt, UMCCH_ADDR_CFG),
1644 umc_debug_display_dimm_sizes(pvt, i);
1648 static void dct_dump_misc_regs(struct amd64_pvt *pvt)
1650 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
1653 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
1656 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
1657 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
1659 debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
1661 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
1664 pvt->dhar, dhar_base(pvt),
1665 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
1666 : f10_dhar_offset(pvt));
1668 dct_debug_display_dimm_sizes(pvt, 0);
1671 if (pvt->fam == 0xf)
1674 dct_debug_display_dimm_sizes(pvt, 1);
1677 if (!dct_ganging_enabled(pvt))
1678 debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
1680 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
1682 amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
1688 static void dct_prep_chip_selects(struct amd64_pvt *pvt)
1690 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
1691 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
1692 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
1693 } else if (pvt->fam == 0x15 && pvt->model == 0x30) {
1694 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
1695 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
1697 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
1698 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
1702 static void umc_prep_chip_selects(struct amd64_pvt *pvt)
1707 pvt->csels[umc].b_cnt = 4;
1708 pvt->csels[umc].m_cnt = pvt->flags.zn_regs_v2 ? 4 : 2;
1712 static void umc_read_base_mask(struct amd64_pvt *pvt)
1726 for_each_chip_select(cs, umc, pvt) {
1727 base = &pvt->csels[umc].csbases[cs];
1728 base_sec = &pvt->csels[umc].csbases_sec[cs];
1733 if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
1737 if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
1743 umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC);
1745 for_each_chip_select_mask(cs, umc, pvt) {
1746 mask = &pvt->csels[umc].csmasks[cs];
1747 mask_sec = &pvt->csels[umc].csmasks_sec[cs];
1752 if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
1756 if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
1766 static void dct_read_base_mask(struct amd64_pvt *pvt)
1770 for_each_chip_select(cs, 0, pvt) {
1773 u32 *base0 = &pvt->csels[0].csbases[cs];
1774 u32 *base1 = &pvt->csels[1].csbases[cs];
1776 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1780 if (pvt->fam == 0xf)
1783 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1785 cs, *base1, (pvt->fam == 0x10) ? reg1
1789 for_each_chip_select_mask(cs, 0, pvt) {
1792 u32 *mask0 = &pvt->csels[0].csmasks[cs];
1793 u32 *mask1 = &pvt->csels[1].csmasks[cs];
1795 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1799 if (pvt->fam == 0xf)
1802 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1804 cs, *mask1, (pvt->fam == 0x10) ? reg1
1809 static void umc_determine_memory_type(struct amd64_pvt *pvt)
1815 umc = &pvt->umc[i];
1826 if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
1846 static void dct_determine_memory_type(struct amd64_pvt *pvt)
1850 switch (pvt->fam) {
1852 if (pvt->ext_model >= K8_REV_F)
1855 pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1859 if (pvt->dchr0 & DDR3_MODE)
1862 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1866 if (pvt->model < 0x60)
1878 amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1879 dcsm = pvt->csels[0].csmasks[0];
1882 pvt->dram_type = MEM_DDR4;
1883 else if (pvt->dclr0 & BIT(16))
1884 pvt->dram_type = MEM_DDR3;
1886 pvt->dram_type = MEM_LRDDR3;
1888 pvt->dram_type = MEM_RDDR3;
1896 WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1897 pvt->dram_type = MEM_EMPTY;
1900 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
1904 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1908 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1920 pvt = mci->pvt_info;
1922 if (pvt->fam == 0xf) {
1932 if (pvt->fam == 0x15) {
1941 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1956 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1989 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1997 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
1998 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
2000 if (pvt->fam == 0xf)
2003 if (!dram_rw(pvt, range))
2006 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
2007 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
2010 if (pvt->fam != 0x15)
2013 nb = node_to_amd_nb(dram_dst_node(pvt, range));
2017 if (pvt->model == 0x60)
2019 else if (pvt->model == 0x30)
2030 pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
2033 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
2035 pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
2038 pvt->ranges[range].lim.hi |= llim >> 13;
2046 struct amd64_pvt *pvt = mci->pvt_info;
2070 if (pvt->nbcfg & NBCFG_CHIPKILL) {
2111 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2114 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
2116 if (pvt->ext_model >= K8_REV_F) {
2120 else if (pvt->ext_model >= K8_REV_D) {
2215 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2218 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
2222 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
2231 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2240 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2244 u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
2248 if (pvt->dram_type == MEM_DDR4) {
2253 } else if (pvt->dram_type == MEM_LRDDR3) {
2273 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2285 static void read_dram_ctl_register(struct amd64_pvt *pvt)
2288 if (pvt->fam == 0xf)
2291 if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
2293 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
2296 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
2298 if (!dct_ganging_enabled(pvt))
2300 (dct_high_range_enabled(pvt) ? "yes" : "no"));
2303 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
2304 (dct_memory_cleared(pvt) ? "yes" : "no"));
2308 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
2309 dct_sel_interleave_addr(pvt));
2312 amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
2319 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
2333 u8 intlv_addr = dct_sel_interleave_addr(pvt);
2350 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
2353 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
2355 if (dct_ganging_enabled(pvt))
2364 if (dct_interleave_enabled(pvt)) {
2365 u8 intlv_addr = dct_sel_interleave_addr(pvt);
2387 if (dct_high_range_enabled(pvt))
2394 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
2399 u64 dram_base = get_dram_base(pvt, range);
2400 u64 hole_off = f10_dhar_offset(pvt);
2401 u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
2416 dct_sel_base_addr < dhar_base(pvt)) &&
2417 dhar_valid(pvt) &&
2432 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
2445 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
2449 if (online_spare_swap_done(pvt, dct) &&
2450 csrow == online_spare_bad_dramcs(pvt, dct)) {
2452 for_each_chip_select(tmp_cs, dct, pvt) {
2453 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
2473 struct amd64_pvt *pvt;
2482 pvt = mci->pvt_info;
2486 for_each_chip_select(csrow, dct, pvt) {
2487 if (!csrow_enabled(csrow, dct, pvt))
2490 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
2501 if (pvt->fam == 0x15 && pvt->model >= 0x30) {
2505 cs_found = f10_process_possible_spare(pvt, dct, csrow);
2519 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
2523 if (pvt->fam == 0x10) {
2525 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
2529 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
2549 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2558 u8 node_id = dram_dst_node(pvt, range);
2559 u8 intlv_en = dram_intlv_en(pvt, range);
2560 u32 intlv_sel = dram_intlv_sel(pvt, range);
2563 range, sys_addr, get_dram_limit(pvt, range));
2565 if (dhar_valid(pvt) &&
2566 dhar_base(pvt) <= sys_addr &&
2576 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
2578 dct_sel_base = dct_sel_baseaddr(pvt);
2584 if (dct_high_range_enabled(pvt) &&
2585 !dct_ganging_enabled(pvt) &&
2589 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
2591 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
2600 if (dct_interleave_enabled(pvt) &&
2601 !dct_high_range_enabled(pvt) &&
2602 !dct_ganging_enabled(pvt)) {
2604 if (dct_sel_interleave_addr(pvt) != 1) {
2605 if (dct_sel_interleave_addr(pvt) == 0x3)
2629 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2639 u64 dhar_offset = f10_dhar_offset(pvt);
2640 u8 intlv_addr = dct_sel_interleave_addr(pvt);
2641 u8 node_id = dram_dst_node(pvt, range);
2642 u8 intlv_en = dram_intlv_en(pvt, range);
2644 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
2645 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
2651 range, sys_addr, get_dram_limit(pvt, range));
2653 if (!(get_dram_base(pvt, range) <= sys_addr) &&
2654 !(get_dram_limit(pvt, range) >= sys_addr))
2657 if (dhar_valid(pvt) &&
2658 dhar_base(pvt) <= sys_addr &&
2666 dct_base = (u64) dct_sel_baseaddr(pvt);
2680 if (pvt->model >= 0x60)
2681 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2683 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2723 amd64_read_pci_cfg(pvt->F1,
2729 f15h_select_dct(pvt, channel);
2738 * pvt->csels[1]. So we need to use '1' here to get correct info.
2751 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2759 if (!dram_rw(pvt, range))
2762 if (pvt->fam == 0x15 && pvt->model >= 0x30)
2763 cs_found = f15_m30h_match_to_this_node(pvt, range,
2767 else if ((get_dram_base(pvt, range) <= sys_addr) &&
2768 (get_dram_limit(pvt, range) >= sys_addr)) {
2769 cs_found = f1x_match_to_this_node(pvt, range,
2788 struct amd64_pvt *pvt = mci->pvt_info;
2792 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2803 if (dct_ganging_enabled(pvt))
2944 struct amd64_pvt *pvt = mci->pvt_info;
2947 if (pvt->ecc_sym_sz == 8)
2950 pvt->ecc_sym_sz);
2951 else if (pvt->ecc_sym_sz == 4)
2954 pvt->ecc_sym_sz);
2956 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2960 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
3013 struct amd64_pvt *pvt;
3024 pvt = mci->pvt_info;
3036 sys_addr = get_error_address(pvt, m);
3041 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
3068 struct amd64_pvt *pvt;
3078 pvt = mci->pvt_info;
3099 pvt->ops->get_err_info(m, &err);
3101 if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
3113 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
3117 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
3120 pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
3121 if (!pvt->F1) {
3127 pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
3128 if (!pvt->F2) {
3129 pci_dev_put(pvt->F1);
3130 pvt->F1 = NULL;
3137 pci_ctl_dev = &pvt->F2->dev;
3139 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
3140 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
3141 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
3146 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
3148 pvt->ecc_sym_sz = 4;
3150 if (pvt->fam >= 0x10) {
3153 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
3155 if (pvt->fam != 0x16)
3156 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
3159 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
3160 pvt->ecc_sym_sz = 8;
3167 static void umc_read_mc_regs(struct amd64_pvt *pvt)
3169 u8 nid = pvt->mc_node_id;
3177 umc = &pvt->umc[i];
3179 amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg);
3191 static void dct_read_mc_regs(struct amd64_pvt *pvt)
3200 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
3201 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
3206 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
3207 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
3212 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
3214 read_dram_ctl_register(pvt);
3220 read_dram_base_limit_regs(pvt, range);
3222 rw = dram_rw(pvt, range);
3228 get_dram_base(pvt, range),
3229 get_dram_limit(pvt, range));
3232 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
3235 dram_intlv_sel(pvt, range),
3236 dram_dst_node(pvt, range));
3239 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
3240 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
3242 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
3244 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
3245 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
3247 if (!dct_ganging_enabled(pvt)) {
3248 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
3249 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
3252 determine_ecc_sym_sz(pvt);
3289 static u32 dct_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
3291 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
3297 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
3307 static u32 umc_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
3312 cs_mode = umc_get_cs_mode(csrow_nr >> 1, dct, pvt);
3314 nr_pages = umc_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
3326 struct amd64_pvt *pvt = mci->pvt_info;
3346 for_each_chip_select(cs, umc, pvt) {
3347 if (!csrow_enabled(cs, umc, pvt))
3353 pvt->mc_node_id, cs);
3355 dimm->nr_pages = umc_get_csrow_nr_pages(pvt, umc, cs);
3356 dimm->mtype = pvt->umc[umc].dram_type;
3370 struct amd64_pvt *pvt = mci->pvt_info;
3378 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
3380 pvt->nbcfg = val;
3383 pvt->mc_node_id, val,
3389 for_each_chip_select(i, 0, pvt) {
3390 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
3393 if (pvt->fam != 0xf)
3394 row_dct1 = !!csrow_enabled(i, 1, pvt);
3402 pvt->mc_node_id, i);
3405 nr_pages = dct_get_csrow_nr_pages(pvt, 0, i);
3410 if (pvt->fam != 0xf && row_dct1) {
3411 int row_dct1_pages = dct_get_csrow_nr_pages(pvt, 1, i);
3420 if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3421 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3426 for (j = 0; j < pvt->max_mcs; j++) {
3428 dimm->mtype = pvt->dram_type;
3595 static bool dct_ecc_enabled(struct amd64_pvt *pvt)
3597 u16 nid = pvt->mc_node_id;
3602 amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
3619 static bool umc_ecc_enabled(struct amd64_pvt *pvt)
3622 u16 nid = pvt->mc_node_id;
3627 umc = &pvt->umc[i];
3654 umc_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3659 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3660 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3661 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3663 dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3664 dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3686 struct amd64_pvt *pvt = mci->pvt_info;
3691 if (pvt->nbcap & NBCAP_SECDED)
3694 if (pvt->nbcap & NBCAP_CHIPKILL)
3697 mci->edac_cap = dct_determine_edac_cap(pvt);
3699 mci->ctl_name = pvt->ctl_name;
3700 mci->dev_name = pci_name(pvt->F3);
3712 struct amd64_pvt *pvt = mci->pvt_info;
3717 umc_determine_edac_ctl_cap(mci, pvt);
3719 mci->edac_cap = umc_determine_edac_cap(pvt);
3721 mci->ctl_name = pvt->ctl_name;
3722 mci->dev_name = pci_name(pvt->F3);
3728 static int dct_hw_info_get(struct amd64_pvt *pvt)
3730 int ret = reserve_mc_sibling_devs(pvt, pvt->f1_id, pvt->f2_id);
3735 dct_prep_chip_selects(pvt);
3736 dct_read_base_mask(pvt);
3737 dct_read_mc_regs(pvt);
3738 dct_determine_memory_type(pvt);
3743 static int umc_hw_info_get(struct amd64_pvt *pvt)
3745 pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3746 if (!pvt->umc)
3749 umc_prep_chip_selects(pvt);
3750 umc_read_base_mask(pvt);
3751 umc_read_mc_regs(pvt);
3752 umc_determine_memory_type(pvt);
3783 static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
3786 u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr];
3791 static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
3799 for_each_chip_select(cs, ctrl, pvt) {
3800 size = gpu_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs);
3805 static void gpu_dump_misc_regs(struct amd64_pvt *pvt)
3811 umc = &pvt->umc[i];
3818 gpu_debug_display_dimm_sizes(pvt, i);
3822 static u32 gpu_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
3827 nr_pages = gpu_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
3838 struct amd64_pvt *pvt = mci->pvt_info;
3843 for_each_chip_select(cs, umc, pvt) {
3844 if (!csrow_enabled(cs, umc, pvt))
3850 pvt->mc_node_id, cs);
3852 dimm->nr_pages = gpu_get_csrow_nr_pages(pvt, umc, cs);
3863 struct amd64_pvt *pvt = mci->pvt_info;
3870 mci->ctl_name = pvt->ctl_name;
3871 mci->dev_name = pci_name(pvt->F3);
3878 static bool gpu_ecc_enabled(struct amd64_pvt *pvt)
3905 static void gpu_read_mc_regs(struct amd64_pvt *pvt)
3907 u8 nid = pvt->mc_node_id;
3914 umc = &pvt->umc[i];
3922 static void gpu_read_base_mask(struct amd64_pvt *pvt)
3929 for_each_chip_select(cs, umc, pvt) {
3931 base = &pvt->csels[umc].csbases[cs];
3933 if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) {
3939 mask = &pvt->csels[umc].csmasks[cs];
3941 if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) {
3949 static void gpu_prep_chip_selects(struct amd64_pvt *pvt)
3954 pvt->csels[umc].b_cnt = 8;
3955 pvt->csels[umc].m_cnt = 8;
3959 static int gpu_hw_info_get(struct amd64_pvt *pvt)
3967 pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3968 if (!pvt->umc)
3971 gpu_prep_chip_selects(pvt);
3972 gpu_read_base_mask(pvt);
3973 gpu_read_mc_regs(pvt);
3978 static void hw_info_put(struct amd64_pvt *pvt)
3980 pci_dev_put(pvt->F1);
3981 pci_dev_put(pvt->F2);
3982 kfree(pvt->umc);
4011 static int per_family_init(struct amd64_pvt *pvt)
4013 pvt->ext_model = boot_cpu_data.x86_model >> 4;
4014 pvt->stepping = boot_cpu_data.x86_stepping;
4015 pvt->model = boot_cpu_data.x86_model;
4016 pvt->fam = boot_cpu_data.x86;
4017 pvt->max_mcs = 2;
4023 if (pvt->fam >= 0x17)
4024 pvt->ops = &umc_ops;
4026 pvt->ops = &dct_ops;
4028 switch (pvt->fam) {
4030 pvt->ctl_name = (pvt->ext_model >= K8_REV_F) ?
4032 pvt->f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP;
4033 pvt->f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL;
4034 pvt->ops->map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow;
4035 pvt->ops->dbam_to_cs = k8_dbam_to_chip_select;
4039 pvt->ctl_name = "F10h";
4040 pvt->f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP;
4041 pvt->f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM;
4042 pvt->ops->dbam_to_cs = f10_dbam_to_chip_select;
4046 switch (pvt->model) {
4048 pvt->ctl_name = "F15h_M30h";
4049 pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
4050 pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2;
4053 pvt->ctl_name = "F15h_M60h";
4054 pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
4055 pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2;
4056 pvt->ops->dbam_to_cs = f15_m60h_dbam_to_chip_select;
4062 pvt->ctl_name = "F15h";
4063 pvt->f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1;
4064 pvt->f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2;
4065 pvt->ops->dbam_to_cs = f15_dbam_to_chip_select;
4071 switch (pvt->model) {
4073 pvt->ctl_name = "F16h_M30h";
4074 pvt->f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1;
4075 pvt->f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2;
4078 pvt->ctl_name = "F16h";
4079 pvt->f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1;
4080 pvt->f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2;
4086 switch (pvt->model) {
4088 pvt->ctl_name = "F17h_M10h";
4091 pvt->ctl_name = "F17h_M30h";
4092 pvt->max_mcs = 8;
4095 pvt->ctl_name = "F17h_M60h";
4098 pvt->ctl_name = "F17h_M70h";
4101 pvt->ctl_name = "F17h";
4107 pvt->ctl_name = "F18h";
4111 switch (pvt->model) {
4113 pvt->ctl_name = "F19h";
4114 pvt->max_mcs = 8;
4117 pvt->ctl_name = "F19h_M10h";
4118 pvt->max_mcs = 12;
4119 pvt->flags.zn_regs_v2 = 1;
4122 pvt->ctl_name = "F19h_M20h";
4125 if (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) {
4126 pvt->ctl_name = "MI200";
4127 pvt->max_mcs = 4;
4128 pvt->ops = &gpu_ops;
4130 pvt->ctl_name = "F19h_M30h";
4131 pvt->max_mcs = 8;
4135 pvt->ctl_name = "F19h_M50h";
4138 pvt->ctl_name = "F19h_M60h";
4139 pvt->flags.zn_regs_v2 = 1;
4142 pvt->ctl_name = "F19h_M70h";
4143 pvt->flags.zn_regs_v2 = 1;
4146 pvt->ctl_name = "F19h_MA0h";
4147 pvt->max_mcs = 12;
4148 pvt->flags.zn_regs_v2 = 1;
4154 switch (pvt->model) {
4156 pvt->ctl_name = "F1Ah";
4157 pvt->max_mcs = 12;
4158 pvt->flags.zn_regs_v2 = 1;
4161 pvt->ctl_name = "F1Ah_M40h";
4162 pvt->flags.zn_regs_v2 = 1;
4183 static int init_one_instance(struct amd64_pvt *pvt)
4194 layers[0].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ?
4195 pvt->max_mcs : pvt->csels[0].b_cnt;
4198 layers[1].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ?
4199 pvt->csels[0].b_cnt : pvt->max_mcs;
4202 mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
4206 mci->pvt_info = pvt;
4207 mci->pdev = &pvt->F3->dev;
4209 pvt->ops->setup_mci_misc_attrs(mci);
4221 static bool instance_has_memory(struct amd64_pvt *pvt)
4226 for (dct = 0; dct < pvt->max_mcs; dct++) {
4227 for_each_chip_select(cs, dct, pvt)
4228 cs_enabled |= csrow_enabled(cs, dct, pvt);
4237 struct amd64_pvt *pvt = NULL;
4248 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
4249 if (!pvt)
4252 pvt->mc_node_id = nid;
4253 pvt->F3 = F3;
4255 ret = per_family_init(pvt);
4259 ret = pvt->ops->hw_info_get(pvt);
4264 if (!instance_has_memory(pvt)) {
4269 if (!pvt->ops->ecc_enabled(pvt)) {
4285 ret = init_one_instance(pvt);
4295 amd64_info("%s detected (node %d).\n", pvt->ctl_name, pvt->mc_node_id);
4298 pvt->ops->dump_misc_regs(pvt);
4303 hw_info_put(pvt);
4304 kfree(pvt);
4319 struct amd64_pvt *pvt;
4326 pvt = mci->pvt_info;
4336 hw_info_put(pvt);
4337 kfree(pvt);
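
For orientation, below is a small standalone sketch (userspace C, not the driver itself) of the chip-select bookkeeping that many of the lines above iterate over. Field and macro names mirror the listing; the array sizes, the bit-0 enable test, and the trimmed struct amd64_pvt are simplifying assumptions, and the real declarations live in the driver's header.

/*
 * Illustrative sketch only: names are taken from the listing above,
 * but MAX_CS, MAX_CTRL, the trimmed struct layout and the bit-0 enable
 * test are assumptions (the real driver uses per-family enable bits).
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint8_t u8;

#define MAX_CS   8          /* placeholder: matches the b_cnt/m_cnt values seen above */
#define MAX_CTRL 2          /* placeholder: two DCTs in the pre-UMC layout             */

struct chip_select {
	u32 csbases[MAX_CS];    /* chip-select base registers */
	u32 csmasks[MAX_CS];    /* chip-select mask registers */
	u8  b_cnt;              /* number of valid bases      */
	u8  m_cnt;              /* number of valid masks      */
};

struct amd64_pvt {          /* trimmed to the fields this sketch needs */
	struct chip_select csels[MAX_CTRL];
	u8 max_mcs;
};

/* Iteration helpers in the shape they appear in the listing. */
#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < (pvt)->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	(pvt)->csels[dct].csbases[i]

/* Assumed enable test: bit 0 of a base register marks the csrow enabled. */
static int csrow_enabled(int i, u8 dct, struct amd64_pvt *pvt)
{
	return chip_select_base(i, dct, pvt) & 1;
}

/* Mirrors the shape of instance_has_memory() from the listing: the node
 * has memory if any chip select on any controller is enabled. */
static int instance_has_memory(struct amd64_pvt *pvt)
{
	int cs, enabled = 0;
	u8 dct;

	for (dct = 0; dct < pvt->max_mcs; dct++)
		for_each_chip_select(cs, dct, pvt)
			enabled |= csrow_enabled(cs, dct, pvt);

	return enabled;
}

int main(void)
{
	struct amd64_pvt pvt = { .max_mcs = MAX_CTRL };

	pvt.csels[0].b_cnt = 8;
	pvt.csels[1].b_cnt = 8;
	pvt.csels[0].csbases[2] = 0x00400001;   /* pretend csrow 2 on DCT0 is enabled */

	printf("has memory: %d\n", instance_has_memory(&pvt));
	return 0;
}

Built with any C99 compiler this prints "has memory: 1", matching the check the listing shows probe_one_instance() making via instance_has_memory() before an EDAC instance is set up.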