Lines Matching defs:sys_addr (drivers/edac/amd64_edac.c, the AMD64 EDAC memory-controller driver)
293 * returns true if the SysAddr given by sys_addr matches the
296 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
306 addr = sys_addr & 0x000000ffffffffffull;
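The check above only compares bits 39:0 of the SysAddr against the node's DRAM base/limit pair. A minimal standalone sketch of that comparison, assuming base and limit were already read from the node's DRAM Base/Limit registers; the function and parameter names are illustrative, not the driver's API:

#include <stdbool.h>
#include <stdint.h>

/* The K8 address map is 40 bits wide, so bits 63:40 of the SysAddr are
 * discarded before comparing against the node's base/limit pair. */
bool base_limit_match_sketch(uint64_t base, uint64_t limit, uint64_t sys_addr)
{
        uint64_t addr = sys_addr & 0x000000ffffffffffULL;

        return addr >= base && addr <= limit;
}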
319 u64 sys_addr)
340 if (base_limit_match(pvt, sys_addr, node_id))
353 bits = (((u32) sys_addr) >> 12) & intlv_en;
363 /* sanity test for sys_addr */
364 if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
365 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
367 __func__, sys_addr, node_id);
375 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
376 (unsigned long)sys_addr);
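When node interleaving is enabled, the select bits taken from SysAddr bits 14:12 (masked by the interleave-enable field) must match a node's IntlvSel field before the base/limit sanity test above is applied. A hedged, self-contained illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t sys_addr = 0x12345000ULL;  /* example address, made up */
        uint32_t intlv_en = 0x7;            /* 8-node interleave (assumed) */

        /* Same extraction as the driver: bits 14:12, masked by IntlvEn. */
        uint32_t bits = ((uint32_t)sys_addr >> 12) & intlv_en;

        printf("interleave select bits for 0x%llx: %u\n",
               (unsigned long long)sys_addr, bits);
        return 0;
}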
560 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
561 * assumed that sys_addr maps to the node given by mci.
588 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
599 if ((sys_addr >= (1ULL << 32)) &&
600 (sys_addr < ((1ULL << 32) + hole_size))) {
602 dram_addr = sys_addr - hole_offset;
605 (unsigned long)sys_addr,
614 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
616 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
621 dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
624 (unsigned long)sys_addr, (unsigned long)dram_addr);
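A standalone sketch of this SysAddr-to-DramAddr step, assuming the node's DRAM base and the memory-hole parameters (from DHAR) have already been retrieved; the names and the GENMASK stand-in are assumptions for illustration:

#include <stdbool.h>
#include <stdint.h>

#define MASK_39_0 ((1ULL << 40) - 1)   /* stand-in for GENMASK_ULL(39, 0) */

uint64_t sys_addr_to_dram_addr_sketch(uint64_t sys_addr, uint64_t dram_base,
                                      bool hole_valid, uint64_t hole_offset,
                                      uint64_t hole_size)
{
        /* SysAddrs inside the DRAM hole directly above 4 GB are relocated
         * by the hole offset rather than by the node base. */
        if (hole_valid &&
            sys_addr >= (1ULL << 32) &&
            sys_addr <  (1ULL << 32) + hole_size)
                return sys_addr - hole_offset;

        /* Otherwise only bits 39:0 participate (bit 39 may be sign-extended
         * into 63:40 by hardware), minus the node's DRAM base. */
        return (sys_addr & MASK_39_0) - dram_base;
}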
668 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
669 * assumed that @sys_addr maps to the node given by mci.
671 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
676 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
679 (unsigned long)sys_addr, (unsigned long)input_addr);
693 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
697 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
700 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
704 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
708 "address 0x%lx\n", (unsigned long)sys_addr);
1288 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1293 error_address_to_page_and_offset(sys_addr, err);
1299 err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1302 (unsigned long)sys_addr);
1307 /* Now map the sys_addr to a CSROW */
1308 err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1338 err->channel = ((sys_addr & BIT(3)) != 0);
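Two small pieces of this K8 path are easy to show in isolation: the page/offset split done by error_address_to_page_and_offset() and the channel pick from SysAddr bit 3 (the non-chipkill case). A hedged sketch assuming 4 KiB pages; the struct and names are illustrative only:

#include <stdint.h>

#define PAGE_SHIFT_SKETCH 12    /* assuming 4 KiB pages */

struct err_info_sketch {
        uint32_t page;
        uint32_t offset;
        int channel;
};

void k8_split_sketch(uint64_t sys_addr, struct err_info_sketch *err)
{
        /* Page number and byte offset within the page. */
        err->page   = (uint32_t)(sys_addr >> PAGE_SHIFT_SKETCH);
        err->offset = (uint32_t)sys_addr & ((1U << PAGE_SHIFT_SKETCH) - 1);

        /* Non-chipkill K8: the DCT channel comes from SysAddr bit 3. */
        err->channel = (sys_addr & (1ULL << 3)) != 0;
}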
1688 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1699 select = (sys_addr >> 8) & 0x3;
1705 channel = (sys_addr >> 8) & 0x3;
1708 channel = (sys_addr >> 9) & 0x3;
1719 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1738 return sys_addr >> 6 & 1;
1742 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
1744 return ((sys_addr >> shift) & 1) ^ temp;
1750 return (sys_addr >> shift) & 1;
1753 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
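The hashed interleave case above picks the DCT by XOR-ing one address bit (bit 6 or bit 9, depending on the DctSelIntLvAddr setting) with the parity of SysAddr bits 20:16. A standalone illustration using the compiler's popcount builtin as a stand-in for hweight_long(); names and the chosen shift are assumptions:

#include <stdint.h>

/* Channel select for the hashed DCT-interleave mode: one address bit
 * XOR-ed with the parity of SysAddr bits 20:16.  __builtin_popcount
 * (GCC/Clang) stands in for the kernel's hweight_long(). */
unsigned int f1x_hashed_channel_sketch(uint64_t sys_addr, unsigned int shift)
{
        unsigned int parity =
                __builtin_popcount((uint32_t)((sys_addr >> 16) & 0x1F)) & 1;

        return ((unsigned int)(sys_addr >> shift) & 1) ^ parity;
}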
1762 /* Convert the sys_addr to the normalized DCT address */
1764 u64 sys_addr, bool hi_rng,
1778 * sys_addr > 4Gb
1780 * remove hole offset from sys_addr
1782 * remove high range offset from sys_addr
1787 (sys_addr >= BIT_64(32)))
1795 * sys_addr > 4Gb
1801 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1807 return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
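The final normalization keeps only SysAddr bits 47:6 and subtracts the chosen channel offset masked to bits 47:23. A hedged sketch with portable GENMASK stand-ins; chan_off is assumed to have already been selected between the hole offset, the DctSelBase offset, and the DRAM base as the comments above describe:

#include <stdint.h>

/* Portable stand-ins for GENMASK_ULL(47, 6) and GENMASK_ULL(47, 23). */
#define MASK_47_6  (((1ULL << 48) - 1) & ~((1ULL << 6) - 1))
#define MASK_47_23 (((1ULL << 48) - 1) & ~((1ULL << 23) - 1))

uint64_t norm_dct_addr_sketch(uint64_t sys_addr, uint64_t chan_off)
{
        /* Drop bits below the cache line (5:0) and above 47, then remove
         * the channel offset, which is only meaningful at 8 MB granularity. */
        return (sys_addr & MASK_47_6) - (chan_off & MASK_47_23);
}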
1888 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1895 return sys_addr;
1901 return sys_addr;
1906 tmp_addr = sys_addr >> 27;
1908 if (!(sys_addr >> 34) &&
1912 return sys_addr ^ (u64)swap_base << 27;
1914 return sys_addr;
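Region swapping works at 128 MB granularity: if SysAddr bits 33:27 land inside the configured swap window or below the region size, those bits are XOR-ed with the swap base. A self-contained sketch, with the register decoding omitted; swap_base, swap_limit and rgn_size are assumed to be already extracted fields:

#include <stdint.h>

uint64_t swap_interleaved_region_sketch(uint64_t sys_addr,
                                        uint32_t swap_base,
                                        uint32_t swap_limit,
                                        uint32_t rgn_size)
{
        uint32_t tmp_addr = (uint32_t)(sys_addr >> 27);   /* 128 MB units */

        /* Only addresses below 16 GB (bit 34 clear) are candidates. */
        if (!(sys_addr >> 34) &&
            ((tmp_addr >= swap_base && tmp_addr <= swap_limit) ||
             tmp_addr < rgn_size))
                return sys_addr ^ ((uint64_t)swap_base << 27);

        return sys_addr;
}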
1917 /* For a given @dram_range, check if @sys_addr falls within it. */
1919 u64 sys_addr, int *chan_sel)
1932 range, sys_addr, get_dram_limit(pvt, range));
1935 dhar_base(pvt) <= sys_addr &&
1936 sys_addr < BIT_64(32)) {
1938 sys_addr);
1942 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1945 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1955 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1958 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1960 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1999 u64 sys_addr, int *chan_sel)
2020 range, sys_addr, get_dram_limit(pvt, range));
2022 if (!(get_dram_base(pvt, range) <= sys_addr) &&
2023 !(get_dram_limit(pvt, range) >= sys_addr))
2027 dhar_base(pvt) <= sys_addr &&
2028 sys_addr < BIT_64(32)) {
2030 sys_addr);
2034 /* Verify sys_addr is within DCT Range. */
2039 !(dct_base <= (sys_addr >> 27) &&
2040 dct_limit >= (sys_addr >> 27)))
2050 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2052 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2062 if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2067 chan_addr = sys_addr - chan_offset;
2121 u64 sys_addr,
2133 sys_addr,
2136 else if ((get_dram_base(pvt, range) <= sys_addr) &&
2137 (get_dram_limit(pvt, range) >= sys_addr)) {
2139 sys_addr, chan_sel);
2149 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2151 * The @sys_addr is usually an error address received from the hardware
2154 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2159 error_address_to_page_and_offset(sys_addr, err);
2161 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2594 u64 sys_addr;
2613 sys_addr = get_error_address(pvt, m);
2618 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2643 u64 sys_addr;
2674 if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
2679 error_address_to_page_and_offset(sys_addr, &err);