/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/memory.h
 *
 *  Copyright (C) 2000-2002 Russell King
 *  modification for nommu, Hyok S. Choi, 2004
 *
 *  Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/sizes.h>

#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
#include <asm/kasan_def.h>

/* PAGE_OFFSET - the virtual address of the start of the kernel image */
#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)

#ifdef CONFIG_MMU

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
 */
#ifndef CONFIG_KASAN
#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#else
#define TASK_SIZE		(KASAN_SHADOW_START)
#endif
#define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)

/*
 * The maximum size of a 26-bit user space task.
 */
#define TASK_SIZE_26		(UL(1) << 26)

/*
 * The module space lives between the addresses given by TASK_SIZE
 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
 */
#ifndef CONFIG_THUMB2_KERNEL
#define MODULES_VADDR		(PAGE_OFFSET - SZ_16M)
#else
/* smaller range for Thumb-2 symbol relocations (2^24) */
#define MODULES_VADDR		(PAGE_OFFSET - SZ_8M)
#endif

#if TASK_SIZE > MODULES_VADDR
#error Top of user space clashes with start of module space
#endif

/*
 * The highmem pkmap virtual space shares the end of the module area.
 */
#ifdef CONFIG_HIGHMEM
#define MODULES_END		(PAGE_OFFSET - PMD_SIZE)
#else
#define MODULES_END		(PAGE_OFFSET)
#endif

/*
 * The XIP kernel gets mapped at the bottom of the module vm area.
 * Since we use sections to map it, this macro replaces the physical address
 * with its virtual address while keeping the offset from the base section.
 */
#define XIP_VIRT_ADDR(physaddr)	(MODULES_VADDR + ((physaddr) & 0x000fffff))

#define FDT_FIXED_BASE		UL(0xff800000)
#define FDT_FIXED_SIZE		(2 * SECTION_SIZE)
#define FDT_VIRT_BASE(physbase)	((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Allow 16MB-aligned ioremap pages
 */
#define IOREMAP_MAX_ORDER	24
#endif

#define VECTORS_BASE		UL(0xffff0000)
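
/*
 * Worked example (illustrative only, not a definition): with the common
 * CONFIG_PAGE_OFFSET of 0xc0000000 (3G/1G split), no KASAN and a
 * non-Thumb-2 kernel, the macros above evaluate to:
 *
 *	TASK_SIZE		= 0xc0000000 - SZ_16M = 0xbf000000
 *	TASK_UNMAPPED_BASE	= ALIGN(0xbf000000 / 3, SZ_16M) = 0x40000000
 *	MODULES_VADDR		= 0xc0000000 - SZ_16M = 0xbf000000
 *	MODULES_END		= 0xc0000000 (no highmem pkmap area)
 *
 * i.e. user space ends at 0xbf000000 and the 16MB module area sits
 * directly below the kernel at PAGE_OFFSET, keeping modules within the
 * 32MB branch range of the kernel text.
 */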

#else /* CONFIG_MMU */

#ifndef __ASSEMBLY__
extern unsigned long setup_vectors_base(void);
extern unsigned long vectors_base;
#define VECTORS_BASE		vectors_base
#endif

/*
 * On no-MMU systems the user task size may grow up to the end of the
 * free RAM region, so a fixed limit is hard to define and this value no
 * longer carries the meaning TASK_SIZE was originally intended to have.
 * Fortunately, nothing references it in no-MMU mode, for now.
 */
#define TASK_SIZE		UL(0xffffffff)

#ifndef TASK_UNMAPPED_BASE
#define TASK_UNMAPPED_BASE	UL(0x00000000)
#endif

#ifndef END_MEM
#define END_MEM			(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif

/*
 * Modules can be placed anywhere in RAM in no-MMU mode.
 */
#define MODULES_END		(END_MEM)
#define MODULES_VADDR		PAGE_OFFSET

#define XIP_VIRT_ADDR(physaddr)	(physaddr)
#define FDT_VIRT_BASE(physbase)	((void *)(physbase))

#endif /* !CONFIG_MMU */

#ifdef CONFIG_XIP_KERNEL
#define KERNEL_START		_sdata
#else
#define KERNEL_START		_stext
#endif
#define KERNEL_END		_end

/*
 * We fix the TCM memories (up to 32 KiB of ITCM and 32 KiB of DTCM,
 * respectively) at these locations.
 */
#ifdef CONFIG_HAVE_TCM
#define ITCM_OFFSET	UL(0xfffe0000)
#define DTCM_OFFSET	UL(0xfffe8000)
#endif

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

/*
 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
 * memory. This is used for XIP and NoMMU kernels, and on platforms that
 * don't have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use
 * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
 */
#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)

#ifdef CONFIG_XIP_KERNEL
/*
 * When referencing data in RAM from the XIP region in a relative manner
 * with the MMU off, we need the relative offset between the two physical
 * addresses. The macro below achieves this, which is:
 *    __pa(v_data) - __xip_pa(v_text)
 */
#define PHYS_RELATIVE(v_data, v_text) \
	(((v_data) - PAGE_OFFSET + PLAT_PHYS_OFFSET) - \
	 ((v_text) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + \
	  CONFIG_XIP_PHYS_ADDR))
#else
#define PHYS_RELATIVE(v_data, v_text) ((v_data) - (v_text))
#endif

#ifndef __ASSEMBLY__

#ifdef CONFIG_RANDOMIZE_BASE
extern unsigned long __kaslr_offset;

static inline unsigned long kaslr_offset(void)
{
	return __kaslr_offset;
}
#else
static inline unsigned long kaslr_offset(void)
{
	return 0;
}
#endif

/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 *
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 */
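
/*
 * Worked example (illustrative; PHYS_OFFSET 0x80000000 is just a common
 * platform value, not something this header mandates): the linear map
 * translates by a constant offset, so with PAGE_OFFSET 0xc0000000:
 *
 *	virt 0xc0123000 -> phys 0xc0123000 - 0xc0000000 + 0x80000000
 *			 = 0x80123000
 *	phys 0x80123000 -> virt 0x80123000 - 0x80000000 + 0xc0000000
 *			 = 0xc0123000
 *
 * With CONFIG_ARM_PATCH_PHYS_VIRT the same arithmetic is performed by
 * the run-time patched add/sub stubs below instead of a compile-time
 * constant, so one kernel image can run at any physical offset.
 */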

#if defined(CONFIG_ARM_PATCH_PHYS_VIRT)

/*
 * Constants used to force the right instruction encodings and shifts
 * so that all we need to do is modify the 8-bit constant field.
 */
#define __PV_BITS_31_24	0x81000000
#define __PV_BITS_23_16	0x810000
#define __PV_BITS_7_0	0x81

extern unsigned long __pv_phys_pfn_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;

#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)

#ifndef CONFIG_THUMB2_KERNEL
#define __pv_stub(from,to,instr)			\
	__asm__("@ __pv_stub\n"				\
	"1:	" instr "	%0, %1, %2\n"		\
	"2:	" instr "	%0, %0, %3\n"		\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b - ., 2b - .\n"		\
	"	.popsection\n"				\
	: "=r" (to)					\
	: "r" (from), "I" (__PV_BITS_31_24),		\
	  "I"(__PV_BITS_23_16))

#define __pv_add_carry_stub(x, y)			\
	__asm__("@ __pv_add_carry_stub\n"		\
	"0:	movw	%R0, #0\n"			\
	"	adds	%Q0, %1, %R0, lsl #20\n"	\
	"1:	mov	%R0, %2\n"			\
	"	adc	%R0, %R0, #0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	0b - ., 1b - .\n"		\
	"	.popsection\n"				\
	: "=&r" (y)					\
	: "r" (x), "I" (__PV_BITS_7_0)			\
	: "cc")

#else
#define __pv_stub(from,to,instr)			\
	__asm__("@ __pv_stub\n"				\
	"0:	movw	%0, #0\n"			\
	"	lsl	%0, #21\n"			\
	"	" instr " %0, %1, %0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	0b - .\n"			\
	"	.popsection\n"				\
	: "=&r" (to)					\
	: "r" (from))

#define __pv_add_carry_stub(x, y)			\
	__asm__("@ __pv_add_carry_stub\n"		\
	"0:	movw	%R0, #0\n"			\
	"	lsls	%R0, #21\n"			\
	"	adds	%Q0, %1, %R0\n"			\
	"1:	mvn	%R0, #0\n"			\
	"	adc	%R0, %R0, #0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	0b - ., 1b - .\n"		\
	"	.popsection\n"				\
	: "=&r" (y)					\
	: "r" (x)					\
	: "cc")
#endif

static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
	phys_addr_t t;

	if (sizeof(phys_addr_t) == 4) {
		__pv_stub(x, t, "add");
	} else {
		__pv_add_carry_stub(x, t);
	}
	return t;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	unsigned long t;

	/*
	 * The 'unsigned long' cast discards the upper word when
	 * phys_addr_t is 64-bit, and makes sure the inline assembly
	 * receives a 32-bit argument in a place where a 32-bit 'r'
	 * operand is expected.
	 */
	__pv_stub((unsigned long) x, t, "sub");
	return t;
}

#else

#define PHYS_OFFSET	PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))

static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	return x - PHYS_OFFSET + PAGE_OFFSET;
}

#endif
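
/*
 * How the patching works (illustrative summary): the 0x81 in the
 * __PV_BITS_* constants forces the assembler to emit add/sub encodings
 * with a rotated 8-bit immediate in a known position (ARM case; the
 * Thumb-2 variant patches a movw instead), and the .pv_table entries
 * record where each stub instruction lives. Early boot code (and
 * fixup_pv_table() for modules) rewrites those immediates with the real
 * offset taken from __pv_offset. Assuming PAGE_OFFSET 0xc0000000 and
 * RAM at phys 0x80000000, the virt->phys delta in 32-bit arithmetic is
 * 0x80000000 - 0xc0000000 = 0xc0000000, so a patched "add" stub turns
 * 0xc0123000 into 0xc0123000 + 0xc0000000 = 0x80123000 (mod 2^32).
 */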

#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
	 PHYS_PFN_OFFSET)

#define __pa_symbol_nodebug(x)	__virt_to_phys_nodebug((x))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif

/*
 * These are *only* valid on the kernel direct mapped RAM memory.
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for translating DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)__phys_to_virt(x);
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)

extern long long arch_phys_to_idmap_offset;

/*
 * These are for systems that have a hardware interconnect supported alias
 * of physical memory for idmap purposes.  Most cases should leave these
 * untouched.  Note: this can only return addresses less than 4GiB.
 */
static inline bool arm_has_idmap_alias(void)
{
	return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0;
}

#define IDMAP_INVALID_ADDR ((u32)~0)

static inline unsigned long phys_to_idmap(phys_addr_t addr)
{
	if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) {
		addr += arch_phys_to_idmap_offset;
		if (addr > (u32)~0)
			addr = IDMAP_INVALID_ADDR;
	}
	return addr;
}

static inline phys_addr_t idmap_to_phys(unsigned long idmap)
{
	phys_addr_t addr = idmap;

	if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset)
		addr -= arch_phys_to_idmap_offset;

	return addr;
}

static inline unsigned long __virt_to_idmap(unsigned long x)
{
	return phys_to_idmap(__virt_to_phys(x));
}

#define virt_to_idmap(x)	__virt_to_idmap((unsigned long)(x))

/*
 * Virtual <-> DMA view memory address translations
 * Again, these are *only* valid on the kernel direct mapped RAM
 * memory.  Use of these is *deprecated* (and that doesn't mean
 * use the __ prefixed forms instead.)  See dma-mapping.h.
 */
#ifndef __virt_to_bus
#define __virt_to_bus	__virt_to_phys
#define __bus_to_virt	__phys_to_virt
#define __pfn_to_bus(x)	__pfn_to_phys(x)
#define __bus_to_pfn(x)	__phys_to_pfn(x)
#endif

/*
 * Conversion between a struct page and a physical address.
 *
 *  page_to_pfn(page)	convert a struct page * to a PFN number
 *  pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *
 *
 *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
					&& pfn_valid(virt_to_pfn(kaddr)))

#endif /* !__ASSEMBLY__ */

#include <asm-generic/memory_model.h>

#endif /* __ASM_ARM_MEMORY_H */
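
/*
 * Usage sketch (illustrative only, not part of this header's API): for a
 * lowmem, direct-mapped kernel address such as one returned by kmalloc(),
 * the conversions above round-trip:
 *
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	phys_addr_t pa = virt_to_phys(buf);	/- physical view of buf
 *	struct page *pg = virt_to_page(buf);	/- owning struct page
 *	void *same = phys_to_virt(pa);		/- same == buf
 *
 * None of this is valid for vmalloc(), highmem or device addresses;
 * drivers needing device-visible addresses must use the DMA API
 * (see dma-mapping.h).
 */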