// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/mem_detect.h>
#include <asm/sparsemem.h>
#include "compressed/decompressor.h"
#include "boot.h"

unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);

/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX \
	(256 * (1020 / 2) * sizeof(struct mem_detect_block))

/*
 * To avoid corrupting old kernel memory during dump, find the lowest memory
 * chunk possible: either right after the end of the (decompressed) kernel, or
 * after the initrd (if it is present and there is no hole between the kernel
 * end and the initrd).
 */
static void *mem_detect_alloc_extended(void)
{
	unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
	    INITRD_START < offset + ENTRIES_EXTENDED_MAX)
		offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));

	return (void *)offset;
}

static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
{
	if (n < MEM_INLINED_ENTRIES)
		return &mem_detect.entries[n];
	if (unlikely(!mem_detect.entries_extended))
		mem_detect.entries_extended = mem_detect_alloc_extended();
	return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
}

/*
 * Sequential calls to add_mem_detect_block() with adjacent memory areas
 * are merged together into a single memory block.
 */
void add_mem_detect_block(u64 start, u64 end)
{
	struct mem_detect_block *block;

	if (mem_detect.count) {
		block = __get_mem_detect_block_ptr(mem_detect.count - 1);
		if (block->end == start) {
			block->end = end;
			return;
		}
	}

	block = __get_mem_detect_block_ptr(mem_detect.count);
	block->start = start;
	block->end = end;
	mem_detect.count++;
}

/*
 * Call diagnose 0x260 with ry = 0x10 (storage configuration). The program
 * check new PSW is temporarily pointed at label 1, so that a program check
 * (e.g. when the diagnose is not supported) skips ipm/srl and leaves rc
 * at -1.
 */
static int __diag260(unsigned long rx1, unsigned long rx2)
{
	register unsigned long _rx1 asm("2") = rx1;
	register unsigned long _rx2 asm("3") = rx2;
	register unsigned long _ry asm("4") = 0x10; /* storage configuration */
	int rc = -1;				    /* fail */
	unsigned long reg1, reg2;
	psw_t old;

	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%0,%1\n"
		"	st	%0,0(%[psw_pgm])\n"
		"	st	%1,4(%[psw_pgm])\n"
		"	larl	%0,1f\n"
		"	stg	%0,8(%[psw_pgm])\n"
		"	diag	%[rx],%[ry],0x260\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: "=&d" (reg1), "=&a" (reg2),
		  "+Q" (S390_lowcore.program_new_psw),
		  "=Q" (old),
		  [rc] "+&d" (rc), [ry] "+d" (_ry)
		: [rx] "d" (_rx1), "d" (_rx2),
		  [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw)
		: "cc", "memory");
	return rc == 0 ? _ry : -1;
}

static int diag260(void)
{
	int rc, i;

	struct {
		unsigned long start;
		unsigned long end;
	} storage_extents[8] __aligned(16); /* VM supports up to 8 extents */

	memset(storage_extents, 0, sizeof(storage_extents));
	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
	if (rc == -1)
		return -1;

	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
		add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
	return 0;
}

/*
 * TEST PROTECTION of the storage at addr. Returns the condition code of
 * tprot, or -EFAULT if the access caused a program check (e.g. the address
 * is not backed by configured storage).
 */
static int tprot(unsigned long addr)
{
	unsigned long reg1, reg2;
	int rc = -EFAULT;
	psw_t old;

	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	tprot	0(%[addr]),0\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  "=Q" (S390_lowcore.program_new_psw.addr),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
		  [addr] "a" (addr)
		: "cc", "memory");
	return rc;
}

/* Binary search for the highest accessible 1 MB block, using tprot(). */
static void search_mem_end(void)
{
	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
	unsigned long offset = 0;
	unsigned long pivot;

	while (range > 1) {
		range >>= 1;
		pivot = offset + range;
		if (!tprot(pivot << 20))
			offset = pivot;
	}

	add_mem_detect_block(0, (offset + 1) << 20);
}

/*
 * Fill mem_detect, trying the available sources in order of preference:
 * SCLP storage info, diagnose 0x260, the SCLP-reported memory size (as a
 * single block), and finally a tprot() based binary search.
 */
void detect_memory(void)
{
	sclp_early_get_memsize(&max_physmem_end);

	if (!sclp_early_read_storage_info()) {
		mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
		return;
	}

	if (!diag260()) {
		mem_detect.info_source = MEM_DETECT_DIAG260;
		return;
	}

	if (max_physmem_end) {
		add_mem_detect_block(0, max_physmem_end);
		mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
		return;
	}

	search_mem_end();
	mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
	max_physmem_end = get_mem_detect_end();
}