// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 */

#include <asm/interrupt.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/udbg.h>
#include <asm/code-patching.h>

#include "internal.h"


static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);

bool stress_slb_enabled __initdata;

static int __init parse_stress_slb(char *p)
{
	stress_slb_enabled = true;
	return 0;
}
early_param("stress_slb", parse_stress_slb);

__ro_after_init DEFINE_STATIC_KEY_FALSE(stress_slb_key);
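
/*
 * Editor's note (hedged; the enablement path is outside this section):
 * "stress_slb" is an early boot parameter, passed on the kernel command
 * line, e.g.:
 *
 *	linux ... stress_slb
 *
 * early_param() runs parse_stress_slb() before jump labels are usable, so
 * only the __initdata flag is set here; the static key above is presumably
 * enabled later in boot when stress_slb_enabled is seen, and the
 * stress_slb() helper used throughout this file (from internal.h) tests it
 * via a static branch.
 */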

static void assert_slb_presence(bool present, unsigned long ea)
{
#ifdef CONFIG_DEBUG_VM
	unsigned long tmp;

	WARN_ON_ONCE(mfmsr() & MSR_EE);

	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		return;

	/*
	 * slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware
	 * ignores all other bits from 0-27, so just clear them all.
	 */
	ea &= ~((1UL << SID_SHIFT) - 1);
	asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");

	WARN_ON(present == (tmp == 0));
#endif
}

static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     enum slb_index index)
{
	struct slb_shadow *p = get_slb_shadow();

	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it. No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	WRITE_ONCE(p->save_area[index].esid, 0);
	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}

static inline void slb_shadow_clear(enum slb_index index)
{
	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index));
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					enum slb_index index)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, index);

	assert_slb_presence(false, ea);
	asm volatile("slbmte %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, index))
		     : "memory" );
}
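
/*
 * Editor's sketch (not from the original source): why the ESID-first update
 * order in slb_shadow_update() is safe if the hypervisor preempts us at any
 * point and replays the shadow buffer. The entry only ever passes through
 * these states:
 *
 *	{ old esid, old vsid }	valid, old translation
 *	{ 0,        old vsid }	V bit clear, entry skipped on replay
 *	{ 0,        new vsid }	V bit still clear, entry skipped on replay
 *	{ new esid, new vsid }	valid, new translation
 *
 * so a replay can never load a mix of old ESID and new VSID.
 */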

/*
 * Insert bolted entries into SLB (which may not be empty, so don't clear
 * slb_cache_ptr).
 */
void __slb_restore_bolted_realmode(void)
{
	struct slb_shadow *p = get_slb_shadow();
	enum slb_index index;

	/* No isync needed because we're in realmode. */
	for (index = 0; index < SLB_NUM_BOLTED; index++) {
		asm volatile("slbmte %0,%1" :
		     : "r" (be64_to_cpu(p->save_area[index].vsid)),
		       "r" (be64_to_cpu(p->save_area[index].esid)));
	}

	assert_slb_presence(true, local_paca->kstack);
}

/*
 * Insert the bolted entries into an empty SLB.
 */
void slb_restore_bolted_realmode(void)
{
	__slb_restore_bolted_realmode();
	get_paca()->slb_cache_ptr = 0;

	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}

/*
 * This flushes all SLB entries including 0, so it must be realmode.
 */
void slb_flush_all_realmode(void)
{
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}

static __always_inline void __slb_flush_and_restore_bolted(bool preserve_kernel_lookaside)
{
	struct slb_shadow *p = get_slb_shadow();
	unsigned long ksp_esid_data, ksp_vsid_data;
	u32 ih;

	/*
	 * SLBIA IH=1 on ISA v2.05 and newer processors may preserve lookaside
	 * information created with Class=0 entries, which we use for kernel
	 * SLB entries (the SLB entries themselves are still invalidated).
	 *
	 * Older processors will ignore this optimisation. Over-invalidation
	 * is fine because we never rely on lookaside information existing.
	 */
	if (preserve_kernel_lookaside)
		ih = 1;
	else
		ih = 0;

	ksp_esid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
	ksp_vsid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);

	asm volatile(PPC_SLBIA(%0)"	\n"
		     "slbmte	%1, %2	\n"
		     :: "i" (ih),
			"r" (ksp_vsid_data),
			"r" (ksp_esid_data)
		     : "memory");
}
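
/*
 * Editor's summary (hedged, assembled from the comments in this file) of
 * the SLBIA IH values relied upon here:
 *
 *	IH=0	invalidate (non-zero) SLB entries and all lookaside
 *		information
 *	IH=1	invalidate (non-zero) SLB entries; lookaside information
 *		created from Class=0 (kernel) entries may be preserved
 *	IH=3	invalidate all Class=1 (user) SLB entries and their
 *		lookaside structures (used by switch_slb() on ISA v3.0)
 *
 * SLBIA leaves SLB entry 0 alone, which is why slb_flush_all_realmode()
 * must clobber entry 0 with "slbmte %0,%0" first, and why
 * __slb_flush_and_restore_bolted() only needs to re-insert the kernel
 * stack entry: entry 0 (the linear mapping) survives the flush.
 */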

/*
 * This flushes non-bolted entries; it can be run in virtual mode. Must
 * be called with interrupts disabled.
 */
void slb_flush_and_restore_bolted(void)
{
	BUILD_BUG_ON(SLB_NUM_BOLTED != 2);

	WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	isync();
	__slb_flush_and_restore_bolted(false);
	isync();

	assert_slb_presence(true, get_paca()->kstack);

	get_paca()->slb_cache_ptr = 0;

	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}

void slb_save_contents(struct slb_entry *slb_ptr)
{
	int i;
	unsigned long e, v;

	/* Save slb_cache_ptr value. */
	get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr;

	if (!slb_ptr)
		return;

	for (i = 0; i < mmu_slb_size; i++) {
		asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i));
		asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i));
		slb_ptr->esid = e;
		slb_ptr->vsid = v;
		slb_ptr++;
	}
}

void slb_dump_contents(struct slb_entry *slb_ptr)
{
	int i, n;
	unsigned long e, v;
	unsigned long llp;

	if (!slb_ptr)
		return;

	pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());

	for (i = 0; i < mmu_slb_size; i++) {
		e = slb_ptr->esid;
		v = slb_ptr->vsid;
		slb_ptr++;

		if (!e && !v)
			continue;

		pr_err("%02d %016lx %016lx %s\n", i, e, v,
		       (e & SLB_ESID_V) ? "VALID" : "NOT VALID");

		if (!(e & SLB_ESID_V))
			continue;

		llp = v & SLB_VSID_LLP;
		if (v & SLB_VSID_B_1T) {
			pr_err("     1T   ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID_1T(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
		} else {
			pr_err("     256M ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
		}
	}

	if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
		/* RR is not so useful as it's often not used for allocation */
		pr_err("SLB RR allocator index %d\n", get_paca()->stab_rr);

		/* Dump slb cache entries as well. */
		pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
		pr_err("Valid SLB cache entries:\n");
		n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
		for (i = 0; i < n; i++)
			pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
		pr_err("Rest of SLB cache entries:\n");
		for (i = n; i < SLB_CACHE_ENTRIES; i++)
			pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
	}
}

void slb_vmalloc_update(void)
{
	/*
	 * vmalloc is not bolted, so we just have to flush the non-bolted
	 * entries.
	 */
	slb_flush_and_restore_bolted();
}

static bool preload_hit(struct thread_info *ti, unsigned long esid)
{
	unsigned char i;

	for (i = 0; i < ti->slb_preload_nr; i++) {
		unsigned char idx;

		idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
		if (esid == ti->slb_preload_esid[idx])
			return true;
	}
	return false;
}

static bool preload_add(struct thread_info *ti, unsigned long ea)
{
	unsigned char idx;
	unsigned long esid;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
		/* EAs are stored >> 28 so 256MB segments don't need clearing */
		if (ea & ESID_MASK_1T)
			ea &= ESID_MASK_1T;
	}

	esid = ea >> SID_SHIFT;

	if (preload_hit(ti, esid))
		return false;

	idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR;
	ti->slb_preload_esid[idx] = esid;
	if (ti->slb_preload_nr == SLB_PRELOAD_NR)
		ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
	else
		ti->slb_preload_nr++;

	return true;
}

static void preload_age(struct thread_info *ti)
{
	if (!ti->slb_preload_nr)
		return;
	ti->slb_preload_nr--;
	ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
}
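
/*
 * Editor's worked example (hedged; assumes SLB_PRELOAD_NR == 16): the
 * preload cache above is a FIFO ring indexed from slb_preload_tail. With
 * tail == 14 and nr == 3, the live ESIDs sit in slots 14, 15 and 0, since
 *
 *	idx = (tail + i) % SLB_PRELOAD_NR	for i = 0, 1, 2
 *
 * preload_add() on a full ring stores into slot (tail + nr) % 16 == tail,
 * overwriting the oldest entry and advancing tail, while preload_age()
 * drops the oldest entry by bumping tail and decrementing nr.
 */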

void slb_setup_new_exec(void)
{
	struct thread_info *ti = current_thread_info();
	struct mm_struct *mm = current->mm;
	unsigned long exec = 0x10000000;

	WARN_ON(irqs_disabled());

	/*
	 * The preload cache can only be used to determine whether an SLB
	 * entry exists if it does not start to overflow.
	 */
	if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
		return;

	hard_irq_disable();

	/*
	 * We have no good place to clear the slb preload cache on exec,
	 * flush_thread is about the earliest arch hook but that happens
	 * after we switch to the mm and have already preloaded the SLBEs.
	 *
	 * For the most part it's probably okay to use entries from the
	 * previous exec; they will age out if unused. It may turn out to
	 * be an advantage to clear the cache before switching to it,
	 * however.
	 */

	/*
	 * Preload some userspace segments into the SLB.
	 * Almost all 32-bit and 64-bit PowerPC executables are linked at
	 * 0x10000000, so it makes sense to preload this segment.
	 */
	if (!is_kernel_addr(exec)) {
		if (preload_add(ti, exec))
			slb_allocate_user(mm, exec);
	}

	/* Libraries and mmaps. */
	if (!is_kernel_addr(mm->mmap_base)) {
		if (preload_add(ti, mm->mmap_base))
			slb_allocate_user(mm, mm->mmap_base);
	}

	/* see switch_slb */
	asm volatile("isync" : : : "memory");

	local_irq_enable();
}

void preload_new_slb_context(unsigned long start, unsigned long sp)
{
	struct thread_info *ti = current_thread_info();
	struct mm_struct *mm = current->mm;
	unsigned long heap = mm->start_brk;

	WARN_ON(irqs_disabled());

	/* see above */
	if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR)
		return;

	hard_irq_disable();

	/* Userspace entry address. */
	if (!is_kernel_addr(start)) {
		if (preload_add(ti, start))
			slb_allocate_user(mm, start);
	}

	/* Top of stack, grows down. */
	if (!is_kernel_addr(sp)) {
		if (preload_add(ti, sp))
			slb_allocate_user(mm, sp);
	}

	/* Bottom of heap, grows up. */
	if (heap && !is_kernel_addr(heap)) {
		if (preload_add(ti, heap))
			slb_allocate_user(mm, heap);
	}

	/* see switch_slb */
	asm volatile("isync" : : : "memory");

	local_irq_enable();
}

static void slb_cache_slbie_kernel(unsigned int index)
{
	unsigned long slbie_data = get_paca()->slb_cache[index];
	unsigned long ksp = get_paca()->kstack;

	slbie_data <<= SID_SHIFT;
	slbie_data |= 0xc000000000000000ULL;
	if ((ksp & slb_esid_mask(mmu_kernel_ssize)) == slbie_data)
		return;
	slbie_data |= mmu_kernel_ssize << SLBIE_SSIZE_SHIFT;

	asm volatile("slbie %0" : : "r" (slbie_data));
}

static void slb_cache_slbie_user(unsigned int index)
{
	unsigned long slbie_data = get_paca()->slb_cache[index];

	slbie_data <<= SID_SHIFT;
	slbie_data |= user_segment_size(slbie_data) << SLBIE_SSIZE_SHIFT;
	slbie_data |= SLBIE_C; /* user slbs have C=1 */

	asm volatile("slbie %0" : : "r" (slbie_data));
}
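
/*
 * Editor's sketch (hedged) of how the two helpers above rebuild the RB
 * operand for slbie from a 36-bit slb_cache entry (EA[0-35], i.e. ea >> 28):
 *
 *	slbie_data = cache_entry << SID_SHIFT;	(restore EA bits 0-35)
 *	kernel:	OR in 0xc000000000000000 (stripped when cached) and skip
 *		the entry matching the current kernel stack (it stays bolted)
 *	user:	OR in the recomputed segment size and SLBIE_C (Class=1)
 *
 * e.g. a cached user entry 0x1 (the 256M segment at EA 0x10000000) becomes
 * 0x1 << 28 == 0x10000000 plus the SSIZE field and the C bit.
 */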

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned char i;

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause an SLB miss
	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
	 */
	hard_irq_disable();
	isync();
	if (stress_slb()) {
		__slb_flush_and_restore_bolted(false);
		isync();
		get_paca()->slb_cache_ptr = 0;
		get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;

	} else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		/*
		 * SLBIA IH=3 invalidates all Class=1 SLBEs and their
		 * associated lookaside structures, which matches what
		 * switch_slb wants. So ARCH_300 does not use the slb
		 * cache.
		 */
		asm volatile(PPC_SLBIA(3));

	} else {
		unsigned long offset = get_paca()->slb_cache_ptr;

		if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
		    offset <= SLB_CACHE_ENTRIES) {
			/*
			 * Could assert_slb_presence(true) here, but
			 * hypervisor or machine check could have come
			 * in and removed the entry at this point.
			 */

			for (i = 0; i < offset; i++)
				slb_cache_slbie_user(i);

			/* Workaround POWER5 < DD2.1 issue */
			if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
				slb_cache_slbie_user(0);

		} else {
			/* Flush but retain kernel lookaside information */
			__slb_flush_and_restore_bolted(true);
			isync();

			get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
		}

		get_paca()->slb_cache_ptr = 0;
	}
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;

	copy_mm_to_paca(mm);

	/*
	 * We gradually age out SLBs after a number of context switches to
	 * reduce reload overhead of unused entries (like we do with FP/VEC
	 * reload). Each time we wrap 256 switches, take an entry out of the
	 * SLB preload cache.
	 */
	tsk->thread.load_slb++;
	if (!tsk->thread.load_slb) {
		unsigned long pc = KSTK_EIP(tsk);

		preload_age(ti);
		preload_add(ti, pc);
	}

	for (i = 0; i < ti->slb_preload_nr; i++) {
		unsigned char idx;
		unsigned long ea;

		idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
		ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT;

		slb_allocate_user(mm, ea);
	}

	/*
	 * Synchronize slbmte preloads with possible subsequent user memory
	 * address accesses by the kernel (user mode won't happen until
	 * rfid, which is safe).
	 */
	isync();
}
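
/*
 * Editor's note (hedged; the "wrap 256 switches" comment above implies
 * thread.load_slb is an 8-bit counter): the increment wraps to zero every
 * 256 calls, so switch_slb() retires the oldest preload-cache entry and
 * re-adds the task's current PC once per 256 context switches. A segment
 * that is never touched again therefore ages out of the preload cache
 * after at most SLB_PRELOAD_NR such wraps, sooner if new segments are
 * preloaded in the meantime.
 */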

void slb_set_size(u16 size)
{
	mmu_slb_size = size;
}

void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags;
	static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io      LLP = %04lx\n", io_llp);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED - 1;
	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;

	lflags = SLB_VSID_KERNEL | linear_llp;

	/* Invalidate the entire SLB (even entry 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);

	/*
	 * For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(KSTACK_INDEX);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, KSTACK_INDEX);

	asm volatile("isync":::"memory");
}

static void slb_cache_update(unsigned long esid_data)
{
	int slb_cache_index;

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return; /* ISAv3.0B and later does not use slb_cache */

	if (stress_slb())
		return;

	/*
	 * Now update slb cache entries
	 */
	slb_cache_index = local_paca->slb_cache_ptr;
	if (slb_cache_index < SLB_CACHE_ENTRIES) {
		/*
		 * We have space in slb cache for optimized switch_slb().
		 * Top 36 bits from esid_data as per ISA
		 */
		local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT;
		local_paca->slb_cache_ptr++;
	} else {
		/*
		 * Our cache is full and the current cache content strictly
		 * doesn't indicate the active SLB contents. Bump the ptr
		 * so that switch_slb() will ignore the cache.
		 */
		local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	}
}
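
/*
 * Editor's worked example (hedged; assumes SLB_CACHE_ENTRIES == 8): after
 * eight user insertions slb_cache_ptr == 8, so the ninth takes the else
 * branch above and pins the pointer at 9. switch_slb() tests
 *
 *	offset <= SLB_CACHE_ENTRIES
 *
 * so offset == 9 fails and it falls back to the wholesale
 * __slb_flush_and_restore_bolted(true) path instead of issuing one slbie
 * per cached entry.
 */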

static enum slb_index alloc_slb_index(bool kernel)
{
	enum slb_index index;

	/*
	 * The allocation bitmaps can become out of sync with the SLB
	 * when the _switch code does slbie when bolting a new stack
	 * segment and it must not be anywhere else in the SLB. This leaves
	 * a kernel allocated entry that is unused in the SLB. With very
	 * large systems or small segment sizes, the bitmaps could slowly
	 * fill with these entries. They will eventually be cleared out
	 * by the round robin allocator in that case, so it's probably not
	 * worth accounting for.
	 */

	/*
	 * SLBs beyond 32 entries are allocated with stab_rr only.
	 * POWER7/8/9 have 32 SLB entries; this could be expanded if a
	 * future CPU has more.
	 */
	if (local_paca->slb_used_bitmap != U32_MAX) {
		index = ffz(local_paca->slb_used_bitmap);
		local_paca->slb_used_bitmap |= 1U << index;
		if (kernel)
			local_paca->slb_kern_bitmap |= 1U << index;
	} else {
		/* round-robin replacement of slb starting at SLB_NUM_BOLTED. */
		index = local_paca->stab_rr;
		if (index < (mmu_slb_size - 1))
			index++;
		else
			index = SLB_NUM_BOLTED;
		local_paca->stab_rr = index;
		if (index < 32) {
			if (kernel)
				local_paca->slb_kern_bitmap |= 1U << index;
			else
				local_paca->slb_kern_bitmap &= ~(1U << index);
		}
	}
	BUG_ON(index < SLB_NUM_BOLTED);

	return index;
}
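
/*
 * Editor's worked example (hedged): with slb_used_bitmap == 0x0000001f
 * (entries 0-4 in use), ffz() returns 5, entry 5 is handed out and bit 5
 * is set. Once the bitmap saturates at U32_MAX, allocation degrades to
 * pure round-robin: stab_rr walks SLB_NUM_BOLTED .. mmu_slb_size - 1 and
 * wraps, never handing out a bolted index (hence the BUG_ON above).
 */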

static long slb_insert_entry(unsigned long ea, unsigned long context,
				unsigned long flags, int ssize, bool kernel)
{
	unsigned long vsid;
	unsigned long vsid_data, esid_data;
	enum slb_index index;

	vsid = get_vsid(context, ea, ssize);
	if (!vsid)
		return -EFAULT;

	/*
	 * There must not be a kernel SLB fault in alloc_slb_index or before
	 * slbmte here or the allocation bitmaps could get out of whack with
	 * the SLB.
	 *
	 * User SLB faults or preloads take this path which might get inlined
	 * into the caller, so add compiler barriers here to ensure unsafe
	 * memory accesses do not come between.
	 */
	barrier();

	index = alloc_slb_index(kernel);

	vsid_data = __mk_vsid_data(vsid, ssize, flags);
	esid_data = mk_esid_data(ea, ssize, index);

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 * User preloads should add isync afterwards in case the kernel
	 * accesses user memory before it returns to userspace with rfid.
	 */
	assert_slb_presence(false, ea);
	if (stress_slb()) {
		int slb_cache_index = local_paca->slb_cache_ptr;

		/*
		 * stress_slb() does not use slb cache, repurpose as a
		 * cache of inserted (non-bolted) kernel SLB entries. All
		 * non-bolted kernel entries are flushed on any user fault,
		 * or if there are already 3 non-bolted kernel entries.
		 */
		BUILD_BUG_ON(SLB_CACHE_ENTRIES < 3);
		if (!kernel || slb_cache_index == 3) {
			int i;

			for (i = 0; i < slb_cache_index; i++)
				slb_cache_slbie_kernel(i);
			slb_cache_index = 0;
		}

		if (kernel)
			local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT;
		local_paca->slb_cache_ptr = slb_cache_index;
	}
	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));

	barrier();

	if (!kernel)
		slb_cache_update(esid_data);

	return 0;
}

static long slb_allocate_kernel(unsigned long ea, unsigned long id)
{
	unsigned long context;
	unsigned long flags;
	int ssize;

	if (id == LINEAR_MAP_REGION_ID) {

		/* We only support up to H_MAX_PHYSMEM_BITS */
		if ((ea & EA_MASK) > (1UL << H_MAX_PHYSMEM_BITS))
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	} else if (id == VMEMMAP_REGION_ID) {

		if (ea >= H_VMEMMAP_END)
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	} else if (id == VMALLOC_REGION_ID) {

		if (ea >= H_VMALLOC_END)
			return -EFAULT;

		flags = local_paca->vmalloc_sllp;

	} else if (id == IO_REGION_ID) {

		if (ea >= H_KERN_IO_END)
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;

	} else {
		return -EFAULT;
	}

	ssize = MMU_SEGSIZE_1T;
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		ssize = MMU_SEGSIZE_256M;

	context = get_kernel_context(ea);

	return slb_insert_entry(ea, context, flags, ssize, true);
}
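
/*
 * Editor's trace (hedged) of a typical kernel-side miss through the
 * dispatcher above: touching an unmapped vmalloc address takes the
 * VMALLOC_REGION_ID branch, picks up the precomputed
 * local_paca->vmalloc_sllp flags (set in slb_initialize()), selects a 1T
 * segment when MMU_FTR_1T_SEGMENT is present, and ends in
 * slb_insert_entry(..., kernel=true), which installs an ordinary
 * replaceable Class=0 entry; nothing on this path is bolted.
 */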

static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
{
	unsigned long context;
	unsigned long flags;
	int bpsize;
	int ssize;

	/*
	 * Consider this a bad access if we take an SLB miss
	 * on an address above the addr limit.
	 */
	if (ea >= mm_ctx_slb_addr_limit(&mm->context))
		return -EFAULT;

	context = get_user_context(&mm->context, ea);
	if (!context)
		return -EFAULT;

	if (unlikely(ea >= H_PGTABLE_RANGE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	ssize = user_segment_size(ea);

	bpsize = get_slice_psize(mm, ea);
	flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;

	return slb_insert_entry(ea, context, flags, ssize, false);
}
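
/*
 * Editor's summary (hedged) of the guard ladder above: a user EA must lie
 * below the mm's SLB address limit, must map to a valid user context, and
 * must fall inside H_PGTABLE_RANGE; only then are the segment size (256M
 * or 1T, from user_segment_size()) and the slice's base page size combined
 * into the VSID flags:
 *
 *	flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;
 *
 * The resulting entry is Class=1, which is what lets switch_slb() drop all
 * user translations with SLBIA IH=3 or targeted slbie (SLBIE_C).
 */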

DEFINE_INTERRUPT_HANDLER_RAW(do_slb_fault)
{
	unsigned long ea = regs->dar;
	unsigned long id = get_region_id(ea);

	/* IRQs are not reconciled here, so can't check irqs_disabled */
	VM_WARN_ON(mfmsr() & MSR_EE);

	if (regs_is_unrecoverable(regs))
		return -EINVAL;

	/*
	 * SLB kernel faults must be very careful not to touch anything that is
	 * not bolted. E.g., PACA and global variables are okay, mm->context
	 * stuff is not. SLB user faults may access all of memory (and induce
	 * one recursive SLB kernel fault), so the kernel fault must not
	 * trample on the user fault state at those points.
	 */

	/*
	 * This is a raw interrupt handler, for performance, so that
	 * fast_interrupt_return can be used. The handler must not touch local
	 * irq state, or schedule. We could test for usermode and upgrade to a
	 * normal process context (synchronous) interrupt for those, which
	 * would make them first-class kernel code and able to be traced and
	 * instrumented; although performance would suffer a bit, it would
	 * probably be a good tradeoff.
	 */
	if (id >= LINEAR_MAP_REGION_ID) {
		long err;
#ifdef CONFIG_DEBUG_VM
		/* Catch recursive kernel SLB faults. */
		BUG_ON(local_paca->in_kernel_slb_handler);
		local_paca->in_kernel_slb_handler = 1;
#endif
		err = slb_allocate_kernel(ea, id);
#ifdef CONFIG_DEBUG_VM
		local_paca->in_kernel_slb_handler = 0;
#endif
		return err;
	} else {
		struct mm_struct *mm = current->mm;
		long err;

		if (unlikely(!mm))
			return -EFAULT;

		err = slb_allocate_user(mm, ea);
		if (!err)
			preload_add(current_thread_info(), ea);

		return err;
	}
}