// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>
#include <asm/pte-walk.h>

#include <trace/events/thp.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i, offset;

	i = batch->index;

	/*
	 * Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment, such
	 * as for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA must not have been reused) at
	 * the time of this call.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
		if (unlikely(psize == MMU_PAGE_16G))
			offset = PTRS_PER_PUD;
		else
			offset = PTRS_PER_PMD;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shut up gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/*
		 * Mask the address for the standard page size. If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table.
		 */
		addr &= PAGE_MASK;
		offset = PTRS_PER_PTE;
	}

	/* Build the full virtual address */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	WARN_ON(vsid == 0);
	vpn = hpt_vpn(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep, offset);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return.
	 */
	if (!batch->active) {
		flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (e.g. copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid-stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch.
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	int i, local;

	i = batch->index;
	local = mm_is_thread_local(batch->mm);
	if (i == 1)
		flush_hash_page(batch->vpn[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}

void hash__tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

	/*
	 * If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	put_cpu_var(ppc64_tlb_batch);
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @start: starting address
 * @end: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it is implemented for small size rather
 * than speed.
 */
void __flush_hash_table_range(unsigned long start, unsigned long end)
{
	int hugepage_shift;
	unsigned long flags;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance-oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_init_mm_pte(start, &hugepage_shift);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & H_PAGE_HASHPTE))
			continue;
		hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}

void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	pte_t *start_pte;
	unsigned long flags;

	addr = ALIGN_DOWN(addr, PMD_SIZE);
	/*
	 * Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance-oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	start_pte = pte_offset_map(pmd, addr);
	if (!start_pte)
		goto out;
	for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
		unsigned long pteval = pte_val(*pte);
		if (pteval & H_PAGE_HASHPTE)
			hpte_need_flush(mm, addr, pte, pteval, 0);
		addr += PAGE_SIZE;
	}
	pte_unmap(start_pte);
out:
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}