/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H

/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

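/*
 * A per-CPU batch of hash-PTE invalidations, filled while a task is in
 * lazy MMU mode and drained by __flush_tlb_pending().  Roughly, the
 * fields are used as follows:
 *
 *	active	non-zero while the CPU is inside lazy MMU mode
 *	index	number of entries currently queued
 *	mm	address space the queued entries belong to
 *	pte/vpn	real PTE and virtual page number for each queued entry
 *	psize	base page size of the queued entries
 *	ssize	segment size (256M or 1T)
 */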
struct ppc64_tlb_batch {
	int			active;
	unsigned long		index;
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vpn[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
	int			ssize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

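/* Flush all entries queued in @batch and reset batch->index to zero. */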
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

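/*
 * With the hash MMU, PTE updates made under lazy MMU mode do not hit the
 * hash table one at a time; the corresponding invalidations are queued
 * in the per-CPU batch above and issued together when the batch fills up
 * or when lazy MMU mode is left.  Radix has no such batching, so these
 * hooks bail out early when radix_enabled() is true.
 */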
static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	if (radix_enabled())
		return;
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	if (radix_enabled())
		return;
	batch = this_cpu_ptr(&ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}
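
/*
 * Illustrative sketch only (not a definitive call sequence): generic mm
 * code brackets batched PTE updates with the hooks above, e.g.
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		ptep_get_and_clear(mm, addr, ptep);	// queued in the batch
 *	arch_leave_lazy_mmu_mode();			// flush anything pending
 */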

#define arch_flush_lazy_mmu_mode()      do {} while (0)

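/*
 * Flush the entire TLB of the local CPU using tlbiel; @action selects the
 * invalidation scope (e.g. TLB_INVAL_SCOPE_GLOBAL vs TLB_INVAL_SCOPE_LPID).
 */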
extern void hash__tlbiel_all(unsigned int action);

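/*
 * Invalidate the HPTE(s) backing a single page, a batch of queued pages,
 * or a hugepage mapping, respectively.  For flush_hash_range(), @local
 * says the invalidation only needs to reach the current CPU (tlbiel
 * rather than tlbie).
 */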
extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
			    int ssize, unsigned long flags);
extern void flush_hash_range(unsigned long number, int local);
extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
				pmd_t *pmdp, unsigned int psize, int ssize,
				unsigned long flags);
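
/*
 * With hash, the TLB is invalidated as HPTEs are torn down (see
 * flush_hash_page() above), so the per-mm and per-page flush hooks
 * below have nothing left to do.
 */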
static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__local_flush_all_mm(struct mm_struct *mm)
{
	/*
	 * There's no Page Walk Cache for hash, so what is needed is
	 * the same as flush_tlb_mm(), which doesn't really make sense
	 * with hash. So the only thing we could do is flush the
	 * entire LPID! Punt for now, as it's not being used.
	 */
	WARN_ON_ONCE(1);
}

static inline void hash__flush_all_mm(struct mm_struct *mm)
{
	/*
	 * There's no Page Walk Cache for hash, so what is needed is
	 * the same as flush_tlb_mm(), which doesn't really make sense
	 * with hash. So the only thing we could do is flush the
	 * entire LPID! Punt for now, as it's not being used.
	 */
	WARN_ON_ONCE(1);
}

static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
					  unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
}

static inline void hash__flush_tlb_kernel_range(unsigned long start,
					    unsigned long end)
{
}
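/*
 * Back-end for the generic mmu_gather: flushes whatever invalidations
 * are still pending in the per-CPU batch at the end of an unmap or
 * address-space teardown.
 */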
struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(unsigned long start, unsigned long end);
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr);
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */