/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Loongson Technology Corporation Limited
 *
 * Switch an MMU context.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

/*
 * All upper bits not used by the hardware ASID are treated as a
 * software ASID version extension.
 */
static inline u64 asid_version_mask(unsigned int cpu)
{
	return ~(u64)(cpu_asid_mask(&cpu_data[cpu]));
}

static inline u64 asid_first_version(unsigned int cpu)
{
	return cpu_asid_mask(&cpu_data[cpu]) + 1;
}
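
/*
 * Illustrative example (assuming a 10-bit hardware ASID, i.e. a mask
 * of 0x3ff): asid_version_mask() is then ~0x3ffULL, so bits 63:10 of
 * a context value carry the software version, and asid_first_version()
 * is 0x400, the first value whose version field is non-zero.
 */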

#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))

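/*
 * An mm's context on this CPU is stale if its version bits no longer
 * match asid_cache(cpu), i.e. it was allocated in an earlier ASID
 * generation.
 */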
static inline int asid_valid(struct mm_struct *mm, unsigned int cpu)
{
	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
		return 0;

	return 1;
}

/* Nothing to do: no per-CPU state needs updating for lazy TLB mode */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* Normal, classic get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	u64 asid = asid_cache(cpu);

	if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))
		local_flush_tlb_user();	/* start new asid cycle */

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
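
/*
 * Rollover example (again assuming a 10-bit hardware ASID): when
 * asid_cache is 0x7ff, the increment yields 0x800, whose low 10 bits
 * are zero. The TLB is flushed and 0x800 becomes ASID 0 of software
 * version 2, so stale translations from the previous cycle cannot be
 * confused with the reused hardware ASIDs.
 */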

/*
 * Initialize the context-related info for a new mm_struct instance.
 * A context of zero belongs to version 0, which is never current, so
 * each CPU will allocate a fresh ASID on its first switch to this mm.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_possible_cpu(i)
		cpu_context(i, mm) = 0;

	return 0;
}

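/*
 * Activate @next on this CPU: allocate a fresh ASID if the cached one
 * is from an old version, program the ASID and page-table root CSRs,
 * and record this CPU in @next's cpumask for TLB-flush IPIs.
 */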
static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
				      struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	/* Check if our ASID is of an older version and thus invalid */
	if (!asid_valid(next, cpu))
		get_new_mmu_context(next, cpu);

	write_csr_asid(cpu_asid(cpu, next));

	if (next != &init_mm)
		csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL);
	else
		csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL);

	/*
	 * Mark this CPU as running @next, so that IPI TLB flush
	 * routines know which CPUs to target for this mm.
	 */
	cpumask_set_cpu(cpu, mm_cpumask(next));
}

#define switch_mm_irqs_off switch_mm_irqs_off

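/*
 * Interrupt-safe wrapper: callers that already run with IRQs disabled
 * can use switch_mm_irqs_off() directly.
 */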
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}

/*
 * Destroy context-related info for an mm_struct that is about to be
 * put to rest. Nothing to do: init_new_context() allocates nothing.
 */
static inline void destroy_context(struct mm_struct *mm)
{
}

#define activate_mm(prev, next)	switch_mm(prev, next, current)
#define deactivate_mm(task, mm)	do { } while (0)

/*
 * If mm is currently active, we can't really drop it.
 * Instead, we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	int asid;
	unsigned long flags;

	local_irq_save(flags);

	asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data);

	if (asid == cpu_asid(cpu, mm)) {
		if (!current->mm || (current->mm == mm)) {
			/* mm is live on this CPU: replace its ASID in place */
			get_new_mmu_context(mm, cpu);
			write_csr_asid(cpu_asid(cpu, mm));
			goto out;
		}
	}

	/* Will get a new context next time */
	cpu_context(cpu, mm) = 0;
	cpumask_clear_cpu(cpu, mm_cpumask(mm));
out:
	local_irq_restore(flags);
}
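
/*
 * Retiring the ASID this way is typically cheaper than invalidating
 * the mm's TLB entries one by one: once the old context is dropped,
 * nothing can match its ASID tag again within the current version.
 */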

#endif /* _ASM_MMU_CONTEXT_H */