1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2020 Loongson Technology Corporation Limited
4  */
5 #ifndef _ASM_PROCESSOR_H
6 #define _ASM_PROCESSOR_H
7 
8 #include <linux/atomic.h>
9 #include <linux/cpumask.h>
10 #include <linux/sizes.h>
11 
12 #include <asm/cpu.h>
13 #include <asm/cpu-info.h>
14 #include <asm/loongarchregs.h>
15 #include <asm/vdso/processor.h>
16 #include <uapi/asm/ptrace.h>
17 #include <uapi/asm/sigcontext.h>
18 
#ifdef CONFIG_32BIT

/* User address space: low 2 GiB on 32-bit kernels. */
#define TASK_SIZE	0x80000000UL
#define TASK_SIZE_MIN	TASK_SIZE
#define STACK_TOP_MAX	TASK_SIZE

/* A 32-bit kernel only ever runs 32-bit address-space tasks. */
#define TASK_IS_32BIT_ADDR 1

#endif
28 
#ifdef CONFIG_64BIT

/* Compat (32-bit address space) tasks get a flat 4 GiB range. */
#define TASK_SIZE32	0x100000000UL
/* Native tasks: limited by the CPU's virtual-address bits, capped at VA_BITS. */
#define TASK_SIZE64     (0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits))

/* Per-task size depends on the current thread's TIF_32BIT_ADDR flag. */
#define TASK_SIZE	(test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
#define TASK_SIZE_MIN	TASK_SIZE32
#define STACK_TOP_MAX	TASK_SIZE64

/* Same as TASK_SIZE, but for an arbitrary task rather than current. */
#define TASK_SIZE_OF(tsk)						\
	(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)

#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)

#endif
44 
/* ASLR window for the VDSO mapping: smaller for 32-bit address spaces. */
#define VDSO_RANDOMIZE_SIZE	(TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)

unsigned long stack_top(void);
#define STACK_TOP stack_top()
49 
50 /*
51  * This decides where the kernel will search for a free chunk of vm
52  * space during mmap's.
53  */
54 #define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
55 
56 #define FPU_REG_WIDTH		256
57 #define FPU_ALIGN		__attribute__((aligned(32)))
58 
/* One FP/vector register, viewable as an array of 32-bit or 64-bit lanes. */
union fpureg {
	__u32	val32[FPU_REG_WIDTH / 32];
	__u64	val64[FPU_REG_WIDTH / 64];
};
63 
/* Lane index within a fpureg; the identity mapping on LoongArch. */
#define FPR_IDX(width, idx)	(idx)

/*
 * Generate get_fpr<width>()/set_fpr<width>() inline accessors for
 * reading and writing one lane of a union fpureg.
 */
#define BUILD_FPR_ACCESS(width) \
static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx)	\
{									\
	return fpr->val##width[FPR_IDX(width, idx)];			\
}									\
									\
static inline void set_fpr##width(union fpureg *fpr, unsigned idx,	\
				  u##width val)				\
{									\
	fpr->val##width[FPR_IDX(width, idx)] = val;			\
}
77 
BUILD_FPR_ACCESS(32)
BUILD_FPR_ACCESS(64)

/* Per-thread saved FPU state. */
struct loongarch_fpu {
	unsigned int	fcsr;	/* FP control and status register */
	uint64_t	fcc;	/* 8x8 */
	uint64_t	ftop;
	union fpureg	fpr[NUM_FPU_REGS];
};
87 
/* Per-thread saved LBT state: scratch registers plus eflags. */
struct loongarch_lbt {
	/* Scratch registers */
	unsigned long scr0;
	unsigned long scr1;
	unsigned long scr2;
	unsigned long scr3;
	/* Eflags register */
	unsigned long eflags;
};
97 
/* Zero-initializer for a cpumask. */
#define INIT_CPUMASK { \
	{0,} \
}

/*
 * Per-thread hardware watchpoint state, one entry per watch register.
 * NOTE(review): irw presumably encodes Instruction/Read/Write enable
 * bits, with irwstat/irwmask the hit status and capability mask —
 * confirm against the debug-register handling code.
 */
struct loongarch_watch_reg_state {
	unsigned long addr[NUM_WATCH_REGS];
	unsigned long mask[NUM_WATCH_REGS];
	unsigned char irw[NUM_WATCH_REGS];
	unsigned char irwstat[NUM_WATCH_REGS];
	unsigned char irwmask[NUM_WATCH_REGS];
};
109 
/* Matches the 32-byte FPU_ALIGN so the embedded fpu state stays aligned. */
#define ARCH_MIN_TASKALIGN	32

struct loongarch_vdso_info;
113 
114 /*
115  * If you change thread_struct remember to change the #defines below too!
116  */
117 struct thread_struct {
118 	/* Main processor registers. */
119 	unsigned long reg01, reg03, reg22; /* ra sp fp */
120 	unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */
121 	unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */
122 
123 	/* __schedule() return address / call frame address */
124 	unsigned long sched_ra;
125 	unsigned long sched_cfa;
126 
127 	/* CSR registers */
128 	unsigned long csr_prmd;
129 	unsigned long csr_crmd;
130 	unsigned long csr_euen;
131 	unsigned long csr_ecfg;
132 	unsigned long csr_badvaddr;	/* Last user fault */
133 
134 	/* Used by ptrace single_step */
135 	unsigned long single_step;
136 
137 	/* Watch register state, if available. */
138 	struct loongarch_watch_reg_state watch;
139 
140 	/* Other stuff associated with the thread. */
141 	unsigned long trap_nr;
142 	unsigned long error_code;
143 	struct loongarch_lbt lbt;
144 	struct loongarch_vdso_info *vdso;
145 
146 	/*
147 	 * FPU & vector registers, must be at last because
148 	 * they are conditionally copied at fork().
149 	 */
150 	struct loongarch_fpu fpu FPU_ALIGN;
151 };
152 
/* Saved return address / frame of a not-currently-running task. */
#define thread_saved_ra(tsk)	(tsk->thread.sched_ra)
#define thread_saved_fp(tsk)	(tsk->thread.sched_cfa)

/* Static initializer for thread_struct: everything zeroed. */
#define INIT_THREAD  {						\
	/*							\
	 * Main processor registers				\
	 */							\
	.reg01			= 0,				\
	.reg03			= 0,				\
	.reg22			= 0,				\
	.reg23			= 0,				\
	.reg24			= 0,				\
	.reg25			= 0,				\
	.reg26			= 0,				\
	.reg27			= 0,				\
	.reg28			= 0,				\
	.reg29			= 0,				\
	.reg30			= 0,				\
	.reg31			= 0,				\
	.sched_ra		= 0,				\
	.sched_cfa		= 0,				\
	.csr_crmd		= 0,				\
	.csr_prmd		= 0,				\
	.csr_euen		= 0,				\
	.csr_ecfg		= 0,				\
	.csr_badvaddr		= 0,				\
	/*							\
	 * Saved watch register stuff				\
	 */							\
	.watch = {{0,},},					\
	/*							\
	 * Other stuff associated with the process		\
	 */							\
	.trap_nr		= 0,				\
	.error_code		= 0,				\
	/*							\
	 * FPU & vector registers				\
	 */							\
	.fpu			= {				\
		.fcsr		= 0,				\
		.fcc		= 0,				\
		.fpr		= {{{0,},},},			\
	},							\
}
197 
struct task_struct;

/* Free all resources held by a thread (nothing arch-specific here). */
#define release_thread(thread) do { } while(0)

/* Idle-loop behaviour overrides; presumably selected at boot — see
 * boot_option_idle_override's definition for how it is set. */
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern unsigned long		boot_option_idle_override;
/*
 * Do necessary setup to start up a newly executed thread.
 */
extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp);
211 
/*
 * Called on exec to reset per-thread state; nothing arch-specific to
 * flush here, so this is a no-op.
 *
 * NOTE: the previous declaration line was garbled ("flush_thread(void)
 * static inline void flush_thread(void)") — restored to a single clean
 * definition.
 */
static inline void flush_thread(void)
{
}
215 
unsigned long get_wchan(struct task_struct *p);

/* Top of the kernel stack: user pt_regs live at the stack's high end. */
#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
			 THREAD_SIZE - sizeof(struct pt_regs))
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era)	/* user PC (ERA CSR) */
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3])	/* user SP ($r3) */
#define KSTK_EUEN(tsk) (task_pt_regs(tsk)->csr_euen)
#define KSTK_ECFG(tsk) (task_pt_regs(tsk)->csr_ecfg)

/* Caller's return address; the empty asm clobbers $1 (ra) so the
 * compiler must keep it recoverable for __builtin_return_address(). */
#define return_address() ({__asm__ __volatile__("":::"$1");__builtin_return_address(0);})
227 
#ifdef CONFIG_CPU_HAS_PREFETCH

/* Read prefetch (rw=0), low temporal locality (1). */
#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch((x), 0, 1)

/* Write prefetch (rw=1), low temporal locality (1). */
#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)

#endif
237 
238 #endif /* _ASM_PROCESSOR_H */
239