/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#else
#define __percpu_seg		fs
#endif

#ifdef __ASSEMBLY__

#ifdef CONFIG_SMP
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU_VAR(var)	var
#endif	/* SMP */

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)  var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset		this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define arch_raw_cpu_ptr(ptr)				\
({							\
	unsigned long tcp_ptr__;			\
	asm ("add " __percpu_arg(1) ", %0"		\
	     : "=r" (tcp_ptr__)				\
	     : "m" (this_cpu_off), "0" (ptr));		\
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
})
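/*
 * For example, this_cpu_ptr(&foo) can then typically compile down to a
 * single "add %gs:this_cpu_off, %reg" applied to the address of foo,
 * instead of first loading this_cpu_off into a scratch register and
 * then adding it.
 */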
#else
#define __percpu_prefix		""
#endif

#define __percpu_arg(x)		__percpu_prefix "%" #x

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor must use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There must also be a corresponding entry in vmlinux.lds.S.
 */
#define DECLARE_INIT_PER_CPU(var) \
       extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)  init_per_cpu__##var
#else
#define init_per_cpu_var(var)  var
#endif

/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */

#define __pcpu_type_1 u8
#define __pcpu_type_2 u16
#define __pcpu_type_4 u32
#define __pcpu_type_8 u64

#define __pcpu_cast_1(val) ((u8)(((unsigned long) val) & 0xff))
#define __pcpu_cast_2(val) ((u16)(((unsigned long) val) & 0xffff))
#define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff))
#define __pcpu_cast_8(val) ((u64)(val))

#define __pcpu_op1_1(op, dst) op "b " dst
#define __pcpu_op1_2(op, dst) op "w " dst
#define __pcpu_op1_4(op, dst) op "l " dst
#define __pcpu_op1_8(op, dst) op "q " dst

#define __pcpu_op2_1(op, src, dst) op "b " src ", " dst
#define __pcpu_op2_2(op, src, dst) op "w " src ", " dst
#define __pcpu_op2_4(op, src, dst) op "l " src ", " dst
#define __pcpu_op2_8(op, src, dst) op "q " src ", " dst

#define __pcpu_reg_1(mod, x) mod "q" (x)
#define __pcpu_reg_2(mod, x) mod "r" (x)
#define __pcpu_reg_4(mod, x) mod "r" (x)
#define __pcpu_reg_8(mod, x) mod "r" (x)

#define __pcpu_reg_imm_1(x) "qi" (x)
#define __pcpu_reg_imm_2(x) "ri" (x)
#define __pcpu_reg_imm_4(x) "ri" (x)
#define __pcpu_reg_imm_8(x) "re" (x)
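/*
 * In the constraint strings above, "q" restricts byte operands to the
 * byte-addressable registers (a/b/c/d on 32-bit), "r" allows any
 * general-purpose register, "i" allows an immediate, and "e" allows a
 * 32-bit sign-extended immediate for 64-bit operands, since full 64-bit
 * immediates are not encodable in these instructions.
 */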

#define percpu_to_op(size, qual, op, _var, _val)			\
do {									\
	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);	\
	if (0) {							\
		typeof(_var) pto_tmp__;					\
		pto_tmp__ = (_val);					\
		(void)pto_tmp__;					\
	}								\
	asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var]))	\
	    : [var] "+m" (_var)						\
	    : [val] __pcpu_reg_imm_##size(pto_val__));			\
} while (0)
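/*
 * The dead "if (0)" block above never generates code; it only makes the
 * compiler type-check that _val is assignable to _var, a check that
 * would otherwise be lost behind the __pcpu_cast_##size() conversion.
 */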

#define percpu_unary_op(size, qual, op, _var)				\
({									\
	asm qual (__pcpu_op1_##size(op, __percpu_arg([var]))		\
	    : [var] "+m" (_var));					\
})

/*
 * Generate a per-cpu add-to-memory instruction, and optimize the code
 * when a constant 1 or -1 is added or subtracted.
 */
#define percpu_add_op(size, qual, var, val)				\
do {									\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ?		\
				(int)(val) : 0;				\
	if (0) {							\
		typeof(var) pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	if (pao_ID__ == 1)						\
		percpu_unary_op(size, qual, "inc", var);		\
	else if (pao_ID__ == -1)					\
		percpu_unary_op(size, qual, "dec", var);		\
	else								\
		percpu_to_op(size, qual, "add", var, val);		\
} while (0)
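/*
 * For example, this_cpu_add(var, 1) typically compiles to a single
 * "incl %gs:var", this_cpu_add(var, -1) to "decl %gs:var", and any
 * other value to "addl $val, %gs:var" (with the size suffix matching
 * the type of var).
 */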

#define percpu_from_op(size, qual, op, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
	asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]")	\
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "m" (_var));					\
	(typeof(_var))(unsigned long) pfo_val__;			\
})

#define percpu_stable_op(size, op, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
	asm(__pcpu_op2_##size(op, __percpu_arg(P[var]), "%[val]")	\
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "p" (&(_var)));					\
	(typeof(_var))(unsigned long) pfo_val__;			\
})

/*
 * Add return operation
 */
#define percpu_add_return_op(size, qual, _var, _val)			\
({									\
	__pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val);	\
	asm qual (__pcpu_op2_##size("xadd", "%[tmp]",			\
				     __percpu_arg([var]))		\
		  : [tmp] __pcpu_reg_##size("+", paro_tmp__),		\
		    [var] "+m" (_var)					\
		  : : "memory");					\
	(typeof(_var))(unsigned long) (paro_tmp__ + _val);		\
})

/*
 * percpu_xchg_op() is implemented using cmpxchg without a lock prefix,
 * rather than the xchg instruction: xchg carries an implied lock prefix,
 * which makes it expensive and prevents the processor from prefetching
 * cachelines.
 */
#define percpu_xchg_op(size, qual, _var, _nval)				\
({									\
	__pcpu_type_##size pxo_old__;					\
	__pcpu_type_##size pxo_new__ = __pcpu_cast_##size(_nval);	\
	asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]),		\
				    "%[oval]")				\
		  "\n1:\t"						\
		  __pcpu_op2_##size("cmpxchg", "%[nval]",		\
				    __percpu_arg([var]))		\
		  "\n\tjnz 1b"						\
		  : [oval] "=&a" (pxo_old__),				\
		    [var] "+m" (_var)					\
		  : [nval] __pcpu_reg_##size(, pxo_new__)		\
		  : "memory");						\
	(typeof(_var))(unsigned long) pxo_old__;			\
})
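/*
 * The generated sequence is roughly:
 *
 *	mov	%gs:var, %eax
 * 1:	cmpxchg	%new, %gs:var
 *	jnz	1b
 *
 * i.e. the old value is read, then cmpxchg retries until the variable
 * has not changed in between (for instance from an interrupt running on
 * the same CPU).
 */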

/*
 * cmpxchg has no such implied lock semantics; as a result it is much
 * more efficient for CPU-local operations.
 */
#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval)		\
({									\
	__pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval);	\
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);	\
	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]",		\
				    __percpu_arg([var]))		\
		  : [oval] "+a" (pco_old__),				\
		    [var] "+m" (_var)					\
		  : [nval] __pcpu_reg_##size(, pco_new__)		\
		  : "memory");						\
	(typeof(_var))(unsigned long) pco_old__;			\
})

#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)
#define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval)		\
({									\
	union {								\
		u64 var;						\
		struct {						\
			u32 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = _oval;						\
	new__.var = _nval;						\
									\
	asm qual (ALTERNATIVE("leal %P[var], %%esi; call this_cpu_cmpxchg8b_emu", \
			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		  : [var] "+m" (_var),					\
		    "+a" (old__.low),					\
		    "+d" (old__.high)					\
		  : "b" (new__.low),					\
		    "c" (new__.high)					\
		  : "memory", "esi");					\
									\
	old__.var;							\
})

#define raw_cpu_cmpxchg64(pcp, oval, nval)	percpu_cmpxchg64_op(8,         , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval)	percpu_cmpxchg64_op(8, volatile, pcp, oval, nval)
#endif

#ifdef CONFIG_X86_64
#define raw_cpu_cmpxchg64(pcp, oval, nval)	percpu_cmpxchg_op(8,         , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval)	percpu_cmpxchg_op(8, volatile, pcp, oval, nval)

#define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval)		\
({									\
	union {								\
		u128 var;						\
		struct {						\
			u64 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = _oval;						\
	new__.var = _nval;						\
									\
	asm qual (ALTERNATIVE("leaq %P[var], %%rsi; call this_cpu_cmpxchg16b_emu", \
			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		  : [var] "+m" (_var),					\
		    "+a" (old__.low),					\
		    "+d" (old__.high)					\
		  : "b" (new__.low),					\
		    "c" (new__.high)					\
		  : "memory", "rsi");					\
									\
	old__.var;							\
})
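/*
 * The ALTERNATIVE() above means: on CPUs without X86_FEATURE_CX16 the
 * this_cpu_cmpxchg16b_emu helper is called (with the variable's address
 * in %rsi), while on CPUs that do support it the call site is patched
 * at boot into a plain "cmpxchg16b %gs:var". The 32-bit cmpxchg8b
 * variant above works the same way with X86_FEATURE_CX8.
 */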

#define raw_cpu_cmpxchg128(pcp, oval, nval)	percpu_cmpxchg128_op(16,         , pcp, oval, nval)
#define this_cpu_cmpxchg128(pcp, oval, nval)	percpu_cmpxchg128_op(16, volatile, pcp, oval, nval)
#endif

/*
 * this_cpu_read() makes gcc load the percpu variable every time it is
 * accessed, while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info(), both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define this_cpu_read_stable_1(pcp)	percpu_stable_op(1, "mov", pcp)
#define this_cpu_read_stable_2(pcp)	percpu_stable_op(2, "mov", pcp)
#define this_cpu_read_stable_4(pcp)	percpu_stable_op(4, "mov", pcp)
#define this_cpu_read_stable_8(pcp)	percpu_stable_op(8, "mov", pcp)
#define this_cpu_read_stable(pcp)	__pcpu_size_call_return(this_cpu_read_stable_, pcp)
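/*
 * For example, get_current() is (roughly) a this_cpu_read_stable() of
 * the per-cpu current task pointer: because percpu_stable_op() takes
 * only the variable's address as a "p" input rather than an "m" memory
 * operand, the compiler may hoist the load out of loops and reuse the
 * value, which is safe since a task's notion of "current" never changes
 * underneath it.
 */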

#define raw_cpu_read_1(pcp)		percpu_from_op(1, , "mov", pcp)
#define raw_cpu_read_2(pcp)		percpu_from_op(2, , "mov", pcp)
#define raw_cpu_read_4(pcp)		percpu_from_op(4, , "mov", pcp)

#define raw_cpu_write_1(pcp, val)	percpu_to_op(1, , "mov", (pcp), val)
#define raw_cpu_write_2(pcp, val)	percpu_to_op(2, , "mov", (pcp), val)
#define raw_cpu_write_4(pcp, val)	percpu_to_op(4, , "mov", (pcp), val)
#define raw_cpu_add_1(pcp, val)		percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val)		percpu_add_op(2, , (pcp), val)
#define raw_cpu_add_4(pcp, val)		percpu_add_op(4, , (pcp), val)
#define raw_cpu_and_1(pcp, val)		percpu_to_op(1, , "and", (pcp), val)
#define raw_cpu_and_2(pcp, val)		percpu_to_op(2, , "and", (pcp), val)
#define raw_cpu_and_4(pcp, val)		percpu_to_op(4, , "and", (pcp), val)
#define raw_cpu_or_1(pcp, val)		percpu_to_op(1, , "or", (pcp), val)
#define raw_cpu_or_2(pcp, val)		percpu_to_op(2, , "or", (pcp), val)
#define raw_cpu_or_4(pcp, val)		percpu_to_op(4, , "or", (pcp), val)

/*
 * raw_cpu_xchg() can use a load-store since it is not required to be
 * IRQ-safe.
 */
#define raw_percpu_xchg_op(var, nval)					\
({									\
	typeof(var) pxo_ret__ = raw_cpu_read(var);			\
	raw_cpu_write(var, (nval));					\
	pxo_ret__;							\
})

#define raw_cpu_xchg_1(pcp, val)	raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)	raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)	raw_percpu_xchg_op(pcp, val)

#define this_cpu_read_1(pcp)		percpu_from_op(1, volatile, "mov", pcp)
#define this_cpu_read_2(pcp)		percpu_from_op(2, volatile, "mov", pcp)
#define this_cpu_read_4(pcp)		percpu_from_op(4, volatile, "mov", pcp)
#define this_cpu_write_1(pcp, val)	percpu_to_op(1, volatile, "mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op(2, volatile, "mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op(4, volatile, "mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op(1, volatile, (pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op(2, volatile, (pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op(4, volatile, (pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op(1, volatile, "and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op(2, volatile, "and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op(4, volatile, "and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op(1, volatile, "or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op(2, volatile, "or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op(4, volatile, "or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(1, volatile, pcp, nval)
#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(2, volatile, pcp, nval)
#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(4, volatile, pcp, nval)

#define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(1, , pcp, val)
#define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(2, , pcp, val)
#define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(4, , pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(1, , pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(2, , pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(4, , pcp, oval, nval)

#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(1, volatile, pcp, val)
#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(2, volatile, pcp, val)
#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(4, volatile, pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(4, volatile, pcp, oval, nval)

/*
 * Per-cpu atomic 64-bit operations are only available on 64-bit kernels.
 * 32-bit kernels must fall back to the generic operations.
 */
#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp)			percpu_from_op(8, , "mov", pcp)
#define raw_cpu_write_8(pcp, val)		percpu_to_op(8, , "mov", (pcp), val)
#define raw_cpu_add_8(pcp, val)			percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val)			percpu_to_op(8, , "and", (pcp), val)
#define raw_cpu_or_8(pcp, val)			percpu_to_op(8, , "or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(8, , pcp, val)
#define raw_cpu_xchg_8(pcp, nval)		raw_percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, , pcp, oval, nval)

#define this_cpu_read_8(pcp)			percpu_from_op(8, volatile, "mov", pcp)
#define this_cpu_write_8(pcp, val)		percpu_to_op(8, volatile, "mov", (pcp), val)
#define this_cpu_add_8(pcp, val)		percpu_add_op(8, volatile, (pcp), val)
#define this_cpu_and_8(pcp, val)		percpu_to_op(8, volatile, "and", (pcp), val)
#define this_cpu_or_8(pcp, val)			percpu_to_op(8, volatile, "or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(8, volatile, pcp, val)
#define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(8, volatile, pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
#endif

static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
                        const unsigned long __percpu *addr)
{
	unsigned long __percpu *a =
		(unsigned long __percpu *)addr + nr / BITS_PER_LONG;

#ifdef CONFIG_X86_64
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
#else
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
#endif
}

static inline bool x86_this_cpu_variable_test_bit(int nr,
                        const unsigned long __percpu *addr)
{
	bool oldbit;

	asm volatile("btl "__percpu_arg(2)",%1"
			CC_SET(c)
			: CC_OUT(c) (oldbit)
			: "m" (*(unsigned long __percpu *)addr), "Ir" (nr));

	return oldbit;
}

#define x86_this_cpu_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))			\
	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
	 : x86_this_cpu_variable_test_bit((nr), (addr)))
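/*
 * x86_this_cpu_test_bit() thus compiles a constant bit number down to a
 * mask test against a single per-cpu load, and a variable bit number to
 * a single "btl" against the per-cpu word, in both cases without having
 * to compute this_cpu_ptr() first.
 */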


#include <asm-generic/percpu.h>

/* We can use this directly for the local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before the per_cpu areas
 * are allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);		\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))
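/*
 * One user is x86_cpu_to_apicid, typically defined with
 * DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID):
 * early boot code reads and writes it through early_per_cpu(), which
 * uses the static _early_map[] until setup_per_cpu_areas() copies the
 * values into the real per-cpu area and clears the _early_ptr.
 */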

#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */