/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2003 Intel Co
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Arun Sharma <arun.sharma@intel.com>
 *
 * 12/07/98	S. Eranian	added pt_regs & switch_stack
 * 12/21/98	D. Mosberger	updated to match latest code
 *  6/17/99	D. Mosberger	added second unat member to "struct switch_stack"
 *
 */
#ifndef _ASM_IA64_PTRACE_H
#define _ASM_IA64_PTRACE_H

#ifndef ASM_OFFSETS_C
#include <asm/asm-offsets.h>
#endif
#include <uapi/asm/ptrace.h>

/*
 * Base-2 logarithm of number of pages to allocate per task structure
 * (including register backing store and memory stack):
 */
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define KERNEL_STACK_SIZE_ORDER		3
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define KERNEL_STACK_SIZE_ORDER		2
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define KERNEL_STACK_SIZE_ORDER		1
#else
# define KERNEL_STACK_SIZE_ORDER		0
#endif

#define IA64_RBS_OFFSET			((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 31) & ~31)
#define IA64_STK_OFFSET			((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)

#define KERNEL_STACK_SIZE		IA64_STK_OFFSET
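
/*
 * Worked example (illustrative only, not used by the build): the per-task
 * allocation is (1 << KERNEL_STACK_SIZE_ORDER) pages and covers the
 * task_struct/thread_info, the register backing store starting at
 * IA64_RBS_OFFSET, and the memory stack ending at IA64_STK_OFFSET.
 * With CONFIG_IA64_PAGE_SIZE_16KB, for instance:
 *
 *	KERNEL_STACK_SIZE_ORDER = 1
 *	KERNEL_STACK_SIZE       = (1 << 1) * 16KB = 32KB per task
 */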

#ifndef __ASSEMBLY__

#include <asm/current.h>
#include <asm/page.h>

/*
 * We use ia64_psr(regs)->ri to determine which of the three
 * instructions in the bundle (16 bytes) took the sample.  Generate
 * the canonical representation by adding the slot number to the
 * instruction pointer (cr_iip).
 */
# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
# define instruction_pointer_set(regs, val)	\
({						\
	ia64_psr(regs)->ri = (val & 0xf);	\
	regs->cr_iip = (val & ~0xfULL);		\
})
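
/*
 * Illustrative sketch (example values are assumed): an IA-64 bundle is 16
 * bytes and holds three instruction slots, so cr_iip is bundle-aligned and
 * psr.ri (0..2) selects the slot.  instruction_pointer() folds the two into
 * one canonical address and instruction_pointer_set() splits it back out:
 *
 *	struct pt_regs *regs = task_pt_regs(current);
 *	unsigned long ip = instruction_pointer(regs);
 *				// e.g. 0xa000000100001230 (bundle) + 2 (slot)
 *	instruction_pointer_set(regs, ip);
 *				// cr_iip = ip & ~0xfULL, psr.ri = ip & 0xf
 */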

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->r12;
}

static inline int is_syscall_success(struct pt_regs *regs)
{
	return regs->r10 != -1;
}

static inline long regs_return_value(struct pt_regs *regs)
{
	if (is_syscall_success(regs))
		return regs->r8;
	else
		return -regs->r8;
}
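
/*
 * Illustrative sketch (example values are assumed, "child" is a hypothetical
 * traced task): ia64 reports syscall errors through a separate flag,
 * r10 == -1, with r8 holding the positive errno; regs_return_value()
 * converts that back to the usual negative-errno convention:
 *
 *	struct pt_regs *regs = task_pt_regs(child);
 *
 *	// e.g. a failed open(): regs->r10 == -1, regs->r8 == ENOENT
 *	if (!is_syscall_success(regs))
 *		pr_debug("syscall failed: %ld\n", regs_return_value(regs));
 *						// prints -ENOENT, i.e. -2
 */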

/* Conserve space in histogram by encoding slot bits in address
 * bits 2 and 3 rather than bits 0 and 1.
 */
#define profile_pc(regs)						\
({									\
	unsigned long __ip = instruction_pointer(regs);			\
	(__ip & ~3UL) + ((__ip & 3UL) << 2);				\
})
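
/*
 * Worked example (illustrative only): for a canonical IP of bundle + slot 2,
 * __ip & ~3UL recovers the bundle address and (__ip & 3UL) << 2 re-encodes
 * the slot number in address bits 2 and 3:
 *
 *	__ip             = 0xa000000100001232	// bundle ...230, slot 2
 *	profile_pc(regs) = 0xa000000100001230 + (2 << 2)
 *	                 = 0xa000000100001238
 */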

  /* given a pointer to a task_struct, return the user's pt_regs */
# define task_pt_regs(t)		(((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs)			((struct ia64_psr *) &(regs)->cr_ipsr)
# define user_mode(regs)		(((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
# define user_stack(task,regs)	((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
# define fsys_mode(task,regs)					\
  ({								\
	  struct task_struct *_task = (task);			\
	  struct pt_regs *_regs = (regs);			\
	  !user_mode(_regs) && user_stack(_task, _regs);	\
  })
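
/*
 * Illustrative sketch (hypothetical fault-handler logic, not kernel code):
 * user_mode() looks at the privilege level in cr_ipsr, while fsys_mode()
 * catches a task running at kernel privilege in the light-weight (fsyscall)
 * path, i.e. with its entry pt_regs still at the user-level position at the
 * top of the kernel stack:
 *
 *	if (user_mode(regs))
 *		;			// trap taken from user level
 *	else if (fsys_mode(current, regs))
 *		;			// trap taken in the fsyscall gate
 *	else
 *		;			// genuine kernel-mode trap
 */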

  /*
   * System call handlers that, upon successful completion, need to return a negative value
   * should call force_successful_syscall_return() right before returning.  On architectures
   * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
   * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
   * flag will not get set.  On architectures which do not support a separate error flag,
   * the macro is a no-op and the spurious error condition needs to be filtered out by some
   * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
   * or something along those lines).
   *
   * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
   */
# define force_successful_syscall_return()	(task_pt_regs(current)->r8 = 0)
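
/*
 * Illustrative sketch (hypothetical syscall, not kernel code): a handler
 * whose successful result is legitimately negative clears the error flag so
 * that the value is not delivered to userspace as an errno:
 *
 *	asmlinkage long sys_example(void)
 *	{
 *		long ret = -4096;	// valid negative result, not an error
 *
 *		force_successful_syscall_return();
 *		return ret;
 *	}
 */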

  struct task_struct;			/* forward decl */
  struct unw_frame_info;		/* forward decl */

  extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
					      unsigned long *);
  extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
			 unsigned long, long *);
  extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
			 unsigned long, long);
  extern void ia64_flush_fph (struct task_struct *);
  extern void ia64_sync_fph (struct task_struct *);
  extern void ia64_sync_krbs(void);
  extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
				  unsigned long, unsigned long);

  /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
  extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
  /* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
  extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);
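
/*
 * Illustrative sketch (assumed context): the result is a plain bitmask, so
 * bit N set means scratch register rN holds a NaT.  For example, checking
 * whether r8 of a stopped task is a NaT:
 *
 *	unsigned long nat = ia64_get_scratch_nat_bits(pt, scratch_unat);
 *
 *	if (nat & (1UL << 8))
 *		;		// r8 is a NaT
 */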

  extern void ia64_increment_ip (struct pt_regs *pt);
  extern void ia64_decrement_ip (struct pt_regs *pt);

  extern void ia64_ptrace_stop(void);
  #define arch_ptrace_stop() \
	ia64_ptrace_stop()
  #define arch_ptrace_stop_needed() \
	(!test_thread_flag(TIF_RESTORE_RSE))

  #define arch_has_single_step()  (1)
  #define arch_has_block_step()   (1)

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PTRACE_H */