// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <stdarg.h>

#include <trace/events/power.h>
#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>

#include <asm/alternative.h>
#include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

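/*
 * Enter a low-power state until a wake event arrives: complete outstanding
 * memory accesses with a full-system DSB, then wait for interrupt.
 */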
static void noinstr __cpu_do_idle(void)
{
    dsb(sy);
    wfi();
}

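/*
 * Idle variant for CPUs using GIC priority masking (pseudo-NMI): mask IRQs at
 * the PSTATE level, then open up the PMR so a pending interrupt can still
 * wake the WFI, and restore both on the way out.
 */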
static void noinstr __cpu_do_idle_irqprio(void)
{
    unsigned long pmr;
    unsigned long daif_bits;

    daif_bits = read_sysreg(daif);
    write_sysreg(daif_bits | PSR_I_BIT, daif);

    /*
     * Unmask PMR before going idle to make sure interrupts can
     * be raised.
     */
    pmr = gic_read_pmr();
    gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

    __cpu_do_idle();

    gic_write_pmr(pmr);
    write_sysreg(daif_bits, daif);
}

/*
 *    cpu_do_idle()
 *
 *    Idle the processor (wait for interrupt).
 *
 *    If the CPU supports priority masking we must do additional work to
 *    ensure that interrupts are not masked at the PMR (because the core will
 *    not wake up if we block the wake up signal in the interrupt controller).
 */
void noinstr cpu_do_idle(void)
{
    if (system_uses_irq_prio_masking()) {
        __cpu_do_idle_irqprio();
    } else {
        __cpu_do_idle();
    }
}

/*
 * This is our default idle handler.
 */
void noinstr arch_cpu_idle(void)
{
    /*
     * This should do all the clock switching and wait for interrupt
     * tricks
     */
    cpu_do_idle();
    raw_local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
    cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. We use the CPU hotplug
 * functionality embodied in smp_shutdown_nonboot_cpus() to achieve this.
 */
void machine_shutdown(void)
{
    smp_shutdown_nonboot_cpus(reboot_cpu);
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
    local_irq_disable();
    smp_send_stop();
    while (1) {
        ;
    }
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
    local_irq_disable();
    smp_send_stop();
    if (pm_power_off) {
        pm_power_off();
    }
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
    /* Disable interrupts first */
    local_irq_disable();
    smp_send_stop();

    do_kernel_pre_restart(cmd);

    /*
     * UpdateCapsule() depends on the system being reset via
     * ResetSystem().
     */
    if (efi_enabled(EFI_RUNTIME_SERVICES)) {
        efi_reboot(reboot_mode, NULL);
    }

    /* Now call the architecture specific reboot code. */
    if (arm_pm_restart) {
        arm_pm_restart(reboot_mode, cmd);
    } else {
        do_kernel_restart(cmd);
    }

    /*
     * Whoops - the architecture was unable to reboot.
     */
    printk("Reboot failed -- System halted\n");
    while (1) {
        ;
    }
}

#define bstr(suffix, str) [PSR_BTYPE_##suffix >> PSR_BTYPE_SHIFT] = (str)
static const char *const btypes[] = {
    bstr(NONE, "--"),
    bstr(JC, "jc"),
    bstr(C, "-c"),
    bstr(J, "j-")
};
#undef bstr

static void print_pstate(struct pt_regs *regs)
{
    u64 pstate = regs->pstate;

    if (compat_user_mode(regs)) {
        printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
               pstate,
               pstate & PSR_AA32_N_BIT ? 'N' : 'n',
               pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
               pstate & PSR_AA32_C_BIT ? 'C' : 'c',
               pstate & PSR_AA32_V_BIT ? 'V' : 'v',
               pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
               pstate & PSR_AA32_T_BIT ? "T32" : "A32",
               pstate & PSR_AA32_E_BIT ? "BE" : "LE",
               pstate & PSR_AA32_A_BIT ? 'A' : 'a',
               pstate & PSR_AA32_I_BIT ? 'I' : 'i',
               pstate & PSR_AA32_F_BIT ? 'F' : 'f');
    } else {
        const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >> PSR_BTYPE_SHIFT];

        printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO BTYPE=%s)\n",
               pstate,
               pstate & PSR_N_BIT ? 'N' : 'n',
               pstate & PSR_Z_BIT ? 'Z' : 'z',
               pstate & PSR_C_BIT ? 'C' : 'c',
               pstate & PSR_V_BIT ? 'V' : 'v',
               pstate & PSR_D_BIT ? 'D' : 'd',
               pstate & PSR_A_BIT ? 'A' : 'a',
               pstate & PSR_I_BIT ? 'I' : 'i',
               pstate & PSR_F_BIT ? 'F' : 'f',
               pstate & PSR_PAN_BIT ? '+' : '-',
               pstate & PSR_UAO_BIT ? '+' : '-',
               pstate & PSR_TCO_BIT ? '+' : '-',
               btype_str);
    }
}

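/*
 * Dump the saved register state: PSTATE, PC, LR, SP and the general purpose
 * registers, two per line. Compat (AArch32) tasks only expose r0-r12 plus the
 * banked SP/LR; kernel-mode PC/LR are printed symbolically with any pointer
 * authentication bits stripped from LR.
 */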
void __show_regs(struct pt_regs *regs)
{
    int i, top_reg;
    u64 lr, sp;

    if (compat_user_mode(regs)) {
        lr = regs->compat_lr;
        sp = regs->compat_sp;
        top_reg = 12;
    } else {
        lr = regs->regs[30];
        sp = regs->sp;
        top_reg = 29;
    }

    show_regs_print_info(KERN_DEFAULT);
    print_pstate(regs);

    if (!user_mode(regs)) {
        printk("pc : %pS\n", (void *)regs->pc);
        printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
    } else {
        printk("pc : %016llx\n", regs->pc);
        printk("lr : %016llx\n", lr);
    }

    printk("sp : %016llx\n", sp);

    if (system_uses_irq_prio_masking()) {
        printk("pmr_save: %08llx\n", regs->pmr_save);
    }

    i = top_reg;

    while (i >= 0) {
        printk("x%-2d: %016llx ", i, regs->regs[i]);
        i--;

        /* print two registers per output line */
        if (i % 2 == 0) {
            pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
            i--;
        }

        pr_cont("\n");
    }
}

void show_regs(struct pt_regs *regs)
{
    __show_regs(regs);
    dump_backtrace(regs, NULL, KERN_DEFAULT);
}

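/*
 * Called from flush_thread() on exec: clear tpidr_el0 and, for compat tasks,
 * the shadow tp_value and tpidrro_el0 as well.
 */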
static void tls_thread_flush(void)
{
    write_sysreg(0, tpidr_el0);

    if (is_compat_task()) {
        current->thread.uw.tp_value = 0;

        /*
         * We need to ensure ordering between the shadow state and the
         * hardware state, so that we don't corrupt the hardware state
         * with a stale shadow state during context switch.
         */
        barrier();
        write_sysreg(0, tpidrro_el0);
    }
}

static void flush_tagged_addr_state(void)
{
    if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI)) {
        clear_thread_flag(TIF_TAGGED_ADDR);
    }
}

void flush_thread(void)
{
    fpsimd_flush_thread();
    tls_thread_flush();
    flush_ptrace_hw_breakpoint(current);
    flush_tagged_addr_state();
    flush_mte_state();
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
    fpsimd_release_task(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
    if (current->mm) {
        fpsimd_preserve_current_state();
    }
    *dst = *src;

    /* We rely on the above assignment to initialize dst's thread_flags: */
    BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));

    /*
     * Detach src's sve_state (if any) from dst so that it does not
     * get erroneously used or freed prematurely.  dst's sve_state
     * will be allocated on demand later on if dst uses SVE.
     * For consistency, also clear TIF_SVE here: this could be done
     * later in copy_process(), but to avoid tripping up future
     * maintainers it is best not to leave TIF_SVE and sve_state in
     * an inconsistent state, even temporarily.
     */
    dst->thread.sve_state = NULL;
    clear_tsk_thread_flag(dst, TIF_SVE);

    /* clear any pending asynchronous tag fault raised by the parent */
    clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);

    return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

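/*
 * Set up the register and thread state for a newly forked task.
 *
 * User threads get a copy of the parent's pt_regs with x0 zeroed (the child's
 * return value), optionally a new stack pointer and, with CLONE_SETTLS, a new
 * TLS value. Kernel threads get zeroed pt_regs running in EL1h, with
 * stack_start/stk_sz stashed in x19/x20 of the saved cpu_context for
 * ret_from_fork to consume. Either way the saved context resumes at
 * ret_from_fork with sp pointing at the child's pt_regs.
 */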
int copy_thread(unsigned long clone_flags, unsigned long stack_start, unsigned long stk_sz,
                struct task_struct *p, unsigned long tls)
{
    struct pt_regs *childregs = task_pt_regs(p);

    memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

    /*
     * In case p was allocated the same task_struct pointer as some
     * other recently-exited task, make sure p is disassociated from
     * any cpu that may have run that now-exited task recently.
     * Otherwise we could erroneously skip reloading the FPSIMD
     * registers for p.
     */
    fpsimd_flush_task_state(p);

    ptrauth_thread_init_kernel(p);

    if (likely(!(p->flags & PF_KTHREAD))) {
        *childregs = *current_pt_regs();
        childregs->regs[0] = 0;

        /*
         * Read the current TLS pointer from tpidr_el0 as it may be
         * out-of-sync with the saved value.
         */
        *task_user_tls(p) = read_sysreg(tpidr_el0);

        if (stack_start) {
            if (is_compat_thread(task_thread_info(p))) {
                childregs->compat_sp = stack_start;
            } else {
                childregs->sp = stack_start;
            }
        }

        /*
         * If a TLS pointer was passed to clone, use it for the new
         * thread.
         */
        if (clone_flags & CLONE_SETTLS) {
            p->thread.uw.tp_value = tls;
        }
    } else {
        memset(childregs, 0, sizeof(struct pt_regs));

        childregs->pstate = PSR_MODE_EL1h;
        if (IS_ENABLED(CONFIG_ARM64_UAO) && cpus_have_const_cap(ARM64_HAS_UAO)) {
            childregs->pstate |= PSR_UAO_BIT;
        }

        spectre_v4_enable_task_mitigation(p);

        if (system_uses_irq_prio_masking()) {
            childregs->pmr_save = GIC_PRIO_IRQON;
        }

        p->thread.cpu_context.x19 = stack_start;
        p->thread.cpu_context.x20 = stk_sz;
    }
    p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
    p->thread.cpu_context.sp = (unsigned long)childregs;

    ptrace_hw_copy_thread(p);

    return 0;
}

void tls_preserve_current_state(void)
{
    *task_user_tls(current) = read_sysreg(tpidr_el0);
}

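/*
 * Switch the user TLS registers: save the outgoing task's tpidr_el0 and
 * install the incoming task's value. tpidrro_el0 carries the compat TLS
 * pointer; for native tasks it is zeroed, except when the kernel is unmapped
 * at EL0 (KPTI), where the exception entry code manages it.
 */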
static void tls_thread_switch(struct task_struct *next)
{
    tls_preserve_current_state();

    if (is_compat_thread(task_thread_info(next))) {
        write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
    } else if (!arm64_kernel_unmapped_at_el0()) {
        write_sysreg(0, tpidrro_el0);
    }

    write_sysreg(*task_user_tls(next), tpidr_el0);
}

/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
    if (IS_ENABLED(CONFIG_ARM64_UAO)) {
        if (task_thread_info(next)->addr_limit == KERNEL_DS) {
            asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
        } else {
            asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
        }
    }
}

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
    /*
     * Nothing to do for kernel threads, but 'regs' may be junk
     * (e.g. idle task) so check the flags and bail early.
     */
    if (unlikely(next->flags & PF_KTHREAD)) {
        return;
    }

    /*
     * If all CPUs implement the SSBS extension, then we just need to
     * context-switch the PSTATE field.
     */
    if (cpus_have_const_cap(ARM64_SSBS)) {
        return;
    }

    spectre_v4_enable_task_mitigation(next);
}

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
    __this_cpu_write(__entry_task, next);
}

/*
 * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
 * Assuming the virtual counter is enabled at the beginning of times:
 *
 * - disable access when switching from a 64bit task to a 32bit task
 * - enable access when switching from a 32bit task to a 64bit task
 */
static void erratum_1418040_thread_switch(struct task_struct *next)
{
    if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
        !this_cpu_has_cap(ARM64_WORKAROUND_1418040)) {
        return;
    }

    if (is_compat_thread(task_thread_info(next))) {
        sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
    } else {
        sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
    }
}

static void erratum_1418040_new_exec(void)
{
    preempt_disable();
    erratum_1418040_thread_switch(current);
    preempt_enable();
}

/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
                                                    struct task_struct *next)
{
    struct task_struct *last;

    fpsimd_thread_switch(next);
    tls_thread_switch(next);
    hw_breakpoint_thread_switch(next);
    contextidr_thread_switch(next);
    entry_task_switch(next);
    uao_thread_switch(next);
    ssbs_thread_switch(next);
    erratum_1418040_thread_switch(next);

    /*
     * Complete any pending TLB or cache maintenance on this CPU in case
     * the thread migrates to a different CPU.
     * This full barrier is also required by the membarrier system
     * call.
     */
    dsb(ish);

    /*
     * MTE thread switching must happen after the DSB above to ensure that
     * any asynchronous tag check faults have been logged in the TFSR*_EL1
     * registers.
     */
    mte_thread_switch(next);

    /* the actual thread switch */
    last = cpu_switch_to(prev, next);

    return last;
}

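/*
 * Find the first program counter outside the scheduler that a sleeping task
 * is blocked in, unwinding at most 16 frames of its saved stack. Returns 0
 * for running tasks, the current task, or when unwinding fails.
 */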
unsigned long get_wchan(struct task_struct *p)
{
    struct stackframe frame;
    unsigned long stack_page, ret = 0;
    int count = 0;

    if (!p || p == current || p->state == TASK_RUNNING) {
        return 0;
    }

    stack_page = (unsigned long)try_get_task_stack(p);
    if (!stack_page) {
        return 0;
    }

    start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));

    do {
        if (unwind_frame(p, &frame)) {
            goto out;
        }
        if (!in_sched_functions(frame.pc)) {
            ret = frame.pc;
            goto out;
        }
    } while (count++ < 16);

out:
    put_task_stack(p);
    return ret;
}

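/*
 * Randomise the initial user stack pointer within a page, unless address
 * space randomisation is disabled for this task, and align it to 16 bytes.
 */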
unsigned long arch_align_stack(unsigned long sp)
{
    if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) {
        sp -= get_random_int() & ~PAGE_MASK;
    }
    return sp & ~0xf;
}

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
    current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;

    ptrauth_thread_init_user(current);
    erratum_1418040_new_exec();

    if (task_spec_ssb_noexec(current)) {
        arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE);
    }
}

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

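/*
 * prctl(PR_SET_TAGGED_ADDR_CTRL) handler: opt a native task in or out of the
 * tagged address ABI and, where MTE is supported, update the MTE controls via
 * set_mte_ctrl(). Rejected for compat tasks and when enabling is blocked by
 * the abi.tagged_addr_disabled sysctl.
 */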
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
    unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
    struct thread_info *ti = task_thread_info(task);

    if (is_compat_thread(ti)) {
        return -EINVAL;
    }

    if (system_supports_mte()) {
        valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK;
    }

    if (arg & ~valid_mask) {
        return -EINVAL;
    }

    /*
     * Do not allow the enabling of the tagged address ABI if globally
     * disabled via sysctl abi.tagged_addr_disabled.
     */
    if ((arg & PR_TAGGED_ADDR_ENABLE) && tagged_addr_disabled) {
        return -EINVAL;
    }

    if (set_mte_ctrl(task, arg) != 0) {
        return -EINVAL;
    }

    update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);

    return 0;
}

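/*
 * prctl(PR_GET_TAGGED_ADDR_CTRL) handler: report whether the tagged address
 * ABI is enabled for the task, combined with the MTE controls.
 */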
long get_tagged_addr_ctrl(struct task_struct *task)
{
    long ret = 0;
    struct thread_info *ti = task_thread_info(task);

    if (is_compat_thread(ti)) {
        return -EINVAL;
    }

    if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR)) {
        ret = PR_TAGGED_ADDR_ENABLE;
    }

    ret |= get_mte_ctrl(task);

    return ret;
}

/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */
static struct ctl_table tagged_addr_sysctl_table[] = {
    {
        .procname = "tagged_addr_disabled",
        .mode = 0644,
        .data = &tagged_addr_disabled,
        .maxlen = sizeof(int),
        .proc_handler = proc_dointvec_minmax,
        .extra1 = SYSCTL_ZERO,
        .extra2 = SYSCTL_ONE,
    },
    {}
};

static int __init tagged_addr_init(void)
{
    if (!register_sysctl("abi", tagged_addr_sysctl_table)) {
        return -EINVAL;
    }
    return 0;
}

core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */

asmlinkage void __sched arm64_preempt_schedule_irq(void)
{
    lockdep_assert_irqs_disabled();

    /*
     * Preempting a task from an IRQ means we leave copies of PSTATE
     * on the stack. cpufeature's enable calls may modify PSTATE, but
     * resuming one of these preempted tasks would undo those changes.
     *
     * Only allow a task to be preempted once cpufeatures have been
     * enabled.
     */
    if (system_capabilities_finalized()) {
        preempt_schedule_irq();
    }
}

#ifdef CONFIG_BINFMT_ELF
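/*
 * Upgrade executable mappings of BTI-annotated ELF objects to PROT_BTI. The
 * kernel only does this for the interpreter of a dynamically linked
 * executable or for a static executable; anything else is the interpreter's
 * responsibility (see below).
 */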
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
                         bool has_interp, bool is_interp)
{
    /*
     * For dynamically linked executables the interpreter is
     * responsible for setting PROT_BTI on everything except
     * itself.
     */
    if (is_interp != has_interp) {
        return prot;
    }

    if (!(state->flags & ARM64_ELF_BTI)) {
        return prot;
    }

    if (prot & PROT_EXEC) {
        prot |= PROT_BTI;
    }

    return prot;
}
#endif