// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2011
 *
 * Authors: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>

int enter_vmx_usercopy(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();
	/*
	 * We need to disable page faults as they can call schedule and
	 * thus make us lose the VMX context. So on page faults, we just
	 * fail which will cause a fallback to the normal non-vmx copy.
	 */
	pagefault_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * This function must return 0 because we tail call optimise when calling
 * from __copy_tofrom_user_power7 which returns 0 on success.
 */
int exit_vmx_usercopy(void)
{
	disable_kernel_altivec();
	pagefault_enable();
	preempt_enable_no_resched();
	/*
	 * Must never explicitly call schedule (including preempt_enable())
	 * while in a kuap-unlocked user copy, because the AMR register will
	 * not be saved and restored across context switch. However preempt
	 * kernels need to be preempted as soon as possible if need_resched is
	 * set and we are preemptible. The hack here is to schedule a
	 * decrementer to fire here and reschedule for us if necessary.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT) && need_resched())
		set_dec(1);
	return 0;
}
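
/*
 * Illustrative sketch (not part of the original file): how a copy routine
 * is expected to pair enter_vmx_usercopy()/exit_vmx_usercopy(). The real
 * caller is the assembly routine __copy_tofrom_user_power7; the names
 * vmx_copy_sketch and plain_copy_fallback below are hypothetical and only
 * show the contract: a zero return from enter_vmx_usercopy() means "VMX is
 * unavailable, use the non-VMX fallback", and exit_vmx_usercopy()'s zero
 * return doubles as the "0 bytes left to copy" success value when it is
 * tail called at the end of the copy.
 */
#if 0	/* illustration only, never compiled */
static unsigned long vmx_copy_sketch(void *to, const void *from,
				     unsigned long n)
{
	if (!enter_vmx_usercopy())
		return plain_copy_fallback(to, from, n); /* hypothetical */

	/* ... VMX-accelerated copy of n bytes, faults abort the copy ... */

	return exit_vmx_usercopy();	/* tail call, returns 0 = success */
}
#endif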

int enter_vmx_ops(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * All calls to this function will be optimised into tail calls. We are
 * passed a pointer to the destination which we return as required by a
 * memcpy implementation.
 */
void *exit_vmx_ops(void *dest)
{
	disable_kernel_altivec();
	preempt_enable();
	return dest;
}
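
/*
 * Illustrative sketch (not part of the original file): memcpy-style callers
 * such as the assembly routine memcpy_power7 are expected to tail call
 * exit_vmx_ops() with the destination pointer, so the helper's return value
 * becomes the memcpy return value. The function vmx_memcpy_sketch below is
 * hypothetical, shown only to illustrate that contract.
 */
#if 0	/* illustration only, never compiled */
static void *vmx_memcpy_sketch(void *dest, const void *src, size_t n)
{
	if (!enter_vmx_ops())
		return memcpy(dest, src, n);	/* non-VMX fallback */

	/* ... VMX-accelerated copy of n bytes into dest ... */

	return exit_vmx_ops(dest);	/* tail call, returns dest */
}
#endif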