/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
#include <linux/smp.h>
#include <asm/kvm_guest.h>
#include <asm/cputhreads.h>

DECLARE_STATIC_KEY_FALSE(shared_processor);

static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

u64 pseries_paravirt_steal_clock(int cpu);

static inline u64 paravirt_steal_clock(int cpu)
{
	return pseries_paravirt_steal_clock(cpu);
}
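/*
 * Illustrative sketch, not part of this header: generic time accounting
 * (steal_account_process_time() in kernel/sched/cputime.c) treats this
 * as a monotonic per-CPU counter and accounts the delta between
 * samples, roughly:
 *
 *	steal = paravirt_steal_clock(smp_processor_id());
 *	steal -= this_rq()->prev_steal_time;
 */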
#endif

/* If bit 0 is set, the cpu has been ceded, conferred, or preempted */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
	return be32_to_cpu(yield_count);
}
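/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * sample the count once, test the dispatch/yield bit, and pass the same
 * sample to yield_to_preempted() below, so the hypervisor can ignore
 * the confer if the target has since been redispatched:
 *
 *	u32 yc = yield_count_of(target_cpu);
 *	if (yc & 1)
 *		yield_to_preempted(target_cpu, yc);
 */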

/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath yields if
 * it cannot acquire the lock, and the unlock slowpath might prod if a waiter
 * has yielded. So this may not be a problem for simple spin locks because the
 * tracing does not technically recurse on the lock, but we avoid it anyway.
 *
 * However, the queued spinlock contended path is more strictly ordered: the
 * H_CONFER hcall is made after the task has queued itself on the lock, so
 * recursing on that lock will cause the task to queue up again behind the
 * first instance (or worse: queued spinlocks use tricks that assume a context
 * never waits on more than one spinlock, so such recursion may cause random
 * corruption in the lock code).
 */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}

static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}

static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
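/*
 * Illustrative sketch of how a queued-lock slowpath can pair these
 * hcalls (node, set_waiter_yielded() and waiter_yielded() are
 * hypothetical helpers, not part of this header):
 *
 *	waiter, unable to make progress in the queue:
 *		set_waiter_yielded(node);
 *		yield_to_any();			(cede to any other vCPU)
 *
 *	unlocker, after releasing the lock:
 *		if (waiter_yielded(node))
 *			prod_cpu(node->cpu);	(wake the ceded vCPU)
 */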
#else
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	/*
	 * The dispatch/yield bit alone is an imperfect indicator of
	 * whether the hypervisor has dispatched @cpu to run on a physical
	 * processor. When it is clear, @cpu is definitely not preempted.
	 * But when it is set, it means only that it *might* be, subject to
	 * other conditions. So we check other properties of the VM and
	 * @cpu first, resorting to the yield count last.
	 */

	/*
	 * Hypervisor preemption isn't possible in dedicated processor
	 * mode by definition.
	 */
	if (!is_shared_processor())
		return false;

#ifdef CONFIG_PPC_SPLPAR
	if (!is_kvm_guest()) {
		int first_cpu;

		/*
		 * The result of vcpu_is_preempted() is used in a
		 * speculative way, and is always subject to invalidation
		 * by events internal and external to Linux. While we can
		 * be called in preemptable context (in the Linux sense),
		 * we're not accessing per-cpu resources in a way that can
		 * race destructively with Linux scheduler preemption and
		 * migration, and callers can tolerate the potential for
		 * error introduced by sampling the CPU index without
		 * pinning the task to it. So it is permissible to use
		 * raw_smp_processor_id() here to defeat the preempt debug
		 * warnings that can arise from using smp_processor_id()
		 * in arbitrary contexts.
		 */
		first_cpu = cpu_first_thread_sibling(raw_smp_processor_id());

		/*
		 * The PowerVM hypervisor dispatches VMs on a whole core
		 * basis. So we know that a thread sibling of the local CPU
		 * cannot have been preempted by the hypervisor, even if it
		 * has called H_CONFER, which will set the yield bit.
		 */
		if (cpu_first_thread_sibling(cpu) == first_cpu)
			return false;
	}
#endif

	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}
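/*
 * Illustrative sketch (owner_running() is hypothetical, task_cpu() is
 * the real helper): optimistic-spin code uses vcpu_is_preempted() to
 * stop busy-waiting on an owner whose vCPU is not running, since
 * spinning then only burns cycles:
 *
 *	while (owner_running(lock)) {
 *		if (vcpu_is_preempted(task_cpu(owner)))
 *			break;
 *		cpu_relax();
 *	}
 */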

static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */