/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
DECLARE_STATIC_KEY_FALSE(shared_processor);

static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

/*
 * The yield count lives in the target CPU's lppaca and is maintained by
 * the hypervisor: if bit 0 is set, the CPU has been preempted (its vCPU
 * is not currently dispatched).
 */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
	return be32_to_cpu(yield_count);
}
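
/*
 * Illustrative sketch, not part of this header: because the count changes
 * whenever the vCPU's dispatch state changes, a caller can snapshot it and
 * later tell whether the target vCPU was rescheduled in between. The names
 * below are hypothetical.
 *
 *	u32 before = yield_count_of(cpu);
 *	poll_something();
 *	if (yield_count_of(cpu) != before)
 *		handle_target_rescheduled();
 */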

/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath
 * yields if it cannot acquire the lock, and the unlock slowpath may prod
 * if a waiter has yielded. So this may not be a problem for simple spin
 * locks because the tracing does not technically recurse on the lock, but
 * we avoid it anyway.
 *
 * However, the queued spinlock contended path is more strictly ordered:
 * the H_CONFER hcall is made after the task has queued itself on the
 * lock, so recursing on that lock will cause the task to queue up again
 * behind the first instance (or worse: queued spinlocks use tricks that
 * assume a context never waits on more than one spinlock, so such
 * recursion may cause random corruption in the lock code).
 */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}
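
/*
 * Sketch of the intended pattern (an assumption based on the H_CONFER
 * semantics above, not code from this header): confer our time slice to
 * a preempted vCPU, passing the yield count we observed so the hypervisor
 * can treat the confer as a no-op if the target has since been dispatched
 * again. "owner" is a hypothetical CPU number.
 *
 *	u32 yc = yield_count_of(owner);
 *	if (yc & 1)
 *		yield_to_preempted(owner, yc);
 */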

static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}

static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
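
/*
 * Sketch of the confer/prod pairing (hypothetical call sites, not part of
 * this header): a waiter that cannot make progress gives up its time
 * slice to any runnable vCPU, and the unlocker wakes it again later.
 *
 *	// on the waiting CPU
 *	yield_to_any();
 *
 *	// on the CPU releasing the lock
 *	prod_cpu(waiter_cpu);
 */
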
#else
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

/*
 * The ___bad_* functions are declared but never defined. Callers of the
 * stubs below are expected to be compiled out (e.g. guarded by
 * is_shared_processor(), which is constant false here); any call that
 * survives compilation leaves an undefined reference and fails at link
 * time.
 */
extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!is_shared_processor())
		return false;
	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}
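
/*
 * Illustrative sketch, not part of this header: generic spin-wait code
 * can use vcpu_is_preempted() to stop burning cycles behind a lock holder
 * whose vCPU is not running. Names are hypothetical.
 *
 *	while (!try_lock(lock)) {
 *		if (vcpu_is_preempted(lock_owner_cpu(lock)))
 *			break;		// block instead of spinning
 *		cpu_relax();
 *	}
 */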

/*
 * Unlock is "native" (needs no paravirt assistance) unless we run on a
 * shared processor, where the slowpath may have to prod a yielded waiter.
 */
static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */