162306a36Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0-only
262306a36Sopenharmony_ci#define _GNU_SOURCE /* for program_invocation_short_name */
362306a36Sopenharmony_ci#include <errno.h>
462306a36Sopenharmony_ci#include <fcntl.h>
562306a36Sopenharmony_ci#include <pthread.h>
662306a36Sopenharmony_ci#include <sched.h>
762306a36Sopenharmony_ci#include <stdio.h>
862306a36Sopenharmony_ci#include <stdlib.h>
962306a36Sopenharmony_ci#include <string.h>
1062306a36Sopenharmony_ci#include <signal.h>
1162306a36Sopenharmony_ci#include <syscall.h>
1262306a36Sopenharmony_ci#include <sys/ioctl.h>
1362306a36Sopenharmony_ci#include <sys/sysinfo.h>
1462306a36Sopenharmony_ci#include <asm/barrier.h>
1562306a36Sopenharmony_ci#include <linux/atomic.h>
1662306a36Sopenharmony_ci#include <linux/rseq.h>
1762306a36Sopenharmony_ci#include <linux/unistd.h>
1862306a36Sopenharmony_ci
1962306a36Sopenharmony_ci#include "kvm_util.h"
2062306a36Sopenharmony_ci#include "processor.h"
2162306a36Sopenharmony_ci#include "test_util.h"
2262306a36Sopenharmony_ci
2362306a36Sopenharmony_ci#include "../rseq/rseq.c"
2462306a36Sopenharmony_ci
2562306a36Sopenharmony_ci/*
2662306a36Sopenharmony_ci * Any bug related to task migration is likely to be timing-dependent; perform
2762306a36Sopenharmony_ci * a large number of migrations to reduce the odds of a false negative.
2862306a36Sopenharmony_ci */
2962306a36Sopenharmony_ci#define NR_TASK_MIGRATIONS 100000
3062306a36Sopenharmony_ci
3162306a36Sopenharmony_cistatic pthread_t migration_thread;
3262306a36Sopenharmony_cistatic cpu_set_t possible_mask;
3362306a36Sopenharmony_cistatic int min_cpu, max_cpu;
3462306a36Sopenharmony_cistatic bool done;
3562306a36Sopenharmony_ci
3662306a36Sopenharmony_cistatic atomic_t seq_cnt;
3762306a36Sopenharmony_ci
/*
 * Guest entry point: loop forever, synchronously exiting to userspace via
 * GUEST_SYNC on every iteration so the host can check rseq state between runs.
 */
static void guest_code(void)
{
	for (;;)
		GUEST_SYNC(0);
}
4362306a36Sopenharmony_ci
4462306a36Sopenharmony_cistatic int next_cpu(int cpu)
4562306a36Sopenharmony_ci{
4662306a36Sopenharmony_ci	/*
4762306a36Sopenharmony_ci	 * Advance to the next CPU, skipping those that weren't in the original
4862306a36Sopenharmony_ci	 * affinity set.  Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's
4962306a36Sopenharmony_ci	 * data storage is considered as opaque.  Note, if this task is pinned
5062306a36Sopenharmony_ci	 * to a small set of discontigous CPUs, e.g. 2 and 1023, this loop will
5162306a36Sopenharmony_ci	 * burn a lot cycles and the test will take longer than normal to
5262306a36Sopenharmony_ci	 * complete.
5362306a36Sopenharmony_ci	 */
5462306a36Sopenharmony_ci	do {
5562306a36Sopenharmony_ci		cpu++;
5662306a36Sopenharmony_ci		if (cpu > max_cpu) {
5762306a36Sopenharmony_ci			cpu = min_cpu;
5862306a36Sopenharmony_ci			TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
5962306a36Sopenharmony_ci				    "Min CPU = %d must always be usable", cpu);
6062306a36Sopenharmony_ci			break;
6162306a36Sopenharmony_ci		}
6262306a36Sopenharmony_ci	} while (!CPU_ISSET(cpu, &possible_mask));
6362306a36Sopenharmony_ci
6462306a36Sopenharmony_ci	return cpu;
6562306a36Sopenharmony_ci}
6662306a36Sopenharmony_ci
6762306a36Sopenharmony_cistatic void *migration_worker(void *__rseq_tid)
6862306a36Sopenharmony_ci{
6962306a36Sopenharmony_ci	pid_t rseq_tid = (pid_t)(unsigned long)__rseq_tid;
7062306a36Sopenharmony_ci	cpu_set_t allowed_mask;
7162306a36Sopenharmony_ci	int r, i, cpu;
7262306a36Sopenharmony_ci
7362306a36Sopenharmony_ci	CPU_ZERO(&allowed_mask);
7462306a36Sopenharmony_ci
7562306a36Sopenharmony_ci	for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
7662306a36Sopenharmony_ci		CPU_SET(cpu, &allowed_mask);
7762306a36Sopenharmony_ci
7862306a36Sopenharmony_ci		/*
7962306a36Sopenharmony_ci		 * Bump the sequence count twice to allow the reader to detect
8062306a36Sopenharmony_ci		 * that a migration may have occurred in between rseq and sched
8162306a36Sopenharmony_ci		 * CPU ID reads.  An odd sequence count indicates a migration
8262306a36Sopenharmony_ci		 * is in-progress, while a completely different count indicates
8362306a36Sopenharmony_ci		 * a migration occurred since the count was last read.
8462306a36Sopenharmony_ci		 */
8562306a36Sopenharmony_ci		atomic_inc(&seq_cnt);
8662306a36Sopenharmony_ci
8762306a36Sopenharmony_ci		/*
8862306a36Sopenharmony_ci		 * Ensure the odd count is visible while getcpu() isn't
8962306a36Sopenharmony_ci		 * stable, i.e. while changing affinity is in-progress.
9062306a36Sopenharmony_ci		 */
9162306a36Sopenharmony_ci		smp_wmb();
9262306a36Sopenharmony_ci		r = sched_setaffinity(rseq_tid, sizeof(allowed_mask), &allowed_mask);
9362306a36Sopenharmony_ci		TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
9462306a36Sopenharmony_ci			    errno, strerror(errno));
9562306a36Sopenharmony_ci		smp_wmb();
9662306a36Sopenharmony_ci		atomic_inc(&seq_cnt);
9762306a36Sopenharmony_ci
9862306a36Sopenharmony_ci		CPU_CLR(cpu, &allowed_mask);
9962306a36Sopenharmony_ci
10062306a36Sopenharmony_ci		/*
10162306a36Sopenharmony_ci		 * Wait 1-10us before proceeding to the next iteration and more
10262306a36Sopenharmony_ci		 * specifically, before bumping seq_cnt again.  A delay is
10362306a36Sopenharmony_ci		 * needed on three fronts:
10462306a36Sopenharmony_ci		 *
10562306a36Sopenharmony_ci		 *  1. To allow sched_setaffinity() to prompt migration before
10662306a36Sopenharmony_ci		 *     ioctl(KVM_RUN) enters the guest so that TIF_NOTIFY_RESUME
10762306a36Sopenharmony_ci		 *     (or TIF_NEED_RESCHED, which indirectly leads to handling
10862306a36Sopenharmony_ci		 *     NOTIFY_RESUME) is handled in KVM context.
10962306a36Sopenharmony_ci		 *
11062306a36Sopenharmony_ci		 *     If NOTIFY_RESUME/NEED_RESCHED is set after KVM enters
11162306a36Sopenharmony_ci		 *     the guest, the guest will trigger a IO/MMIO exit all the
11262306a36Sopenharmony_ci		 *     way to userspace and the TIF flags will be handled by
11362306a36Sopenharmony_ci		 *     the generic "exit to userspace" logic, not by KVM.  The
11462306a36Sopenharmony_ci		 *     exit to userspace is necessary to give the test a chance
11562306a36Sopenharmony_ci		 *     to check the rseq CPU ID (see #2).
11662306a36Sopenharmony_ci		 *
11762306a36Sopenharmony_ci		 *     Alternatively, guest_code() could include an instruction
11862306a36Sopenharmony_ci		 *     to trigger an exit that is handled by KVM, but any such
11962306a36Sopenharmony_ci		 *     exit requires architecture specific code.
12062306a36Sopenharmony_ci		 *
12162306a36Sopenharmony_ci		 *  2. To let ioctl(KVM_RUN) make its way back to the test
12262306a36Sopenharmony_ci		 *     before the next round of migration.  The test's check on
12362306a36Sopenharmony_ci		 *     the rseq CPU ID must wait for migration to complete in
12462306a36Sopenharmony_ci		 *     order to avoid false positive, thus any kernel rseq bug
12562306a36Sopenharmony_ci		 *     will be missed if the next migration starts before the
12662306a36Sopenharmony_ci		 *     check completes.
12762306a36Sopenharmony_ci		 *
12862306a36Sopenharmony_ci		 *  3. To ensure the read-side makes efficient forward progress,
12962306a36Sopenharmony_ci		 *     e.g. if getcpu() involves a syscall. Stalling the read-side
13062306a36Sopenharmony_ci		 *     means the test will spend more time waiting for getcpu()
13162306a36Sopenharmony_ci		 *     to stabilize and less time trying to hit the timing-dependent
13262306a36Sopenharmony_ci		 *     bug.
13362306a36Sopenharmony_ci		 *
13462306a36Sopenharmony_ci		 * Because any bug in this area is likely to be timing-dependent,
13562306a36Sopenharmony_ci		 * run with a range of delays at 1us intervals from 1us to 10us
13662306a36Sopenharmony_ci		 * as a best effort to avoid tuning the test to the point where
13762306a36Sopenharmony_ci		 * it can hit _only_ the original bug and not detect future
13862306a36Sopenharmony_ci		 * regressions.
13962306a36Sopenharmony_ci		 *
14062306a36Sopenharmony_ci		 * The original bug can reproduce with a delay up to ~500us on
14162306a36Sopenharmony_ci		 * x86-64, but starts to require more iterations to reproduce
14262306a36Sopenharmony_ci		 * as the delay creeps above ~10us, and the average runtime of
14362306a36Sopenharmony_ci		 * each iteration obviously increases as well.  Cap the delay
14462306a36Sopenharmony_ci		 * at 10us to keep test runtime reasonable while minimizing
14562306a36Sopenharmony_ci		 * potential coverage loss.
14662306a36Sopenharmony_ci		 *
14762306a36Sopenharmony_ci		 * The lower bound for reproducing the bug is likely below 1us,
14862306a36Sopenharmony_ci		 * e.g. failures occur on x86-64 with nanosleep(0), but at that
14962306a36Sopenharmony_ci		 * point the overhead of the syscall likely dominates the delay.
15062306a36Sopenharmony_ci		 * Use usleep() for simplicity and to avoid unnecessary kernel
15162306a36Sopenharmony_ci		 * dependencies.
15262306a36Sopenharmony_ci		 */
15362306a36Sopenharmony_ci		usleep((i % 10) + 1);
15462306a36Sopenharmony_ci	}
15562306a36Sopenharmony_ci	done = true;
15662306a36Sopenharmony_ci	return NULL;
15762306a36Sopenharmony_ci}
15862306a36Sopenharmony_ci
15962306a36Sopenharmony_cistatic void calc_min_max_cpu(void)
16062306a36Sopenharmony_ci{
16162306a36Sopenharmony_ci	int i, cnt, nproc;
16262306a36Sopenharmony_ci
16362306a36Sopenharmony_ci	TEST_REQUIRE(CPU_COUNT(&possible_mask) >= 2);
16462306a36Sopenharmony_ci
16562306a36Sopenharmony_ci	/*
16662306a36Sopenharmony_ci	 * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
16762306a36Sopenharmony_ci	 * this task is affined to in order to reduce the time spent querying
16862306a36Sopenharmony_ci	 * unusable CPUs, e.g. if this task is pinned to a small percentage of
16962306a36Sopenharmony_ci	 * total CPUs.
17062306a36Sopenharmony_ci	 */
17162306a36Sopenharmony_ci	nproc = get_nprocs_conf();
17262306a36Sopenharmony_ci	min_cpu = -1;
17362306a36Sopenharmony_ci	max_cpu = -1;
17462306a36Sopenharmony_ci	cnt = 0;
17562306a36Sopenharmony_ci
17662306a36Sopenharmony_ci	for (i = 0; i < nproc; i++) {
17762306a36Sopenharmony_ci		if (!CPU_ISSET(i, &possible_mask))
17862306a36Sopenharmony_ci			continue;
17962306a36Sopenharmony_ci		if (min_cpu == -1)
18062306a36Sopenharmony_ci			min_cpu = i;
18162306a36Sopenharmony_ci		max_cpu = i;
18262306a36Sopenharmony_ci		cnt++;
18362306a36Sopenharmony_ci	}
18462306a36Sopenharmony_ci
18562306a36Sopenharmony_ci	__TEST_REQUIRE(cnt >= 2,
18662306a36Sopenharmony_ci		       "Only one usable CPU, task migration not possible");
18762306a36Sopenharmony_ci}
18862306a36Sopenharmony_ci
18962306a36Sopenharmony_ciint main(int argc, char *argv[])
19062306a36Sopenharmony_ci{
19162306a36Sopenharmony_ci	int r, i, snapshot;
19262306a36Sopenharmony_ci	struct kvm_vm *vm;
19362306a36Sopenharmony_ci	struct kvm_vcpu *vcpu;
19462306a36Sopenharmony_ci	u32 cpu, rseq_cpu;
19562306a36Sopenharmony_ci
19662306a36Sopenharmony_ci	r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
19762306a36Sopenharmony_ci	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
19862306a36Sopenharmony_ci		    strerror(errno));
19962306a36Sopenharmony_ci
20062306a36Sopenharmony_ci	calc_min_max_cpu();
20162306a36Sopenharmony_ci
20262306a36Sopenharmony_ci	r = rseq_register_current_thread();
20362306a36Sopenharmony_ci	TEST_ASSERT(!r, "rseq_register_current_thread failed, errno = %d (%s)",
20462306a36Sopenharmony_ci		    errno, strerror(errno));
20562306a36Sopenharmony_ci
20662306a36Sopenharmony_ci	/*
20762306a36Sopenharmony_ci	 * Create and run a dummy VM that immediately exits to userspace via
20862306a36Sopenharmony_ci	 * GUEST_SYNC, while concurrently migrating the process by setting its
20962306a36Sopenharmony_ci	 * CPU affinity.
21062306a36Sopenharmony_ci	 */
21162306a36Sopenharmony_ci	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
21262306a36Sopenharmony_ci
21362306a36Sopenharmony_ci	pthread_create(&migration_thread, NULL, migration_worker,
21462306a36Sopenharmony_ci		       (void *)(unsigned long)syscall(SYS_gettid));
21562306a36Sopenharmony_ci
21662306a36Sopenharmony_ci	for (i = 0; !done; i++) {
21762306a36Sopenharmony_ci		vcpu_run(vcpu);
21862306a36Sopenharmony_ci		TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
21962306a36Sopenharmony_ci			    "Guest failed?");
22062306a36Sopenharmony_ci
22162306a36Sopenharmony_ci		/*
22262306a36Sopenharmony_ci		 * Verify rseq's CPU matches sched's CPU.  Ensure migration
22362306a36Sopenharmony_ci		 * doesn't occur between getcpu() and reading the rseq cpu_id
22462306a36Sopenharmony_ci		 * by rereading both if the sequence count changes, or if the
22562306a36Sopenharmony_ci		 * count is odd (migration in-progress).
22662306a36Sopenharmony_ci		 */
22762306a36Sopenharmony_ci		do {
22862306a36Sopenharmony_ci			/*
22962306a36Sopenharmony_ci			 * Drop bit 0 to force a mismatch if the count is odd,
23062306a36Sopenharmony_ci			 * i.e. if a migration is in-progress.
23162306a36Sopenharmony_ci			 */
23262306a36Sopenharmony_ci			snapshot = atomic_read(&seq_cnt) & ~1;
23362306a36Sopenharmony_ci
23462306a36Sopenharmony_ci			/*
23562306a36Sopenharmony_ci			 * Ensure calling getcpu() and reading rseq.cpu_id complete
23662306a36Sopenharmony_ci			 * in a single "no migration" window, i.e. are not reordered
23762306a36Sopenharmony_ci			 * across the seq_cnt reads.
23862306a36Sopenharmony_ci			 */
23962306a36Sopenharmony_ci			smp_rmb();
24062306a36Sopenharmony_ci			r = sys_getcpu(&cpu, NULL);
24162306a36Sopenharmony_ci			TEST_ASSERT(!r, "getcpu failed, errno = %d (%s)",
24262306a36Sopenharmony_ci				    errno, strerror(errno));
24362306a36Sopenharmony_ci			rseq_cpu = rseq_current_cpu_raw();
24462306a36Sopenharmony_ci			smp_rmb();
24562306a36Sopenharmony_ci		} while (snapshot != atomic_read(&seq_cnt));
24662306a36Sopenharmony_ci
24762306a36Sopenharmony_ci		TEST_ASSERT(rseq_cpu == cpu,
24862306a36Sopenharmony_ci			    "rseq CPU = %d, sched CPU = %d\n", rseq_cpu, cpu);
24962306a36Sopenharmony_ci	}
25062306a36Sopenharmony_ci
25162306a36Sopenharmony_ci	/*
25262306a36Sopenharmony_ci	 * Sanity check that the test was able to enter the guest a reasonable
25362306a36Sopenharmony_ci	 * number of times, e.g. didn't get stalled too often/long waiting for
25462306a36Sopenharmony_ci	 * getcpu() to stabilize.  A 2:1 migration:KVM_RUN ratio is a fairly
25562306a36Sopenharmony_ci	 * conservative ratio on x86-64, which can do _more_ KVM_RUNs than
25662306a36Sopenharmony_ci	 * migrations given the 1us+ delay in the migration task.
25762306a36Sopenharmony_ci	 */
25862306a36Sopenharmony_ci	TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
25962306a36Sopenharmony_ci		    "Only performed %d KVM_RUNs, task stalled too much?\n", i);
26062306a36Sopenharmony_ci
26162306a36Sopenharmony_ci	pthread_join(migration_thread, NULL);
26262306a36Sopenharmony_ci
26362306a36Sopenharmony_ci	kvm_vm_free(vm);
26462306a36Sopenharmony_ci
26562306a36Sopenharmony_ci	rseq_unregister_current_thread();
26662306a36Sopenharmony_ci
26762306a36Sopenharmony_ci	return 0;
26862306a36Sopenharmony_ci}
269