// SPDX-License-Identifier: GPL-2.0
/*
 * VMID allocator.
 *
 * Based on the arm64 ASID allocator algorithm.
 * Please refer to arch/arm64/mm/context.c for detailed
 * comments on the algorithm.
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

unsigned int __ro_after_init kvm_arm_vmid_bits;
static DEFINE_RAW_SPINLOCK(cpu_vmid_lock);

static atomic64_t vmid_generation;
static unsigned long *vmid_map;

static DEFINE_PER_CPU(atomic64_t, active_vmids);
static DEFINE_PER_CPU(u64, reserved_vmids);

#define VMID_MASK		(~GENMASK(kvm_arm_vmid_bits - 1, 0))
#define VMID_FIRST_VERSION	(1UL << kvm_arm_vmid_bits)

#define NUM_USER_VMIDS		VMID_FIRST_VERSION
#define vmid2idx(vmid)		((vmid) & ~VMID_MASK)
#define idx2vmid(idx)		vmid2idx(idx)

/*
 * As vmid #0 is always reserved, we will never allocate one
 * below, so it can be treated as invalid. This is used to
 * set the active_vmids on vCPU schedule out.
 */
#define VMID_ACTIVE_INVALID	VMID_FIRST_VERSION

#define vmid_gen_match(vmid) \
	(!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits))

static void flush_context(void)
{
	int cpu;
	u64 vmid;

	bitmap_zero(vmid_map, NUM_USER_VMIDS);

	for_each_possible_cpu(cpu) {
		vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);

		/* Preserve reserved VMID */
		if (vmid == 0)
			vmid = per_cpu(reserved_vmids, cpu);
		__set_bit(vmid2idx(vmid), vmid_map);
		per_cpu(reserved_vmids, cpu) = vmid;
	}

	/*
	 * Unlike the ASID allocator, we expect less frequent rollover in
	 * the case of VMIDs. Hence, instead of marking the CPU as
	 * flush_pending and issuing a local context invalidation on
	 * the next context-switch, we broadcast TLB flush + I-cache
	 * invalidation over the inner shareable domain on rollover.
	 */
	kvm_call_hyp(__kvm_flush_vm_context);
}

static bool check_update_reserved_vmid(u64 vmid, u64 newvmid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved VMIDs looking for a match
	 * and update to use newvmid (i.e. the same VMID in the current
	 * generation).
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_vmids, cpu) == vmid) {
			hit = true;
			per_cpu(reserved_vmids, cpu) = newvmid;
		}
	}

	return hit;
}

static u64 new_vmid(struct kvm_vmid *kvm_vmid)
{
	static u32 cur_idx = 1;
	u64 vmid = atomic64_read(&kvm_vmid->id);
	u64 generation = atomic64_read(&vmid_generation);

	if (vmid != 0) {
		u64 newvmid = generation | (vmid & ~VMID_MASK);

		if (check_update_reserved_vmid(vmid, newvmid)) {
			atomic64_set(&kvm_vmid->id, newvmid);
			return newvmid;
		}

		if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) {
			atomic64_set(&kvm_vmid->id, newvmid);
			return newvmid;
		}
	}

	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, cur_idx);
	if (vmid != NUM_USER_VMIDS)
		goto set_vmid;

	/* We're out of VMIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(VMID_FIRST_VERSION,
						 &vmid_generation);
	flush_context();

	/* We have more VMIDs than CPUs, so this will always succeed */
	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, 1);

set_vmid:
	__set_bit(vmid, vmid_map);
	cur_idx = vmid;
	vmid = idx2vmid(vmid) | generation;
	atomic64_set(&kvm_vmid->id, vmid);
	return vmid;
}

/* Called from vCPU sched out with preemption disabled */
void kvm_arm_vmid_clear_active(void)
{
	atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
}

void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
{
	unsigned long flags;
	u64 vmid, old_active_vmid;

	vmid = atomic64_read(&kvm_vmid->id);

	/*
	 * Please refer to the comments in check_and_switch_context() in
	 * arch/arm64/mm/context.c.
	 *
	 * Unlike the ASID allocator, we set active_vmids to
	 * VMID_ACTIVE_INVALID on vCPU schedule out to avoid
	 * reserving the VMID space needlessly on rollover.
	 * Hence, explicitly check here for a "!= 0" to
	 * handle the sync with a concurrent rollover.
	 */
	old_active_vmid = atomic64_read(this_cpu_ptr(&active_vmids));
	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
	    0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
					  old_active_vmid, vmid))
		return;

	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);

	/* Check that our VMID belongs to the current generation. */
	vmid = atomic64_read(&kvm_vmid->id);
	if (!vmid_gen_match(vmid))
		vmid = new_vmid(kvm_vmid);

	atomic64_set(this_cpu_ptr(&active_vmids), vmid);
	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
}

/*
 * Initialize the VMID allocator
 */
int __init kvm_arm_vmid_alloc_init(void)
{
	kvm_arm_vmid_bits = kvm_get_vmid_bits();

	/*
	 * Expect allocation after rollover to fail if we don't have
	 * at least one more VMID than CPUs. VMID #0 is always reserved.
	 */
	WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
	atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
	vmid_map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL);
	if (!vmid_map)
		return -ENOMEM;

	return 0;
}

void __init kvm_arm_vmid_alloc_free(void)
{
	bitmap_free(vmid_map);
}
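
/*
 * Illustrative only, not part of the allocator: a minimal sketch of the
 * expected call pattern, based on how the arm64 KVM vCPU run path uses
 * this allocator. The surrounding caller names and the
 * vcpu->arch.hw_mmu->vmid path are assumptions and may differ between
 * kernel versions.
 *
 *	// Before entering the guest, with preemption disabled so the
 *	// per-CPU active_vmids entry stays valid up to the world switch:
 *	preempt_disable();
 *	kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);
 *	...				// enter the guest
 *	preempt_enable();
 *
 *	// On vCPU sched out (preemption already disabled):
 *	kvm_arm_vmid_clear_active();
 */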