Lines Matching refs:vmid
32 #define vmid2idx(vmid) ((vmid) & ~VMID_MASK)
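These fragments appear to come from the arm64 KVM VMID allocator (arch/arm64/kvm/vmid.c). The vmid2idx()/idx2vmid() macros split a 64-bit VMID value into its low kvm_arm_vmid_bits, which index the allocation bitmap (VMID #0 is kept reserved and never handed out), and the bits above them, which carry the generation. Below is a minimal userspace sketch of that split; the 16-bit width and the sample values are assumptions for illustration, since the kernel derives the real width from the CPU's ID registers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in: the kernel sizes this from the CPU's ID registers;
 * 16 VMID bits are assumed here. */
static const unsigned int vmid_bits = 16;

#define VMID_MASK      (~((1ULL << vmid_bits) - 1))   /* upper bits: generation */
#define vmid2idx(vmid) ((vmid) & ~VMID_MASK)          /* lower bits: bitmap index */
#define idx2vmid(idx)  vmid2idx(idx)

int main(void)
{
        uint64_t vmid = (3ULL << vmid_bits) | 0x2a;   /* generation 3, index 0x2a */

        printf("index      = %llu\n", (unsigned long long)vmid2idx(vmid));
        printf("generation = %llu\n", (unsigned long long)(vmid >> vmid_bits));
        return 0;
}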
36 * As vmid #0 is always reserved, we will never allocate one
42 #define vmid_gen_match(vmid) \
43 (!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits))
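vmid_gen_match() is the cheap staleness test used on every vCPU load: XOR the VMID with the global vmid_generation counter and shift away the low kvm_arm_vmid_bits; a zero result means the generation bits agree and the VMID can still be used without taking the allocator lock. A small model of the same test, again assuming 16 VMID bits for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const unsigned int vmid_bits = 16;     /* assumed width, as above */

/* A VMID is current if its generation bits equal the global counter;
 * XOR-then-shift tests exactly that in two ALU operations. */
static bool gen_match(uint64_t vmid, uint64_t generation)
{
        return !((vmid ^ generation) >> vmid_bits);
}

int main(void)
{
        uint64_t generation = 2ULL << vmid_bits;                         /* generation #2 */

        printf("%d\n", gen_match((2ULL << vmid_bits) | 5, generation));  /* 1: current */
        printf("%d\n", gen_match((1ULL << vmid_bits) | 5, generation));  /* 0: stale */
        return 0;
}

Keeping this check cheap is the point of the generation scheme: the common case costs one read, one XOR and one shift, and only a mismatch falls through to the locked allocation path.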
48 u64 vmid;
53 vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
56 if (vmid == 0)
57 vmid = per_cpu(reserved_vmids, cpu);
58 __set_bit(vmid2idx(vmid), vmid_map);
59 per_cpu(reserved_vmids, cpu) = vmid;
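The flush_context() lines above run on rollover, under the allocator lock: the bitmap is rebuilt from scratch, and for each CPU the currently active VMID (or, if the xchg finds the slot already cleared, the previously reserved one) is re-marked in the map and remembered in reserved_vmids, so already-running guests keep their numbers across the generation bump. A simplified single-threaded model of that loop follows; the fixed CPU count, map size and byte-per-slot map are assumptions, where the kernel uses per-CPU variables, atomics and a real bitmap.

#include <stdint.h>
#include <string.h>

#define NR_CPUS_MODEL   4
#define NR_VMIDS_MODEL  64

static uint64_t active_vmids_model[NR_CPUS_MODEL];
static uint64_t reserved_vmids_model[NR_CPUS_MODEL];
static unsigned char vmid_map_model[NR_VMIDS_MODEL];    /* one byte per slot */

void flush_context_model(void)
{
        memset(vmid_map_model, 0, sizeof(vmid_map_model));

        for (int cpu = 0; cpu < NR_CPUS_MODEL; cpu++) {
                /* Grab and clear the CPU's active VMID (an xchg in the kernel). */
                uint64_t vmid = active_vmids_model[cpu];
                active_vmids_model[cpu] = 0;

                /* Nothing active: fall back to the VMID reserved last time. */
                if (vmid == 0)
                        vmid = reserved_vmids_model[cpu];

                /* Keep it across the rollover: mark its slot and remember it. */
                vmid_map_model[vmid & (NR_VMIDS_MODEL - 1)] = 1;  /* vmid2idx() stand-in */
                reserved_vmids_model[cpu] = vmid;
        }
}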
72 static bool check_update_reserved_vmid(u64 vmid, u64 newvmid)
83 if (per_cpu(reserved_vmids, cpu) == vmid) {
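check_update_reserved_vmid() is the other half of that reservation: when a VM shows up with a stale VMID that some CPU still holds in reserved_vmids, those per-CPU entries are rewritten to the same VMID retagged with the current generation and the caller reuses it, rather than taking a fresh slot. Roughly, under the same simplified model assumptions:

#include <stdbool.h>
#include <stdint.h>

#define NR_CPUS_MODEL 4
static uint64_t reserved_vmids_model[NR_CPUS_MODEL];

/* Every CPU that still reserves the old value is moved to the retagged
 * value; a hit tells the caller the VMID can simply be kept. */
bool check_update_reserved_model(uint64_t vmid, uint64_t newvmid)
{
        bool hit = false;

        for (int cpu = 0; cpu < NR_CPUS_MODEL; cpu++) {
                if (reserved_vmids_model[cpu] == vmid) {
                        hit = true;
                        reserved_vmids_model[cpu] = newvmid;
                }
        }
        return hit;
}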
95 u64 vmid = atomic64_read(&kvm_vmid->id);
98 if (vmid != 0) {
99 u64 newvmid = generation | (vmid & ~VMID_MASK);
101 if (check_update_reserved_vmid(vmid, newvmid)) {
106 if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) {
112 vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, cur_idx);
113 if (vmid != NUM_USER_VMIDS)
122 vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, 1);
125 __set_bit(vmid, vmid_map);
126 cur_idx = vmid;
127 vmid = idx2vmid(vmid) | generation;
128 atomic64_set(&kvm_vmid->id, vmid);
129 return vmid;
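The new_vmid() fragments above form the locked slow path: first try to carry the old VMID into the current generation (via the reserved table, or by claiming its old bitmap slot with __test_and_set_bit()); otherwise scan for a free slot starting at cur_idx; and if the map is exhausted, bump vmid_generation by one whole generation, flush, and rescan from index 1. The compact model below follows that flow but, as a simplification, drops the per-CPU reservation handling and keeps only the bitmap bookkeeping; it also assumes 6 VMID bits so rollover is easy to trigger. None of the _model names are kernel symbols.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MODEL_VMID_BITS  6
#define MODEL_NUM_VMIDS  (1ULL << MODEL_VMID_BITS)

static uint64_t generation_model = 1ULL << MODEL_VMID_BITS;   /* first generation */
static bool     vmid_map_slow[MODEL_NUM_VMIDS];
static uint64_t cur_idx_model = 1;                            /* slot 0 stays reserved */

static uint64_t idx_of(uint64_t vmid)
{
        return vmid & (MODEL_NUM_VMIDS - 1);
}

static uint64_t find_free(uint64_t start)
{
        for (uint64_t i = start; i < MODEL_NUM_VMIDS; i++)
                if (!vmid_map_slow[i])
                        return i;
        return MODEL_NUM_VMIDS;        /* mimics find_next_zero_bit() reporting "none" */
}

uint64_t new_vmid_model(uint64_t old_vmid)
{
        /* 1. Try to keep the old number, retagged with the current generation. */
        if (old_vmid != 0 && !vmid_map_slow[idx_of(old_vmid)]) {
                vmid_map_slow[idx_of(old_vmid)] = true;
                return generation_model | idx_of(old_vmid);
        }

        /* 2. Otherwise look for a free slot, starting where we last stopped. */
        uint64_t idx = find_free(cur_idx_model);
        if (idx == MODEL_NUM_VMIDS) {
                /* 3. Exhausted: open a new generation, drop every slot, and
                 *    retry from 1 (slot 0 is permanently reserved/invalid). */
                generation_model += MODEL_NUM_VMIDS;
                memset(vmid_map_slow, 0, sizeof(vmid_map_slow));
                idx = find_free(1);
        }

        vmid_map_slow[idx] = true;
        cur_idx_model = idx;
        return generation_model | idx;
}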
141 u64 vmid, old_active_vmid;
143 vmid = atomic64_read(&kvm_vmid->id);
156 if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
158 old_active_vmid, vmid))
164 vmid = atomic64_read(&kvm_vmid->id);
165 if (!vmid_gen_match(vmid))
166 vmid = new_vmid(kvm_vmid);
168 atomic64_set(this_cpu_ptr(&active_vmids), vmid);
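kvm_arm_vmid_update() ties these pieces together on every vCPU load. The fast path avoids cpu_vmid_lock entirely: if this CPU's active_vmids slot is non-zero and the VM's VMID still passes vmid_gen_match(), the condition continues into a relaxed cmpxchg on active_vmids (the call itself is not among the matched lines), and checking that its return value is non-zero is what catches a concurrent rollover clearing the slot. Only when that fails does the function take the lock, re-read the id, call new_vmid() if the generation went stale, and publish the result in active_vmids; on schedule-out the kernel separately parks the slot at an invalid marker so idle CPUs do not pin VMIDs across rollover. The sketch below mirrors that structure with C11 atomics; the extern helpers and every _model name are assumed stand-ins in the spirit of the earlier sketches, not kernel interfaces, and the success-flag compare-exchange is a simplification of the kernel's return-value check.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

extern bool gen_match_model(uint64_t vmid);           /* generation check, as sketched earlier */
extern uint64_t new_vmid_model(uint64_t old_vmid);    /* slow-path allocator, as sketched earlier */

static _Atomic uint64_t active_vmid_this_cpu;         /* one per CPU in the kernel */
static atomic_flag vmid_lock = ATOMIC_FLAG_INIT;      /* stands in for cpu_vmid_lock */

void vmid_update_model(_Atomic uint64_t *vm_vmid)
{
        uint64_t vmid = atomic_load_explicit(vm_vmid, memory_order_relaxed);
        uint64_t old  = atomic_load_explicit(&active_vmid_this_cpu, memory_order_relaxed);

        /* Fast path: the per-CPU slot holds a live VMID and ours is of the
         * current generation, so re-publish it with one compare-and-swap. */
        if (old != 0 && gen_match_model(vmid) &&
            atomic_compare_exchange_strong_explicit(&active_vmid_this_cpu, &old, vmid,
                                                    memory_order_relaxed, memory_order_relaxed))
                return;

        /* Slow path: take the allocator lock, re-check the generation,
         * allocate afresh if stale, then mark the VMID active on this CPU. */
        while (atomic_flag_test_and_set_explicit(&vmid_lock, memory_order_acquire))
                ;                                     /* spin; the kernel uses a raw spinlock */
        vmid = atomic_load_explicit(vm_vmid, memory_order_relaxed);
        if (!gen_match_model(vmid)) {
                vmid = new_vmid_model(vmid);
                atomic_store_explicit(vm_vmid, vmid, memory_order_relaxed);
        }
        atomic_store_explicit(&active_vmid_this_cpu, vmid, memory_order_relaxed);
        atomic_flag_clear_explicit(&vmid_lock, memory_order_release);
}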