Lines matching references to best:
76 struct kvm_cpuid_entry2 *best;
82 best = cpuid_entry2_find(entries, nent, 0x80000008, 0);
83 if (best) {
84 int vaddr_bits = (best->eax & 0xff00) >> 8;
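These matches appear to come from KVM's x86 CPUID handling (the helpers kvm_find_cpuid_entry(), cpuid_entry_change(), etc. live in arch/x86/kvm/cpuid.c). The 0x80000008 match above pulls the guest's reported linear (virtual) address width out of that leaf: architecturally, CPUID.80000008H:EAX[7:0] is the physical address width and EAX[15:8] the linear address width. A minimal standalone sketch of the same bit extraction; the example EAX value and the main() wrapper are illustrative, not taken from the kernel:

#include <stdio.h>
#include <stdint.h>

/*
 * CPUID leaf 0x80000008, EAX layout (architectural):
 *   bits  7:0  - physical address width, in bits
 *   bits 15:8  - linear (virtual) address width, in bits
 * The example value below is made up for illustration.
 */
int main(void)
{
	uint32_t eax = 0x00003027;              /* 48-bit virtual, 39-bit physical */
	int vaddr_bits = (eax & 0xff00) >> 8;   /* same expression as the matched line */
	int paddr_bits = eax & 0xff;

	printf("virtual address bits:  %d\n", vaddr_bits);   /* 48 */
	printf("physical address bits: %d\n", paddr_bits);   /* 39 */
	return 0;
}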
95 struct kvm_cpuid_entry2 *best;
97 best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
103 if (best)
104 vcpu->arch.pv_cpuid.features = best->eax;
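The KVM_CPUID_FEATURES match snapshots that leaf's EAX into vcpu->arch.pv_cpuid.features so later paravirt paths can test feature bits without re-walking the guest CPUID array. A hedged standalone sketch of that caching pattern; the toy struct and the KVM_FEATURE_PV_UNHALT value (bit 7 in the kernel's UAPI header, to the best of my knowledge) are reproduced only for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Assumed to mirror the kernel's UAPI definition; treat as illustrative. */
#define KVM_FEATURE_PV_UNHALT	7

/* Toy stand-in for the vCPU state touched by the matched lines. */
struct pv_cpuid {
	uint32_t features;	/* cached copy of CPUID[KVM_CPUID_FEATURES].EAX */
};

static bool pv_feature_enabled(const struct pv_cpuid *pv, unsigned int bit)
{
	return pv->features & (1u << bit);
}

int main(void)
{
	struct pv_cpuid pv = { 0 };
	uint32_t leaf_eax = 1u << KVM_FEATURE_PV_UNHALT;	/* pretend guest entry */

	pv.features = leaf_eax;		/* the caching step from the listing */

	printf("PV_UNHALT advertised: %s\n",
	       pv_feature_enabled(&pv, KVM_FEATURE_PV_UNHALT) ? "yes" : "no");
	return 0;
}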
109 struct kvm_cpuid_entry2 *best;
111 best = kvm_find_cpuid_entry(vcpu, 1, 0);
112 if (best) {
115 cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
118 cpuid_entry_change(best, X86_FEATURE_APIC,
122 best = kvm_find_cpuid_entry(vcpu, 7, 0);
123 if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
124 cpuid_entry_change(best, X86_FEATURE_OSPKE,
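The CPUID.1 and CPUID.7 matches refresh guest-visible "OS enabled" bits at runtime: cpuid_entry_change() appears to set or clear a feature bit depending on whether its last argument is non-zero, mirroring guest CR4.OSXSAVE into CPUID.1:ECX.OSXSAVE, the APIC-enable state into CPUID.1:EDX.APIC, and guest CR4.PKE into CPUID.7:ECX.OSPKE. A minimal user-space sketch of that mirroring; the bit positions are the architectural ones, and the helper below is a stand-in, not the kernel function:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Architectural bit positions (per the SDM); the macro names are illustrative. */
#define CR4_OSXSAVE		(1u << 18)
#define CR4_PKE			(1u << 22)
#define CPUID_1_ECX_OSXSAVE	(1u << 27)
#define CPUID_7_ECX_OSPKE	(1u << 4)

/* Stand-in for cpuid_entry_change(): set or clear one feature bit. */
static void update_feature_bit(uint32_t *reg, uint32_t mask, bool set)
{
	if (set)
		*reg |= mask;
	else
		*reg &= ~mask;
}

int main(void)
{
	uint32_t guest_cr4 = CR4_OSXSAVE;	/* guest enabled XSAVE, not PKE */
	uint32_t cpuid_1_ecx = 0, cpuid_7_ecx = 0;

	/* Reflect the guest-controlled CR4 bits into the reported CPUID bits. */
	update_feature_bit(&cpuid_1_ecx, CPUID_1_ECX_OSXSAVE, guest_cr4 & CR4_OSXSAVE);
	update_feature_bit(&cpuid_7_ecx, CPUID_7_ECX_OSPKE, guest_cr4 & CR4_PKE);

	printf("CPUID.1:ECX = 0x%08x, CPUID.7:ECX = 0x%08x\n",
	       cpuid_1_ecx, cpuid_7_ecx);
	return 0;
}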
127 best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
128 if (best)
129 best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
131 best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
132 if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
133 cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
134 best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
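The two CPUID.0xD matches keep the reported XSAVE buffer sizes in step with the guest's current XCR0: sub-leaf 0's EBX is recomputed for the standard (uncompacted) layout, and sub-leaf 1's EBX for the compacted layout when XSAVES or XSAVEC is exposed. xstate_required_size() is a kernel helper; the following is only a rough, assumption-laden user-space illustration of the idea, with a single extended component and no alignment handling:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/*
 * Very simplified model of "how much XSAVE buffer do the enabled XCR0
 * components need".  Only the AVX component is listed; its offset/size match
 * the architectural CPUID.0xD.2 values, everything else is omitted.
 */
struct xcomp {
	uint32_t offset;	/* offset in the standard (uncompacted) layout */
	uint32_t size;
};

static const struct xcomp comps[] = {
	[2] = { 576, 256 },	/* AVX (YMM_Hi128) state */
};

static uint32_t xstate_size(uint64_t xcr0, bool compacted)
{
	uint32_t size = 512 + 64;	/* legacy FXSAVE region + XSAVE header */
	size_t i;

	for (i = 2; i < sizeof(comps) / sizeof(comps[0]); i++) {
		if (!(xcr0 & (1ull << i)))
			continue;
		if (compacted)
			size += comps[i].size;		/* components packed in order */
		else if (comps[i].offset + comps[i].size > size)
			size = comps[i].offset + comps[i].size;	/* fixed offsets */
	}
	return size;
}

int main(void)
{
	uint64_t xcr0 = 0x7;	/* x87 | SSE | AVX enabled */

	printf("standard size:  %u bytes\n", xstate_size(xcr0, false));
	printf("compacted size: %u bytes\n", xstate_size(xcr0, true));
	return 0;
}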
136 best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
137 if (kvm_hlt_in_guest(vcpu->kvm) && best &&
138 (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
139 best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
142 best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
143 if (best)
144 cpuid_entry_change(best, X86_FEATURE_MWAIT,
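The next matches adjust advertised features to the VM's configuration: when HLT exits are disabled (kvm_hlt_in_guest()), the PV_UNHALT paravirtual bit is masked out of the KVM_CPUID_FEATURES EAX, and the MWAIT bit in CPUID.1 is made to track the guest's IA32_MISC_ENABLE MWAIT setting. A standalone sketch of that kind of conditional masking; the macro values below are assumptions for illustration, not the kernel's definitions:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Assumed values, for illustration only. */
#define KVM_FEATURE_PV_UNHALT	7		/* bit in the PV feature leaf's EAX */
#define MISC_ENABLE_MWAIT	(1ull << 18)	/* IA32_MISC_ENABLE.MWAIT */
#define CPUID_1_ECX_MWAIT	(1u << 3)	/* CPUID.1:ECX MONITOR/MWAIT */

int main(void)
{
	bool hlt_in_guest = true;		/* HLT is not intercepted */
	uint64_t misc_enable = 0;		/* guest cleared the MWAIT enable */
	uint32_t pv_eax = 1u << KVM_FEATURE_PV_UNHALT;
	uint32_t cpuid_1_ecx = CPUID_1_ECX_MWAIT;

	/* No HLT exits -> PV "unhalt" kicks are pointless, so hide the feature. */
	if (hlt_in_guest && (pv_eax & (1u << KVM_FEATURE_PV_UNHALT)))
		pv_eax &= ~(1u << KVM_FEATURE_PV_UNHALT);

	/* Advertise MWAIT only while the guest's MISC_ENABLE bit allows it. */
	if (!(misc_enable & MISC_ENABLE_MWAIT))
		cpuid_1_ecx &= ~CPUID_1_ECX_MWAIT;

	printf("PV EAX = 0x%08x, CPUID.1:ECX = 0x%08x\n", pv_eax, cpuid_1_ecx);
	return 0;
}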
153 struct kvm_cpuid_entry2 *best;
155 best = kvm_find_cpuid_entry(vcpu, 1, 0);
156 if (best && apic) {
157 if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
165 best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
166 if (!best)
170 (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
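These matches run after a new CPUID table has been installed for the vCPU: the TSC_DEADLINE_TIMER check influences how the local APIC timer mode is configured (only lines containing "best" appear in this listing, so the assignment itself is not shown), and the CPUID.0xD entry determines which XCR0 bits the guest may set by combining the entry's EAX/EDX pair into a 64-bit mask and intersecting it with what the host supports. A small sketch of that 32+32 -> 64-bit combination; supported_xcr0 below is just an example host mask:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* CPUID.0xD.0: EAX/EDX describe the supported XCR0 bits (low/high words). */
	uint32_t eax = 0x00000007;	/* x87 | SSE | AVX, example value */
	uint32_t edx = 0x00000000;
	uint64_t supported_xcr0 = 0x0000000000000207ull;	/* example host mask */

	/* Same combination as the matched line: low word | high word << 32. */
	uint64_t guest_supported_xcr0 =
		(eax | ((uint64_t)edx << 32)) & supported_xcr0;

	printf("guest supported XCR0: 0x%016llx\n",
	       (unsigned long long)guest_supported_xcr0);
	return 0;
}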
213 struct kvm_cpuid_entry2 *best;
215 best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
216 if (!best || best->eax < 0x80000008)
218 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
219 if (best)
220 return best->eax & 0xff;
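The final pair of matches reads the guest's MAXPHYADDR: it first checks that extended leaf 0x80000008 exists at all (leaf 0x80000000's EAX reports the highest supported extended leaf), then returns EAX[7:0] of leaf 0x80000008, the physical address width in bits. A hedged standalone sketch of that lookup; the toy entry struct and the fallback width used when the leaf is missing are assumptions here, since the corresponding kernel lines are not part of the listing:

#include <stdio.h>
#include <stdint.h>

/* Toy CPUID entry; the kernel's struct kvm_cpuid_entry2 has more fields. */
struct cpuid_ent {
	uint32_t function;
	uint32_t eax;
};

/* Return MAXPHYADDR from a guest CPUID table, with a conservative fallback. */
static int query_maxphyaddr(const struct cpuid_ent *ents, int n)
{
	uint32_t max_ext_leaf = 0;
	int i;

	for (i = 0; i < n; i++)
		if (ents[i].function == 0x80000000)
			max_ext_leaf = ents[i].eax;

	if (max_ext_leaf >= 0x80000008) {
		for (i = 0; i < n; i++)
			if (ents[i].function == 0x80000008)
				return ents[i].eax & 0xff;	/* bits 7:0 */
	}

	return 36;	/* assumed legacy default when the leaf is absent */
}

int main(void)
{
	struct cpuid_ent ents[] = {
		{ 0x80000000, 0x80000008 },	/* highest extended leaf */
		{ 0x80000008, 0x00003028 },	/* 40-bit physical, 48-bit virtual */
	};

	printf("MAXPHYADDR = %d bits\n",
	       query_maxphyaddr(ents, (int)(sizeof(ents) / sizeof(ents[0]))));
	return 0;
}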