Lines matching refs: entry

107 		 * If the index isn't significant, use the first entry with a
116 * Similarly, use the first matching entry if KVM is doing a
196 struct kvm_cpuid_entry2 *entry;
200 entry = kvm_find_cpuid_entry(vcpu, base);
202 if (entry) {
205 signature[0] = entry->ebx;
206 signature[1] = entry->ecx;
207 signature[2] = entry->edx;
211 cpuid.limit = entry->eax;
317 struct kvm_cpuid_entry2 *entry;
319 entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
321 return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
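
The matches at 196-211 and 317-321 deal with the hypervisor CPUID signature: the 12-byte vendor string sits in EBX/ECX/EDX of the base leaf, and the Hyper-V interface leaf answers "Hv#1" in EAX. A minimal userspace sketch of the same layout, assuming GCC's <cpuid.h> and the conventional hypervisor leaf numbers 0x40000000/0x40000001 (the constants are the usual architectural values, not taken from this listing):

	#include <cpuid.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;
		char sig[13];

		/*
		 * Use __cpuid() directly: __get_cpuid() rejects leaves above the
		 * reported maximum, and 0x40000000 is never part of that range.
		 */
		__cpuid(0x40000000, eax, ebx, ecx, edx);

		/* The vendor signature is 12 bytes spread across EBX, ECX, EDX. */
		memcpy(sig + 0, &ebx, 4);
		memcpy(sig + 4, &ecx, 4);
		memcpy(sig + 8, &edx, 4);
		sig[12] = '\0';
		printf("signature \"%s\", max hypervisor leaf 0x%x\n", sig, eax);

		/* The Hyper-V interface leaf reports "Hv#1" in EAX. */
		__cpuid(0x40000001, eax, ebx, ecx, edx);
		if (eax == 0x31237648)	/* "Hv#1", little endian */
			puts("Hyper-V interface exposed");
		return 0;
	}

On bare metal the 0x40000000 range returns whatever the highest basic leaf reports, so a real probe would first check the hypervisor-present bit (CPUID.1:ECX[31]).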
545 struct kvm_cpuid_entry2 entry;
550 &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
552 kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
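
The match at 552 ANDs a KVM-maintained capability word with the host's value of the corresponding CPUID register. The pattern, reduced to a userspace helper with made-up names (the code around this match selects the leaf, subleaf, and register via its reverse-CPUID mapping, which is elided here):

	#include <cpuid.h>
	#include <stdint.h>

	enum cpuid_reg { REG_EAX, REG_EBX, REG_ECX, REG_EDX };

	/* Hypothetical: keep only the caps bits the host actually reports. */
	static uint32_t mask_caps_with_host(uint32_t caps, uint32_t leaf,
					    uint32_t index, enum cpuid_reg reg)
	{
		uint32_t r[4];

		__cpuid_count(leaf, index,
			      r[REG_EAX], r[REG_EBX], r[REG_ECX], r[REG_EDX]);
		return caps & r[reg];
	}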
772 * Synthesize "LFENCE is serializing" into the AMD-defined entry in
827 struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);
829 if (!entry)
832 memset(entry, 0, sizeof(*entry));
833 entry->function = function;
834 entry->index = index;
838 return entry;
850 return entry;
858 cpuid_count(entry->function, entry->index,
859 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
862 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
864 return entry;
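
Matches 827-864 grab the next free entry in the output array, fill it from host CPUID, and flag indexed leaves. A self-contained sketch of that shape, with a simplified entry struct and flag name (the real UAPI flag is the KVM_CPUID_FLAG_SIGNIFCANT_INDEX seen at 862, and it is set only for leaves whose output depends on ECX):

	#include <cpuid.h>
	#include <stdint.h>
	#include <string.h>

	#define FLAG_SIGNIFICANT_INDEX	(1u << 0)	/* stand-in for the UAPI flag */

	struct cpuid_entry {
		uint32_t function, index, flags;
		uint32_t eax, ebx, ecx, edx;
	};

	struct cpuid_array {
		struct cpuid_entry *entries;
		int maxnent, nent;
	};

	static struct cpuid_entry *host_cpuid_entry(struct cpuid_array *array,
						    uint32_t function,
						    uint32_t index, int indexed)
	{
		struct cpuid_entry *entry;

		if (array->nent >= array->maxnent)
			return NULL;

		entry = &array->entries[array->nent++];
		memset(entry, 0, sizeof(*entry));
		entry->function = function;
		entry->index = index;

		/* Read the host's values for this leaf/subleaf. */
		__cpuid_count(function, index, entry->eax, entry->ebx,
			      entry->ecx, entry->edx);

		if (indexed)
			entry->flags |= FLAG_SIGNIFICANT_INDEX;

		return entry;
	}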
869 struct kvm_cpuid_entry2 *entry;
874 entry = &array->entries[array->nent];
875 entry->function = func;
876 entry->index = 0;
877 entry->flags = 0;
881 entry->eax = 7;
885 entry->ecx = F(MOVBE);
889 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
890 entry->eax = 0;
892 entry->ecx = F(RDPID);
904 struct kvm_cpuid_entry2 *entry;
912 entry = do_host_cpuid(array, function, 0);
913 if (!entry)
919 entry->eax = min(entry->eax, 0x1fU);
922 cpuid_entry_override(entry, CPUID_1_EDX);
923 cpuid_entry_override(entry, CPUID_1_ECX);
941 WARN_ON_ONCE((entry->eax & 0xff) > 1);
947 * Read entries until the cache type in the previous entry is
948 * zero, i.e. indicates an invalid entry.
950 for (i = 1; entry->eax & 0x1f; ++i) {
951 entry = do_host_cpuid(array, function, i);
952 if (!entry)
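
Matches 941-952 enumerate cache-topology subleaves until the cache-type field in EAX[4:0] reads zero, which marks an invalid entry. The same loop works against the host from userspace; a sketch assuming Intel's leaf 4 layout (AMD's 0x8000001d leaf, also visible at 1189, is laid out the same way):

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx, i;

		for (i = 0; ; i++) {
			__cpuid_count(4, i, eax, ebx, ecx, edx);

			/* EAX[4:0] == 0: no cache at this subleaf, stop. */
			if (!(eax & 0x1f))
				break;

			printf("cache %u: type %u, level %u\n",
			       i, eax & 0x1f, (eax >> 5) & 0x7);
		}
		return 0;
	}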
957 entry->eax = 0x4; /* allow ARAT */
958 entry->ebx = 0;
959 entry->ecx = 0;
960 entry->edx = 0;
964 max_idx = entry->eax = min(entry->eax, 2u);
965 cpuid_entry_override(entry, CPUID_7_0_EBX);
966 cpuid_entry_override(entry, CPUID_7_ECX);
967 cpuid_entry_override(entry, CPUID_7_EDX);
971 entry = do_host_cpuid(array, function, 1);
972 if (!entry)
975 cpuid_entry_override(entry, CPUID_7_1_EAX);
976 cpuid_entry_override(entry, CPUID_7_1_EDX);
977 entry->ebx = 0;
978 entry->ecx = 0;
981 entry = do_host_cpuid(array, function, 2);
982 if (!entry)
985 cpuid_entry_override(entry, CPUID_7_2_EDX);
986 entry->ecx = 0;
987 entry->ebx = 0;
988 entry->eax = 0;
996 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1012 entry->eax = eax.full;
1013 entry->ebx = kvm_pmu_cap.events_mask;
1014 entry->ecx = 0;
1015 entry->edx = edx.full;
1024 entry->eax = entry->ebx = entry->ecx = 0;
1030 entry->eax &= permitted_xcr0;
1031 entry->ebx = xstate_required_size(permitted_xcr0, false);
1032 entry->ecx = entry->ebx;
1033 entry->edx &= permitted_xcr0 >> 32;
1037 entry = do_host_cpuid(array, function, 1);
1038 if (!entry)
1041 cpuid_entry_override(entry, CPUID_D_1_EAX);
1042 if (entry->eax & (F(XSAVES)|F(XSAVEC)))
1043 entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
1047 entry->ebx = 0;
1049 entry->ecx &= permitted_xss;
1050 entry->edx &= permitted_xss >> 32;
1061 entry = do_host_cpuid(array, function, i);
1062 if (!entry)
1073 if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
1079 entry->ecx &= ~BIT_ULL(2);
1080 entry->edx = 0;
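
Matches 1030-1080 size the guest's XSAVE area from the permitted XCR0/XSS bits. In the standard (non-compacted) format the required size is the largest offset-plus-size over all enabled extended components, each reported by CPUID leaf 0xD per subleaf. A userspace sketch for a given XCR0 mask (the compacted format used with XSAVES instead packs the enabled components back to back rather than using the reported offsets):

	#include <cpuid.h>
	#include <stdint.h>

	/*
	 * Standard-format XSAVE size for the given XCR0 mask: 512-byte legacy
	 * region plus 64-byte header, extended to cover every enabled component
	 * at the offset and size reported by CPUID.(EAX=0xD, ECX=component).
	 */
	static uint32_t xsave_size_for(uint64_t xcr0)
	{
		unsigned int eax, ebx, ecx, edx;
		uint32_t size = 512 + 64;
		int i;

		for (i = 2; i < 64; i++) {
			if (!(xcr0 & (1ull << i)))
				continue;

			__cpuid_count(0xd, i, eax, ebx, ecx, edx);
			/* EAX = component size, EBX = offset in the standard layout. */
			if (ebx + eax > size)
				size = ebx + eax;
		}
		return size;
	}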
1087 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1097 cpuid_entry_override(entry, CPUID_12_EAX);
1098 entry->ebx &= SGX_MISC_EXINFO;
1100 entry = do_host_cpuid(array, function, 1);
1101 if (!entry)
1111 entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK;
1112 entry->ebx &= 0;
1117 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1121 for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
1129 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1133 for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
1140 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1146 entry->eax = KVM_CPUID_FEATURES;
1147 entry->ebx = sigptr[0];
1148 entry->ecx = sigptr[1];
1149 entry->edx = sigptr[2];
1153 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
1168 entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
1170 entry->ebx = 0;
1171 entry->ecx = 0;
1172 entry->edx = 0;
1175 entry->eax = min(entry->eax, 0x80000022);
1189 if (entry->eax >= 0x8000001d &&
1192 entry->eax = max(entry->eax, 0x80000021);
1195 entry->ebx &= ~GENMASK(27, 16);
1196 cpuid_entry_override(entry, CPUID_8000_0001_EDX);
1197 cpuid_entry_override(entry, CPUID_8000_0001_ECX);
1204 entry->edx &= ~GENMASK(17, 16);
1207 cpuid_entry_override(entry, CPUID_8000_0007_EDX);
1210 entry->edx &= boot_cpu_data.x86_power;
1211 entry->eax = entry->ebx = entry->ecx = 0;
1214 unsigned g_phys_as = (entry->eax >> 16) & 0xff;
1215 unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
1216 unsigned phys_as = entry->eax & 0xff;
1233 entry->eax = g_phys_as | (virt_as << 8);
1234 entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
1235 entry->edx = 0;
1236 cpuid_entry_override(entry, CPUID_8000_0008_EBX);
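
Matches 1214-1236 repack the address-width fields of leaf 0x80000008 EAX before handing them to the guest. The field layout is architectural: bits 7:0 are physical address bits, 15:8 virtual, 23:16 guest-physical (zero meaning "same as physical"). Read directly on the host:

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* __get_cpuid() checks the max extended leaf for us here. */
		if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
			return 1;

		printf("physical address bits:       %u\n", eax & 0xff);
		printf("virtual address bits:        %u\n", (eax >> 8) & 0xff);
		/* 0 means the guest-physical width equals the physical width. */
		printf("guest physical address bits: %u\n", (eax >> 16) & 0xff);
		return 0;
	}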
1241 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1244 entry->eax = 1; /* SVM revision 1 */
1245 entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
1247 entry->ecx = 0; /* Reserved */
1248 cpuid_entry_override(entry, CPUID_8000_000A_EDX);
1251 entry->ecx = entry->edx = 0;
1254 entry->eax &= GENMASK(2, 0);
1255 entry->ebx = entry->ecx = entry->edx = 0;
1259 entry->eax = entry->ebx = entry->ecx = 0;
1260 entry->edx = 0; /* reserved */
1264 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1266 cpuid_entry_override(entry, CPUID_8000_001F_EAX);
1268 entry->ebx &= ~GENMASK(31, 12);
1273 entry->ebx &= ~GENMASK(11, 6);
1277 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1280 entry->ebx = entry->ecx = entry->edx = 0;
1281 cpuid_entry_override(entry, CPUID_8000_0021_EAX);
1287 entry->ecx = entry->edx = 0;
1289 entry->eax = entry->ebx;
1293 cpuid_entry_override(entry, CPUID_8000_0022_EAX);
1302 entry->ebx = ebx.full;
1308 entry->eax = min(entry->eax, 0xC0000004);
1311 cpuid_entry_override(entry, CPUID_C000_0001_EDX);
1319 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1464 * class exists if a guest CPUID entry for its <base> leaf exists. For a given
1506 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
1507 * entry for CPUID.0xb.index (see below), then the output value for EDX
1514 * the effective CPUID entry is the max basic leaf. Note, the index of
1524 struct kvm_cpuid_entry2 *entry;
1527 entry = kvm_find_cpuid_entry_index(vcpu, function, index);
1528 exact = !!entry;
1530 if (!entry && !exact_only) {
1531 entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
1532 used_max_basic = !!entry;
1535 if (entry) {
1536 *eax = entry->eax;
1537 *ebx = entry->ebx;
1538 *ecx = entry->ecx;
1539 *edx = entry->edx;
1559 entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
1560 if (entry) {
1562 *edx = entry->edx;
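
The matches from 1464 on implement the lookup behind guest CPUID emulation: find the exact (function, index) entry, and when it is missing and the caller did not ask for exact matches only, fall back to the entry of the max basic leaf so out-of-range queries return its values instead of zeros. A condensed sketch of that control flow over a plain array (helper names are invented; the per-class base handling described in the comment at 1464 is left out):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	struct cpuid_entry {
		uint32_t function, index;
		uint32_t eax, ebx, ecx, edx;
	};

	static const struct cpuid_entry *find_entry(const struct cpuid_entry *e,
						    size_t n, uint32_t function,
						    uint32_t index)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if (e[i].function == function && e[i].index == index)
				return &e[i];
		return NULL;
	}

	/* Hypothetical emulation helper: returns true on an exact match. */
	static bool emulate_cpuid(const struct cpuid_entry *e, size_t n,
				  uint32_t function, uint32_t index,
				  bool exact_only, uint32_t *eax, uint32_t *ebx,
				  uint32_t *ecx, uint32_t *edx)
	{
		const struct cpuid_entry *entry = find_entry(e, n, function, index);
		bool exact = entry;

		if (!entry && !exact_only) {
			/* Out of range: fall back to the max basic leaf's entry. */
			const struct cpuid_entry *basic = find_entry(e, n, 0, 0);

			if (basic)
				entry = find_entry(e, n, basic->eax, index);
		}

		if (entry) {
			*eax = entry->eax;
			*ebx = entry->ebx;
			*ecx = entry->ecx;
			*edx = entry->edx;
		} else {
			*eax = *ebx = *ecx = *edx = 0;
		}
		return exact;
	}

The returned bool mirrors the "exact" flag at 1528, which lets callers distinguish a real entry from the fallback values.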