Lines matching defs:val in arch/arm64/kvm/sys_regs.c (KVM/arm64 system-register emulation)
46 u64 val);
70 u64 val = 0x8badf00d8badf00d;
73 __vcpu_read_sys_reg_from_cpu(reg, &val))
74 return val;
79 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
82 __vcpu_write_sys_reg_to_cpu(val, reg))
85 __vcpu_sys_reg(vcpu, reg) = val;
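The accessor pair above (vcpu_read_sys_reg()/vcpu_write_sys_reg()) tries the live CPU copy via __vcpu_read_sys_reg_from_cpu()/__vcpu_write_sys_reg_to_cpu() first and falls back to the vCPU's in-memory shadow. A minimal user-space sketch of that fallback shape; every demo_* name is invented here, and only the 0x8badf00d poison default comes from the listing:

    #include <stdbool.h>
    #include <stdint.h>

    #define NR_DEMO_REGS 16

    struct demo_vcpu {
            uint64_t sys_regs[NR_DEMO_REGS]; /* in-memory shadow copy */
            bool loaded;                     /* state currently on the CPU? */
    };

    /* Stand-in for __vcpu_read_sys_reg_from_cpu(); always misses here. */
    static bool demo_read_from_cpu(int reg, uint64_t *val)
    {
            (void)reg;
            (void)val;
            return false;
    }

    static uint64_t demo_read_sys_reg(struct demo_vcpu *vcpu, int reg)
    {
            uint64_t val = 0x8badf00d8badf00dULL; /* poison default */

            if (vcpu->loaded && demo_read_from_cpu(reg, &val))
                    return val;
            return vcpu->sys_regs[reg];           /* fall back to the shadow */
    }

The poison value makes it obvious in a dump when a register was read without either copy having been populated.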
152 static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
154 u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
158 if ((val & CCSIDR_EL1_RES0) ||
163 if (val == get_ccsidr(vcpu, csselr))
176 ccsidr[csselr] = val;
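set_ccsidr() (lines 152-176) rejects values with RES0 bits set, derives the line size via FIELD_GET() (architecturally, CCSIDR_EL1.LineSize holds log2(line bytes) minus 4), short-circuits when the value is unchanged, and only then stores it. A sketch of the FIELD_GET-style extraction, assuming a contiguous mask; everything DEMO_* is invented:

    #include <stdint.h>

    /* Dividing by the mask's lowest set bit == shifting down to bit 0. */
    #define DEMO_FIELD_GET(mask, val) \
            (((val) & (mask)) / ((mask) & -(mask)))

    #define DEMO_LINESIZE_MASK 0x7ULL /* CCSIDR_EL1.LineSize, bits [2:0] */

    static unsigned int demo_log2_line_bytes(uint64_t ccsidr)
    {
            /* As at line 154: field value + 4 = log2(line size in bytes). */
            return DEMO_FIELD_GET(DEMO_LINESIZE_MASK, ccsidr) + 4;
    }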
257 u64 val, mask, shift;
264 val = vcpu_read_sys_reg(vcpu, r->reg);
265 val &= ~mask;
267 val = 0;
270 val |= (p->regval & (mask >> shift)) << shift;
271 vcpu_write_sys_reg(vcpu, val, r->reg);
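Lines 257-271 (and the debug-register variant at lines 505-512 below) are the masked read-modify-write used when a guest write traps: read the current value, clear the handled field, insert the guest's bits. A self-contained sketch with invented names:

    #include <stdint.h>

    static uint64_t demo_masked_rmw(uint64_t old, uint64_t guest_val,
                                    uint64_t mask, unsigned int shift)
    {
            uint64_t val = old;

            val &= ~mask;                                  /* clear the field */
            val |= (guest_val & (mask >> shift)) << shift; /* insert new bits */
            return val;
    }

    /* e.g. demo_masked_rmw(0xff00, 0x5, 0xf0, 4) == 0xff50 */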
382 u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
385 if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
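This fragment (LORegion trap handling, by the look of lines 382-385) gates emulation on ID_AA64MMFR1_EL1.LO being nonzero. A sketch of the feature-gate test; the field's position (bits [19:16]) is architectural, the demo_* names are not:

    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_LO_SHIFT 16 /* ID_AA64MMFR1_EL1.LO, bits [19:16] */

    static bool demo_has_lo(uint64_t mmfr1)
    {
            return (mmfr1 >> DEMO_LO_SHIFT) & 0xf;
    }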
426 u64 val)
432 if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
435 __vcpu_sys_reg(vcpu, rd->reg) = val;
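The OSLSR_EL1 setter (lines 426-435) XORs the proposed value against the descriptor's reset value so that only the OSLK bit is allowed to differ; any other changed bit fails the write. Sketch of the writable-mask idiom, names invented:

    #include <errno.h>
    #include <stdint.h>

    /* Accept @new_val only if it differs from @cur inside @writable. */
    static int demo_set_with_writable_mask(uint64_t cur, uint64_t new_val,
                                           uint64_t writable)
    {
            if ((new_val ^ cur) & ~writable)
                    return -EINVAL; /* a read-only bit would change */
            return 0;
    }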
505 u64 mask, shift, val;
509 val = *dbg_reg;
510 val &= ~mask;
511 val |= (p->regval & (mask >> shift)) << shift;
512 *dbg_reg = val;
545 u64 val)
547 vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
552 u64 *val)
554 *val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
561 vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
562 return rd->val;
582 u64 val)
584 vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
589 u64 *val)
591 *val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
598 vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
599 return rd->val;
620 u64 val)
622 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
627 u64 *val)
629 *val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
636 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
637 return rd->val;
657 u64 val)
659 vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
664 u64 *val)
666 *val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
673 vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
674 return rd->val;
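Lines 545-674 repeat one accessor trio per debug register class (dbg_bvr, dbg_bcr, dbg_wvr, dbg_wcr): a set_user storing into per-vCPU debug state indexed by the descriptor's CRm, a get_user reading it back, and a reset seeding it from the descriptor's val. A condensed sketch of one trio, with demo types standing in for struct kvm_vcpu and struct sys_reg_desc:

    #include <stdint.h>

    #define DEMO_NR_BPS 16

    struct demo_debug_state { uint64_t dbg_bvr[DEMO_NR_BPS]; };
    struct demo_desc { uint8_t CRm; uint64_t val; };

    static int demo_set_bvr(struct demo_debug_state *s,
                            const struct demo_desc *rd, uint64_t val)
    {
            s->dbg_bvr[rd->CRm] = val;
            return 0;
    }

    static int demo_get_bvr(const struct demo_debug_state *s,
                            const struct demo_desc *rd, uint64_t *val)
    {
            *val = s->dbg_bvr[rd->CRm];
            return 0;
    }

    static uint64_t demo_reset_bvr(struct demo_debug_state *s,
                                   const struct demo_desc *rd)
    {
            s->dbg_bvr[rd->CRm] = rd->val;
            return rd->val;
    }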
815 u64 val;
825 val = __vcpu_sys_reg(vcpu, PMCR_EL0);
826 val &= ~ARMV8_PMU_PMCR_MASK;
827 val |= p->regval & ARMV8_PMU_PMCR_MASK;
829 val |= ARMV8_PMU_PMCR_LC;
830 kvm_pmu_handle_pmcr(vcpu, val);
833 val = __vcpu_sys_reg(vcpu, PMCR_EL0)
835 p->regval = val;
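The PMCR_EL0 handler (lines 815-835) overlays only the writable PMCR bits on a guest write and then sets PMCR_EL0.LC; in the kernel the LC forcing is conditional on 32-bit EL0 support, which the grep omits. Sketch with a stand-in writable mask (LC really is bit 6):

    #include <stdint.h>

    #define DEMO_PMCR_WR_MASK 0x7fULL     /* stand-in for ARMV8_PMU_PMCR_MASK */
    #define DEMO_PMCR_LC      (1ULL << 6) /* PMCR_EL0.LC */

    static uint64_t demo_write_pmcr(uint64_t cur, uint64_t guest_val)
    {
            uint64_t val = cur;

            val &= ~DEMO_PMCR_WR_MASK;            /* preserve RO bits, e.g. N */
            val |= guest_val & DEMO_PMCR_WR_MASK; /* take the writable bits   */
            val |= DEMO_PMCR_LC;                  /* force long cycle counter */
            return val;
    }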
880 u64 pmcr, val;
883 val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
884 if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
893 u64 *val)
904 *val = kvm_pmu_get_counter_value(vcpu, idx);
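Counter-index validation (lines 880-884) extracts PMCR_EL0.N, the number of implemented event counters, and rejects indexes at or beyond it, except the fixed cycle counter. Sketch; PMCR.N at bits [15:11] and cycle counter index 31 match the architecture and ARMV8_PMU_CYCLE_IDX:

    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_PMCR_N_SHIFT 11
    #define DEMO_PMCR_N_MASK  0x1f
    #define DEMO_CYCLE_IDX    31 /* ARMV8_PMU_CYCLE_IDX */

    static bool demo_counter_idx_valid(uint64_t pmcr, unsigned int idx)
    {
            uint64_t n = (pmcr >> DEMO_PMCR_N_SHIFT) & DEMO_PMCR_N_MASK;

            return idx < n || idx == DEMO_CYCLE_IDX;
    }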
1000 u64 val, mask;
1007 val = p->regval & mask;
1010 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
1011 kvm_pmu_enable_counter_mask(vcpu, val);
1015 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
1016 kvm_pmu_disable_counter_mask(vcpu, val);
1034 u64 val = p->regval & mask;
1038 __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
1041 __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
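PMCNTENSET_EL0/PMCNTENCLR_EL0 (lines 1000-1016) and PMINTENSET_EL1/PMINTENCLR_EL1 (lines 1034-1041) each share one handler: both encodings alias a single backing word, a write to the SET encoding ORs bits in, and a write to the CLR encoding clears them. Sketch of the aliasing:

    #include <stdbool.h>
    #include <stdint.h>

    static void demo_set_clr(uint64_t *backing, uint64_t bits, bool is_set)
    {
            if (is_set)
                    *backing |= bits;  /* write to the ...SET encoding */
            else
                    *backing &= ~bits; /* write to the ...CLR encoding */
    }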
1235 * This function will check if each feature field of @val is the "safe" value
1237 * If a field value in @val is the same as the one in limit, it is always
1245 u64 val)
1250 u64 writable_mask = rd->val;
1260 return val ? -E2BIG : 0;
1276 f_val = arm64_ftr_value(ftrp, val);
1290 if ((val & ~mask) != (limit & ~mask))
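arm64_check_features() (lines 1235-1290) validates a userspace-proposed ID register value against a KVM-sanitised limit: for a RAZ register the only safe value is 0 (the test at line 1260), writable fields are checked one by one, and all remaining bits must match the limit exactly. A sketch of that shape; the single fv > lv test is a crude stand-in for arm64_ftr_safe_value(), which also understands signed and lower-safe fields:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct demo_field { uint64_t mask; unsigned int shift; };

    static int demo_check_features(uint64_t val, uint64_t limit, bool raz,
                                   const struct demo_field *f, int nf)
    {
            uint64_t mask = 0;

            if (raz)                  /* RAZ: only 0 is acceptable */
                    return val ? -E2BIG : 0;

            for (int i = 0; i < nf; i++) {
                    uint64_t fv = (val & f[i].mask) >> f[i].shift;
                    uint64_t lv = (limit & f[i].mask) >> f[i].shift;

                    mask |= f[i].mask;
                    if (fv > lv)      /* crude safe-value stand-in */
                            return -E2BIG;
            }

            /* Fields that are not writable must match the limit. */
            if ((val & ~mask) != (limit & ~mask))
                    return -E2BIG;

            return 0;
    }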
1314 u64 val;
1319 val = read_sanitised_ftr_reg(id);
1324 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
1326 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
1330 val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
1337 val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
1340 val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
1341 val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS);
1344 val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
1347 val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
1351 return val;
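Lines 1314-1351 compute KVM's sanitised view of an ID register: start from the host's read_sanitised_ftr_reg() value, then mask out feature fields KVM does not expose (MTE, SME, pointer auth, WFxT, MOPS, CCIDX in this listing). Sketch of the mask-out step with an ARM64_FEATURE_MASK-alike for 4-bit fields:

    #include <stdint.h>

    /* 4-bit feature field at @shift, like ARM64_FEATURE_MASK(). */
    #define DEMO_FEATURE_MASK(shift) (0xfULL << (shift))

    static uint64_t demo_hide_feature(uint64_t host_val, int shift)
    {
            return host_val & ~DEMO_FEATURE_MASK(shift);
    }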
1440 u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1443 val &= ~ID_AA64PFR0_EL1_SVE_MASK;
1454 val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
1455 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
1458 val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
1459 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
1463 val &= ~ID_AA64PFR0_EL1_GIC_MASK;
1464 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
1467 val &= ~ID_AA64PFR0_EL1_AMU_MASK;
1469 return val;
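read_sanitised_id_aa64pfr0_el1() (lines 1440-1469) goes one step further: besides clearing fields (SVE, AMU), it pins CSV2/CSV3/GIC to fixed values with a clear-then-set sequence built from SYS_FIELD_PREP_ENUM(). Sketch of the clear-then-set idiom, with a FIELD_PREP-style helper invented here:

    #include <stdint.h>

    /* FIELD_PREP-alike: multiply by the mask's lowest set bit to shift. */
    #define DEMO_FIELD_PREP(mask, v) \
            (((uint64_t)(v) * ((mask) & -(mask))) & (mask))

    static uint64_t demo_pin_field(uint64_t reg, uint64_t mask, uint64_t v)
    {
            reg &= ~mask;                    /* clear the old field value */
            reg |= DEMO_FIELD_PREP(mask, v); /* force the fixed value     */
            return reg;
    }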
1475 u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1478 val &= ~ID_AA64DFR0_EL1_DebugVer_MASK;
1479 val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DebugVer, IMP);
1484 val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
1486 val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
1490 val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
1492 return val;
1497 u64 val)
1499 u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
1516 val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
1518 return set_id_reg(vcpu, rd, val);
1525 u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
1527 val &= ~ID_DFR0_EL1_PerfMon_MASK;
1529 val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
1531 return val;
1536 u64 val)
1538 u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
1541 val &= ~ID_DFR0_EL1_PerfMon_MASK;
1554 return set_id_reg(vcpu, rd, val);
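set_id_aa64dfr0_el1() (lines 1497-1518) and its AArch32 twin set_id_dfr0_el1() (lines 1536-1554) special-case a PMU version of IMP_DEF (0xf): for ABI compatibility the field is cleared to 0 (not implemented) before handing off to the generic set_id_reg(). Sketch for the AArch64 field (PMUVer at bits [11:8]):

    #include <stdint.h>

    #define DEMO_PMUVER_SHIFT   8
    #define DEMO_PMUVER_MASK    (0xfULL << DEMO_PMUVER_SHIFT)
    #define DEMO_PMUVER_IMP_DEF 0xf

    static uint64_t demo_fixup_pmuver(uint64_t val)
    {
            uint64_t pmuver = (val & DEMO_PMUVER_MASK) >> DEMO_PMUVER_SHIFT;

            if (pmuver == DEMO_PMUVER_IMP_DEF)
                    val &= ~DEMO_PMUVER_MASK; /* treat IMP_DEF as not implemented */
            return val;
    }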
1565 u64 *val)
1572 *val = read_id_reg(vcpu, rd);
1577 *val = read_id_reg(vcpu, rd);
1584 u64 val)
1596 if (val != read_id_reg(vcpu, rd))
1605 ret = arm64_check_features(vcpu, rd, val);
1607 IDREG(vcpu->kvm, id) = val;
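get_id_reg()/set_id_reg() (lines 1565-1607) are the generic userspace accessors for writable ID registers: reads return the per-VM value, and writes are validated with arm64_check_features() before being stored via IDREG(). The comparison at line 1596 tolerates a write after the VM has started only if it changes nothing. Sketch of that write path, names invented:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>

    static int demo_set_id_reg(uint64_t *stored, uint64_t val, bool vm_ran,
                               int (*check)(uint64_t val))
    {
            int ret;

            /* Once the VM has run, only no-op writes are tolerated. */
            if (vm_ran)
                    return val != *stored ? -EBUSY : 0;

            ret = check(val);       /* arm64_check_features() stand-in */
            if (ret)
                    return ret;

            *stored = val;          /* the per-VM IDREG() slot */
            return 0;
    }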
1624 u64 *val)
1626 *val = 0;
1631 u64 val)
1713 u64 val)
1716 u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));
1718 if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
1721 __vcpu_sys_reg(vcpu, rd->reg) = val;
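set_clidr() (lines 1713-1721) derives whether the proposed CLIDR_EL1 implies IDC behaviour (no cache maintenance needed to the PoU): true when there is no Level of Coherence, or neither LoUIS nor LoUU. Values are rejected if they have RES0 bits set or imply IDC while the host's CTR_EL0.IDC is clear. Sketch of the consistency check, with the CLIDR field tests passed in as booleans rather than re-deriving the field layout:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>

    static int demo_set_clidr(uint64_t val, uint64_t res0_mask, bool host_idc,
                              bool loc, bool louis, bool louu)
    {
            /* No LoC, or neither LoUIS nor LoUU, implies IDC behaviour. */
            bool idc = !loc || (!louis && !louu);

            if ((val & res0_mask) || (!host_idc && idc))
                    return -EINVAL;
            return 0;
    }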
1786 .val = v, \
1805 .val = v, \
1810 * Since reset() callback and field val are not used for idregs, they will be
1814 * The val would be used as a mask indicating writable fields for the idreg.
1828 .val = 0, \
1839 .val = 0, \
1854 .val = 0, \
1869 .val = 0, \
1983 .val = ID_DFR0_EL1_PerfMon_MASK, },
2017 .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
2032 .val = ID_AA64DFR0_EL1_PMUVer_MASK, },
2202 .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
2360 .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
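In the descriptor table, .val is overloaded, as the comment at lines 1810-1814 says: for ID registers it is the mask of writable fields consumed by arm64_check_features() (e.g. ID_AA64DFR0_EL1_PMUVer_MASK at line 2032), while for registers using reset_val() it is the reset value (e.g. PMUSERENR_EL0 and PMCCFILTR_EL0 resetting to 0). A sketch of a descriptor carrying both uses, with invented types:

    #include <stdint.h>

    struct demo_desc {
            const char *name;
            uint64_t (*reset)(const struct demo_desc *rd);
            uint64_t val; /* reset value, or writable-field mask for ID regs */
    };

    static uint64_t demo_reset_val(const struct demo_desc *rd)
    {
            return rd->val; /* reset_val(): register := .val */
    }

    static const struct demo_desc demo_table[] = {
            { "PMUSERENR_EL0", demo_reset_val, 0 },          /* .val = reset value   */
            { "ID_AA64DFR0_EL1", 0, 0xf00ULL /* PMUVer */ }, /* .val = writable mask */
    };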
3274 ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
3275 return ((struct sys_reg_desc *)r)->val; \
3284 ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
3285 return ((struct sys_reg_desc *)r)->val;
3288 /* ->val is filled in by kvm_sys_reg_table_init() */
3305 return put_user(r->val, uaddr);
3311 u64 val;
3318 if (get_user(val, uaddr))
3322 if (r->val != val)
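Lines 3274-3322 implement the invariant registers: ->val is snapshotted from hardware at table-init time (read_sysreg(), or read_sanitised_ftr_reg() for CTR_EL0, per the comment at line 3288), get returns the snapshot, and a set only succeeds when userspace writes back the identical value. Sketch of the invariant setter:

    #include <errno.h>
    #include <stdint.h>

    static int demo_set_invariant(uint64_t snapshot, uint64_t user_val)
    {
            /* Writes succeed only if they change nothing. */
            return user_val != snapshot ? -EINVAL : 0;
    }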
3330 u32 val;
3342 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
3344 if (val >= CSSELR_MAX)
3347 return put_user(get_ccsidr(vcpu, val), uval);
3355 u32 val, newval;
3367 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
3369 if (val >= CSSELR_MAX)
3375 return set_ccsidr(vcpu, val, newval);
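The demux path (lines 3330-3375) decodes a CSSELR index out of the register ID with KVM_REG_ARM_DEMUX_VAL_MASK and a shift, bounds-checks it against CSSELR_MAX, and routes to get_ccsidr()/set_ccsidr(). Sketch of the decode; the DEMO_* constants are placeholders, not the real UAPI values:

    #include <errno.h>
    #include <stdint.h>

    #define DEMO_DEMUX_VAL_MASK  0xffffULL /* placeholder mask */
    #define DEMO_DEMUX_VAL_SHIFT 0
    #define DEMO_CSSELR_MAX      14

    static int demo_demux_to_csselr(uint64_t id, uint32_t *csselr)
    {
            uint32_t v = (id & DEMO_DEMUX_VAL_MASK) >> DEMO_DEMUX_VAL_SHIFT;

            if (v >= DEMO_CSSELR_MAX)
                    return -ENOENT;
            *csselr = v;
            return 0;
    }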
3386 u64 val;
3394 ret = (r->get_user)(vcpu, r, &val);
3396 val = __vcpu_sys_reg(vcpu, r->reg);
3401 ret = put_user(val, uaddr);
3427 u64 val;
3430 if (get_user(val, uaddr))
3441 ret = (r->set_user)(vcpu, r, val);
3443 __vcpu_sys_reg(vcpu, r->reg) = val;
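kvm_sys_reg_get_user()/set_user() (lines 3386-3443) prefer a descriptor's get_user/set_user callback when one is provided and otherwise copy the raw __vcpu_sys_reg() word directly. Sketch of the callback-or-direct dispatch for the read side, types invented:

    #include <stdint.h>

    struct demo_reg {
            int idx;
            int (*get_user)(const struct demo_reg *r, uint64_t *val);
    };

    static int demo_get_reg(const uint64_t *sys_regs, const struct demo_reg *r,
                            uint64_t *val)
    {
            if (r->get_user)
                    return r->get_user(r, val); /* register-specific handler */

            *val = sys_regs[r->idx];            /* plain shadow-copy read */
            return 0;
    }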
3473 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
3476 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
3478 if (put_user(val | i, uindices))
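Finally, write_demux_regids() (lines 3473-3478) publishes the demux register IDs by OR-ing a fixed prefix (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR) with each CSSELR index. Sketch; the prefix value below is a placeholder, not the real UAPI encoding:

    #include <stdint.h>

    #define DEMO_DEMUX_PREFIX 0x6020000000110000ULL /* placeholder base ID */

    static uint64_t demo_demux_regid(unsigned int csselr_idx)
    {
            return DEMO_DEMUX_PREFIX | csselr_idx; /* base | index */
    }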