Lines Matching defs:offset

106 	u64 offset = 0;
111 if (ctxt->offset.vm_offset)
112 offset += *ctxt->offset.vm_offset;
113 if (ctxt->offset.vcpu_offset)
114 offset += *ctxt->offset.vcpu_offset;
116 return offset;
163 static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
165 if (!ctxt->offset.vm_offset) {
166 WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
170 WRITE_ONCE(*ctxt->offset.vm_offset, offset);
532 * offset of zero, so no need to zero CNTVOFF_EL2
619 u64 cval, offset;
631 offset = timer_get_offset(ctx);
632 set_cntpoff(offset);
633 cval += offset;
749 * The virtual offset behaviour is "interesting", as it
755 struct arch_timer_offset *offs = &map->direct_vtimer->offset;
785 * We have two possibilities to deal with a physical offset:
787 * - Either we have CNTPOFF (yay!) or the offset is 0:
792 * after correcting the physical offset
956 * A vcpu running at EL2 is in charge of the offset applied to
957 * the virtual timer, so use the physical VM offset, and point
958 * the vcpu offset to CNTVOFF_EL2.
961 struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;
995 ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset;
997 ctxt->offset.vm_offset = &kvm->arch.timer_data.poffset;
1160 val = *timer->offset.vcpu_offset;
1214 *timer->offset.vcpu_offset = val;
1655 struct kvm_arm_counter_offset *offset)
1659 if (offset->reserved)
1668 * If userspace decides to set the offset using this
1670 * values, the offset applies to both the virtual and
1673 kvm->arch.timer_data.voffset = offset->counter_offset;
1674 kvm->arch.timer_data.poffset = offset->counter_offset;