Lines Matching refs:arch
10 * This file is derived from arch/powerpc/kvm/44x.c,
137 vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
170 unsigned long old_pending = vcpu->arch.pending_exceptions;
173 &vcpu->arch.pending_exceptions);
175 kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
184 &vcpu->arch.pending_exceptions);
237 return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
271 vcpu->arch.external_oneshot = 1;
388 if (vcpu->arch.external_oneshot) {
389 vcpu->arch.external_oneshot = 0;
400 unsigned long *pending = &vcpu->arch.pending_exceptions;
401 unsigned long old_pending = vcpu->arch.pending_exceptions;
405 if (vcpu->arch.pending_exceptions)
406 printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
412 clear_bit(priority, &vcpu->arch.pending_exceptions);
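
Note: the pending_exceptions matches above (source lines 170-412) all operate on a single unsigned long used as a priority bitmap: set_bit() queues an interrupt at its BOOK3S_IRQPRIO_* priority, test_bit() checks whether one is pending, and clear_bit() retires it once delivered. A minimal, self-contained sketch of that bitmap idiom follows; it uses simplified stand-ins for the kernel bitops and invented priority names, not the real BOOK3S_IRQPRIO_* values.

#include <stdio.h>

/* Hypothetical priority numbers standing in for the BOOK3S_IRQPRIO_* values. */
enum {
        IRQPRIO_DECREMENTER = 0,
        IRQPRIO_EXTERNAL    = 1,
        IRQPRIO_MAX         = 2,
};

/* Simplified stand-ins for the kernel's set_bit()/clear_bit()/test_bit(). */
static void set_bit_ul(int nr, unsigned long *addr)   { *addr |=  (1UL << nr); }
static void clear_bit_ul(int nr, unsigned long *addr) { *addr &= ~(1UL << nr); }
static int  test_bit_ul(int nr, unsigned long *addr)  { return (*addr >> nr) & 1; }

int main(void)
{
        unsigned long pending_exceptions = 0;

        /* Queue an interrupt: remember it as a pending priority bit. */
        set_bit_ul(IRQPRIO_DECREMENTER, &pending_exceptions);

        /* Later, when interrupts can be taken, deliver in priority order and clear. */
        for (int prio = 0; prio < IRQPRIO_MAX; prio++) {
                if (test_bit_ul(prio, &pending_exceptions)) {
                        printf("delivering priority %d\n", prio);
                        clear_bit_ul(prio, &pending_exceptions);
                }
        }
        return 0;
}
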
431 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
440 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
463 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
475 if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
487 * as used in HEIR, vcpu->arch.last_inst and vcpu->arch.emul_inst.
488 * Like vcpu->arch.last_inst but unlike vcpu->arch.emul_inst, each
538 ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
550 ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
568 regs->pid = vcpu->arch.pid;
627 r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
642 *val = get_reg_val(id, vcpu->arch.fp.fpscr);
648 val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
649 val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
660 if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
672 if (!vcpu->arch.xive_vcpu) {
683 *val = get_reg_val(id, vcpu->arch.fscr);
686 *val = get_reg_val(id, vcpu->arch.tar);
689 *val = get_reg_val(id, vcpu->arch.ebbhr);
692 *val = get_reg_val(id, vcpu->arch.ebbrr);
695 *val = get_reg_val(id, vcpu->arch.bescr);
698 *val = get_reg_val(id, vcpu->arch.ic);
715 r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
730 vcpu->arch.fp.fpscr = set_reg_val(id, *val);
736 vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
737 vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
745 if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
757 if (!vcpu->arch.xive_vcpu) {
768 vcpu->arch.fscr = set_reg_val(id, *val);
771 vcpu->arch.tar = set_reg_val(id, *val);
774 vcpu->arch.ebbhr = set_reg_val(id, *val);
777 vcpu->arch.ebbrr = set_reg_val(id, *val);
780 vcpu->arch.bescr = set_reg_val(id, *val);
783 vcpu->arch.ic = set_reg_val(id, *val);
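
Note: the get_reg_val()/set_reg_val() matches above (source lines 627-783) are the two halves of the ONE_REG interface: a switch on the register ID copies between the caller-supplied value and the matching vcpu->arch field. A rough userspace sketch of that dispatch shape, using hypothetical register IDs and a toy vcpu structure rather than the real KVM definitions:

#include <stdint.h>
#include <stdio.h>

/* Invented register IDs and vcpu state standing in for the real KVM_REG_*
 * identifiers and vcpu->arch fields touched in the listing above. */
enum { REG_FSCR, REG_TAR };

struct toy_vcpu {
        uint64_t fscr;
        uint64_t tar;
};

/* get_one_reg-style dispatch: select the backing field by register ID. */
static int toy_get_one_reg(struct toy_vcpu *vcpu, int id, uint64_t *val)
{
        switch (id) {
        case REG_FSCR: *val = vcpu->fscr; return 0;
        case REG_TAR:  *val = vcpu->tar;  return 0;
        default:       return -1;         /* stands in for -EINVAL */
        }
}

static int toy_set_one_reg(struct toy_vcpu *vcpu, int id, uint64_t val)
{
        switch (id) {
        case REG_FSCR: vcpu->fscr = val; return 0;
        case REG_TAR:  vcpu->tar  = val; return 0;
        default:       return -1;
        }
}

int main(void)
{
        struct toy_vcpu vcpu = { 0 };
        uint64_t v;

        toy_set_one_reg(&vcpu, REG_TAR, 0x1234);
        toy_get_one_reg(&vcpu, REG_TAR, &v);
        printf("TAR = 0x%llx\n", (unsigned long long)v);
        return 0;
}
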
796 vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
801 vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
806 vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
812 return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
838 return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
843 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
848 return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
858 return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
863 kvm->arch.kvm_ops->free_memslot(slot);
868 kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
876 return kvm->arch.kvm_ops->prepare_memory_region(kvm, old, new, change);
884 kvm->arch.kvm_ops->commit_memory_region(kvm, old, new, change);
889 return kvm->arch.kvm_ops->unmap_gfn_range(kvm, range);
894 return kvm->arch.kvm_ops->age_gfn(kvm, range);
899 return kvm->arch.kvm_ops->test_age_gfn(kvm, range);
904 return kvm->arch.kvm_ops->set_spte_gfn(kvm, range);
911 INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
912 INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
913 mutex_init(&kvm->arch.rtas_token_lock);
916 return kvm->arch.kvm_ops->init_vm(kvm);
921 kvm->arch.kvm_ops->destroy_vm(kvm);
925 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
933 kfree(kvm->arch.xive_devices.native);
934 kvm->arch.xive_devices.native = NULL;
935 kfree(kvm->arch.xive_devices.xics_on_xive);
936 kvm->arch.xive_devices.xics_on_xive = NULL;
937 kfree(kvm->arch.xics_device);
938 kvm->arch.xics_device = NULL;
1026 return kvm->arch.kvm_ops->hcall_implemented(hcall);
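
Note: most of the remaining matches go through kvm->arch.kvm_ops-> or vcpu->kvm->arch.kvm_ops->, a per-VM table of function pointers that lets the generic Book3S code dispatch to whichever backend registered itself (on Book3S, the HV and PR implementations). A small stand-alone sketch of that ops-table pattern, with invented toy_* names rather than the real kvm_ops structure:

#include <stdio.h>

/* Hypothetical ops table mirroring the kvm->arch.kvm_ops indirection seen
 * above: core code only calls through function pointers, and each backend
 * supplies its own implementations. */
struct toy_kvm_ops {
        int (*vcpu_run)(int vcpu_id);
        int (*hcall_implemented)(unsigned long hcall);
};

struct toy_kvm {
        struct toy_kvm_ops *kvm_ops;
};

/* One possible backend ("HV"-flavoured in this sketch). */
static int hv_vcpu_run(int vcpu_id)
{
        printf("running vcpu %d with the hv backend\n", vcpu_id);
        return 0;
}

static int hv_hcall_implemented(unsigned long hcall)
{
        return hcall == 4; /* arbitrary example hcall number */
}

static struct toy_kvm_ops hv_ops = {
        .vcpu_run          = hv_vcpu_run,
        .hcall_implemented = hv_hcall_implemented,
};

/* Core code never names the backend; it dispatches through the table. */
static int toy_vcpu_run(struct toy_kvm *kvm, int vcpu_id)
{
        return kvm->kvm_ops->vcpu_run(vcpu_id);
}

int main(void)
{
        struct toy_kvm kvm = { .kvm_ops = &hv_ops };

        toy_vcpu_run(&kvm, 0);
        printf("hcall 4 implemented: %d\n", kvm.kvm_ops->hcall_implemented(4));
        return 0;
}
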