Lines Matching defs:kvm

97 static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
104 ptep = (pte_t *)kvm->arch.pgd;
126 static void gstage_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
134 kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, addr, BIT(order), order);
137 static int gstage_set_pte(struct kvm *kvm, u32 level,
142 pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
172 gstage_remote_tlb_flush(kvm, current_level, addr);
177 static int gstage_map_page(struct kvm *kvm,
219 return gstage_set_pte(kvm, level, pcache, gpa, &new_pte);
228 static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
256 gstage_op_pte(kvm, addr + i * next_page_size,
265 gstage_remote_tlb_flush(kvm, ptep_level, addr);
269 static void gstage_unmap_range(struct kvm *kvm, gpa_t start,
280 found_leaf = gstage_get_leaf_entry(kvm, addr,
290 gstage_op_pte(kvm, addr, ptep,
297 * If the range is too large, release the kvm->mmu_lock
301 cond_resched_lock(&kvm->mmu_lock);
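
The fragments at lines 297-301 hint at a common pattern in range walkers that run under kvm->mmu_lock: on long walks the lock is periodically dropped and re-taken via cond_resched_lock() so other waiters and the lockup detectors are not starved. A minimal kernel-context sketch of that pattern follows; the walker body and the may_block flag are illustrative, only cond_resched_lock() and the spinlock come from the listing.

	#include <linux/kvm_host.h>

	/*
	 * Illustrative range walker, not the kernel's gstage_unmap_range():
	 * it only demonstrates the lock-yielding pattern referenced at
	 * lines 297-301. The caller is assumed to hold kvm->mmu_lock.
	 */
	static void gpa_range_walk_sketch(struct kvm *kvm, gpa_t start,
					  gpa_t size, bool may_block)
	{
		gpa_t addr = start, end = start + size;

		while (addr < end) {
			/* ... look up and operate on the PTE covering @addr ... */
			addr += PAGE_SIZE;

			/*
			 * On large ranges, briefly drop and re-take
			 * kvm->mmu_lock so the walk does not starve other
			 * waiters or trip the soft-lockup detector.
			 */
			if (may_block && addr < end)
				cond_resched_lock(&kvm->mmu_lock);
		}
	}
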
305 static void gstage_wp_range(struct kvm *kvm, gpa_t start, gpa_t end)
315 found_leaf = gstage_get_leaf_entry(kvm, addr,
325 gstage_op_pte(kvm, addr, ptep,
333 static void gstage_wp_memory_region(struct kvm *kvm, int slot)
335 struct kvm_memslots *slots = kvm_memslots(kvm);
340 spin_lock(&kvm->mmu_lock);
341 gstage_wp_range(kvm, start, end);
342 spin_unlock(&kvm->mmu_lock);
343 kvm_flush_remote_tlbs(kvm);
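
Lines 333-343 show the pattern used when dirty logging is enabled for a memslot: resolve the slot, write-protect its whole GPA range under kvm->mmu_lock, and only then flush remote TLBs with the lock dropped. A hedged reconstruction from the fragments above; the memslot-to-range computation is an assumption based on the generic memslot fields, and gstage_wp_range() is the static helper from the listing.

	/* Sketch of the write-protect-then-flush sequence at lines 333-343. */
	static void wp_memory_region_sketch(struct kvm *kvm, int slot)
	{
		struct kvm_memslots *slots = kvm_memslots(kvm);
		struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
		phys_addr_t start, end;

		if (!memslot)
			return;

		/* Convert the memslot's gfn range into a GPA range. */
		start = memslot->base_gfn << PAGE_SHIFT;
		end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

		/* Clear the write bit on every leaf PTE under the MMU lock... */
		spin_lock(&kvm->mmu_lock);
		gstage_wp_range(kvm, start, end);
		spin_unlock(&kvm->mmu_lock);

		/* ...then make the change visible by flushing remote TLBs. */
		kvm_flush_remote_tlbs(kvm);
	}
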
346 int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
372 spin_lock(&kvm->mmu_lock);
373 ret = gstage_set_pte(kvm, 0, &pcache, addr, &pte);
374 spin_unlock(&kvm->mmu_lock);
386 void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
388 spin_lock(&kvm->mmu_lock);
389 gstage_unmap_range(kvm, gpa, size, false);
390 spin_unlock(&kvm->mmu_lock);
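
Lines 346-390 are the MMIO mapping helpers: kvm_riscv_gstage_ioremap() installs g-stage PTEs for a host physical range via gstage_set_pte() under kvm->mmu_lock, and kvm_riscv_gstage_iounmap() removes them. The usage sketch below is hedged: the first three ioremap arguments come from the call at line 522, while the trailing size/writable/atomic-context arguments are assumptions that vary across kernel versions.

	/*
	 * Sketch: map a host MMIO region at @hpa read-only into the guest
	 * physical address space at @gpa, then tear the mapping down again.
	 * The trailing boolean arguments (writable, in_atomic) reflect recent
	 * kernels and may not exist on older trees.
	 */
	static int map_guest_mmio_sketch(struct kvm *kvm, gpa_t gpa,
					 phys_addr_t hpa, unsigned long size)
	{
		int ret;

		ret = kvm_riscv_gstage_ioremap(kvm, gpa, hpa, size,
					       false /* writable */,
					       false /* in_atomic */);
		if (ret)
			return ret;

		/* ... guest accesses to [gpa, gpa + size) now reach the device ... */

		kvm_riscv_gstage_iounmap(kvm, gpa, size);
		return 0;
	}
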
393 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
402 gstage_wp_range(kvm, start, end);
405 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
409 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free)
413 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
417 void kvm_arch_flush_shadow_all(struct kvm *kvm)
419 kvm_riscv_gstage_free_pgd(kvm);
422 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
428 spin_lock(&kvm->mmu_lock);
429 gstage_unmap_range(kvm, gpa, size, false);
430 spin_unlock(&kvm->mmu_lock);
433 void kvm_arch_commit_memory_region(struct kvm *kvm,
444 gstage_wp_memory_region(kvm, new->id);
447 int kvm_arch_prepare_memory_region(struct kvm *kvm,
522 ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa,
535 kvm_riscv_gstage_iounmap(kvm, base_gpa, size);
542 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
544 if (!kvm->arch.pgd)
547 gstage_unmap_range(kvm, range->start << PAGE_SHIFT,
553 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
558 if (!kvm->arch.pgd)
563 ret = gstage_map_page(kvm, NULL, range->start << PAGE_SHIFT,
573 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
579 if (!kvm->arch.pgd)
584 if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
591 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
597 if (!kvm->arch.pgd)
602 if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
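
Lines 542-602 are KVM's generic MMU-notifier callbacks wired to the g-stage table: each one returns early when kvm->arch.pgd has not been allocated, and otherwise converts the gfn range to a GPA range by shifting with PAGE_SHIFT. A hedged reconstruction of the unmap handler's shape, assembled from the fragments above; the false return value (no extra TLB flush requested from common code) is an assumption.

	/* Sketch of the unmap callback shape at lines 542-547. */
	static bool unmap_gfn_range_sketch(struct kvm *kvm,
					   struct kvm_gfn_range *range)
	{
		/* No g-stage page table yet, so there is nothing to unmap. */
		if (!kvm->arch.pgd)
			return false;

		/* gfns -> GPAs, then clear every mapping in the range. */
		gstage_unmap_range(kvm, range->start << PAGE_SHIFT,
				   (range->end - range->start) << PAGE_SHIFT,
				   range->may_block);

		/*
		 * Returning false asks common KVM code not to issue an extra
		 * remote TLB flush (assumption: the per-PTE flushes done
		 * while clearing suffice).
		 */
		return false;
	}
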
619 struct kvm *kvm = vcpu->kvm;
655 * kvm->mmu_lock.
660 mmu_seq = kvm->mmu_invalidate_seq;
670 hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
686 spin_lock(&kvm->mmu_lock);
688 if (mmu_invalidate_retry(kvm, mmu_seq))
693 mark_page_dirty(kvm, gfn);
694 ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
697 ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
705 spin_unlock(&kvm->mmu_lock);
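
Lines 619-705 belong to the g-stage fault handler. The fragments show KVM's standard invalidation-race pattern: snapshot kvm->mmu_invalidate_seq, translate the gfn to a host pfn outside the lock (gfn_to_pfn_prot() can sleep), then take kvm->mmu_lock and re-check the sequence with mmu_invalidate_retry() before installing anything. A condensed sketch of that ordering; the meaning of the trailing gstage_map_page() arguments (map size, read-only, executable) is an assumption, and pfn-reference cleanup is elided.

	/* Condensed fault-path sketch; not the kernel's kvm_riscv_gstage_map(). */
	static int map_faulting_gfn_sketch(struct kvm *kvm,
					   struct kvm_mmu_memory_cache *pcache,
					   gpa_t gpa, gfn_t gfn, bool is_write,
					   unsigned long map_size)
	{
		unsigned long mmu_seq;
		bool writable = false;
		kvm_pfn_t hfn;
		int ret = 0;

		/* Snapshot the invalidation counter before the sleepable lookup. */
		mmu_seq = kvm->mmu_invalidate_seq;

		hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
		if (is_error_noslot_pfn(hfn))
			return -EFAULT;

		spin_lock(&kvm->mmu_lock);

		/*
		 * An MMU-notifier invalidation ran while we were unlocked:
		 * do not install a possibly stale pfn, let the guest re-fault.
		 */
		if (mmu_invalidate_retry(kvm, mmu_seq))
			goto out_unlock;

		if (writable) {
			/* Writable mapping: account the dirty page first. */
			mark_page_dirty(kvm, gfn);
			ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
					      map_size, false, true);
		} else {
			ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
					      map_size, true, true);
		}

	out_unlock:
		spin_unlock(&kvm->mmu_lock);
		return ret;
	}
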
711 int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm)
715 if (kvm->arch.pgd != NULL) {
724 kvm->arch.pgd = page_to_virt(pgd_page);
725 kvm->arch.pgd_phys = page_to_phys(pgd_page);
730 void kvm_riscv_gstage_free_pgd(struct kvm *kvm)
734 spin_lock(&kvm->mmu_lock);
735 if (kvm->arch.pgd) {
736 gstage_unmap_range(kvm, 0UL, gstage_gpa_size, false);
737 pgd = READ_ONCE(kvm->arch.pgd);
738 kvm->arch.pgd = NULL;
739 kvm->arch.pgd_phys = 0;
741 spin_unlock(&kvm->mmu_lock);
750 struct kvm_arch *k = &vcpu->kvm->arch;