Lines matching refs: iter
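These matches appear to come from the KVM TDP MMU (arch/x86/kvm/mmu/tdp_mmu.c): the SPTE-update helpers around line 352, the yield/resched logic around line 423, the range walkers (zap, age, write-protect, dirty tracking), and the page-fault mapping path around lines 582-647. Every match touches the walk iterator. For orientation, the sketch below reconstructs the iterator state those references imply; the field names are taken from the matched lines (gfn, level, old_spte, sptep, pt_path, root_level, min_level, next_last_level_gfn, yielded_gfn), while the types, the array bound, and the valid flag are assumptions, not the authoritative struct tdp_iter definition from tdp_iter.h.

/*
 * Hedged sketch of the iterator implied by the matched lines; not the
 * real definition from arch/x86/kvm/mmu/tdp_iter.h. Kernel typedefs are
 * replaced with stand-ins so the snippet compiles on its own.
 */
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;              /* stand-in for the kernel's u64 */
typedef uint64_t gfn_t;            /* stand-in for the kernel's gfn_t */
#define PT64_ROOT_MAX_LEVEL 5      /* assumed bound for pt_path[] */

struct tdp_iter {
	gfn_t next_last_level_gfn;  /* GFN the walk is advancing toward (line 426) */
	gfn_t yielded_gfn;          /* next_last_level_gfn at the last yield (line 426) */
	u64 *pt_path[PT64_ROOT_MAX_LEVEL]; /* tables walked to reach sptep (line 437) */
	u64 *sptep;                 /* pointer to the current SPTE (line 360) */
	gfn_t gfn;                  /* lowest GFN mapped by the current SPTE */
	int root_level;             /* level of the root the walk started from (line 438) */
	int min_level;              /* lowest level the walk descends to (line 438) */
	int level;                  /* current level within the paging structure */
	u64 old_spte;               /* snapshot of *sptep taken by the walker */
	bool valid;                 /* assumed walk-in-bounds flag; not in the matches */
};

The matched lines follow.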
352 static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
356 u64 *root_pt = tdp_iter_root_pt(iter);
360 WRITE_ONCE(*iter->sptep, new_spte);
362 __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
363 iter->level);
365 handle_changed_spte_acc_track(iter->old_spte, new_spte,
366 iter->level);
368 handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
369 iter->old_spte, new_spte,
370 iter->level);
373 static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
376 __tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
380 struct tdp_iter *iter,
383 __tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
387 struct tdp_iter *iter,
390 __tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
423 struct tdp_iter *iter, bool flush)
426 if (iter->next_last_level_gfn == iter->yielded_gfn)
435 WARN_ON(iter->gfn > iter->next_last_level_gfn);
437 tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
438 iter->root_level, iter->min_level,
439 iter->next_last_level_gfn);
463 struct tdp_iter iter;
465 tdp_root_for_each_pte(iter, root, start, end) {
467 tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
472 if (!is_shadow_present_pte(iter.old_spte))
480 if ((iter.gfn < start ||
481 iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
482 !is_last_spte(iter.old_spte, iter.level))
485 tdp_mmu_set_spte(kvm, &iter, 0);
526 struct tdp_iter *iter,
534 new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
535 trace_mark_mmio_spte(iter->sptep, iter->gfn, new_spte);
537 make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
538 pfn, iter->old_spte, prefault, true,
542 if (new_spte == iter->old_spte)
545 tdp_mmu_set_spte(vcpu->kvm, iter, new_spte);
562 trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep);
582 struct tdp_iter iter;
600 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
602 disallowed_hugepage_adjust(iter.old_spte, gfn,
603 iter.level, &pfn, &level);
605 if (iter.level == level)
613 if (is_shadow_present_pte(iter.old_spte) &&
614 is_large_pte(iter.old_spte)) {
615 tdp_mmu_set_spte(vcpu->kvm, &iter, 0);
617 kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn,
618 KVM_PAGES_PER_HPAGE(iter.level));
621 * The iter must explicitly re-read the spte here
625 iter.old_spte = READ_ONCE(*iter.sptep);
628 if (!is_shadow_present_pte(iter.old_spte)) {
629 sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
637 if (huge_page_disallowed && req_level >= iter.level)
640 tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte);
644 if (WARN_ON(iter.level != level))
647 ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
715 struct tdp_iter iter;
719 tdp_root_for_each_leaf_pte(iter, root, start, end) {
724 if (!is_accessed_spte(iter.old_spte))
727 new_spte = iter.old_spte;
744 tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte);
762 struct tdp_iter iter;
764 tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1)
765 if (is_accessed_spte(iter.old_spte))
787 struct tdp_iter iter;
797 tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
798 if (iter.level != PG_LEVEL_4K)
801 if (!is_shadow_present_pte(iter.old_spte))
804 tdp_mmu_set_spte(kvm, &iter, 0);
806 kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1);
810 iter.old_spte, new_pfn);
812 tdp_mmu_set_spte(kvm, &iter, new_spte);
840 struct tdp_iter iter;
846 for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
848 if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
851 if (!is_shadow_present_pte(iter.old_spte) ||
852 !is_last_spte(iter.old_spte, iter.level))
855 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
857 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
897 struct tdp_iter iter;
901 tdp_root_for_each_leaf_pte(iter, root, start, end) {
902 if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
905 if (!is_shadow_present_pte(iter.old_spte))
908 if (spte_ad_need_write_protect(iter.old_spte)) {
909 if (is_writable_pte(iter.old_spte))
910 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
914 if (iter.old_spte & shadow_dirty_mask)
915 new_spte = iter.old_spte & ~shadow_dirty_mask;
920 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
961 struct tdp_iter iter;
964 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
969 if (iter.level > PG_LEVEL_4K ||
970 !(mask & (1UL << (iter.gfn - gfn))))
973 if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
974 if (is_writable_pte(iter.old_spte))
975 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
979 if (iter.old_spte & shadow_dirty_mask)
980 new_spte = iter.old_spte & ~shadow_dirty_mask;
985 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
987 mask &= ~(1UL << (iter.gfn - gfn));
1024 struct tdp_iter iter;
1028 tdp_root_for_each_pte(iter, root, start, end) {
1029 if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
1032 if (!is_shadow_present_pte(iter.old_spte))
1035 new_spte = iter.old_spte | shadow_dirty_mask;
1037 tdp_mmu_set_spte(kvm, &iter, new_spte);
1074 struct tdp_iter iter;
1078 tdp_root_for_each_pte(iter, root, start, end) {
1079 if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
1084 if (!is_shadow_present_pte(iter.old_spte) ||
1085 !is_last_spte(iter.old_spte, iter.level))
1088 pfn = spte_to_pfn(iter.old_spte);
1094 tdp_mmu_set_spte(kvm, &iter, 0);
1131 struct tdp_iter iter;
1135 tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
1136 new_spte = iter.old_spte &
1139 if (new_spte == iter.old_spte)
1142 tdp_mmu_set_spte(kvm, &iter, new_spte);
1179 struct tdp_iter iter;
1186 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1187 leaf = iter.level;
1188 sptes[leaf - 1] = iter.old_spte;
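Read end to end, the matches trace one recurring shape: walk a GFN range over the paging structure, yield now and then via tdp_mmu_iter_cond_resched() (lines 423-439 and its call sites), skip entries that are not shadow-present, and push every change through the single __tdp_mmu_set_spte() funnel (lines 352-390), whose wrappers differ only in the pair of booleans they pass. The toy program below is a hedged userspace stand-in for that shape as zap_gfn_range() (lines 463-485) uses it; every helper is a local stub named after the kernel function it imitates, and none of the signatures are the kernel's.

/*
 * Toy stand-in for the walk/skip/update pattern in the listing. Nothing
 * here is kernel code: the "page table" is an array, the yield check is a
 * stub, and set_spte() only imitates the role of tdp_mmu_set_spte().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint64_t gfn_t;

struct tdp_iter { gfn_t gfn; int level; u64 old_spte; u64 *sptep; };

static bool is_shadow_present_pte(u64 spte) { return spte & 1; }  /* stub */
static bool should_yield(void)              { return false; }     /* stub for the resched check */

/* Stand-in for tdp_mmu_set_spte(): publish the new value, then report the change. */
static void set_spte(struct tdp_iter *iter, u64 new_spte)
{
	*iter->sptep = new_spte;
	printf("gfn %llu: %#llx -> %#llx\n",
	       (unsigned long long)iter->gfn,
	       (unsigned long long)iter->old_spte,
	       (unsigned long long)new_spte);
}

int main(void)
{
	u64 sptes[4] = { 0x1, 0x0, 0x3, 0x1 };   /* toy last-level "page table" */
	struct tdp_iter iter;
	gfn_t gfn;

	/* Shape of the tdp_root_for_each_pte() loop in zap_gfn_range(): walk, maybe yield, skip, zap. */
	for (gfn = 0; gfn < 4; gfn++) {
		iter = (struct tdp_iter){ .gfn = gfn, .level = 1,
					  .sptep = &sptes[gfn],
					  .old_spte = sptes[gfn] };
		if (should_yield())                       /* tdp_mmu_iter_cond_resched() */
			continue;
		if (!is_shadow_present_pte(iter.old_spte))
			continue;
		set_spte(&iter, 0);                       /* tdp_mmu_set_spte(kvm, &iter, 0) */
	}
	return 0;
}

The single funnel is the design choice the listing makes most visible: tdp_mmu_set_spte(), tdp_mmu_set_spte_no_acc_track() and tdp_mmu_set_spte_no_dirty_log() (lines 373-390) only change which of the handle_changed_spte_acc_track()/handle_changed_spte_dirty_log() follow-ups __tdp_mmu_set_spte() runs, so accessed/dirty bookkeeping lives in one place rather than at every call site.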