Lines Matching refs:iter
207 struct tdp_iter *iter)
212 parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
217 tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
516 * If setting the SPTE fails because it has changed, iter->old_spte will be
520 * @iter: a tdp_iter instance currently on the SPTE that should be set
525 * no side-effects other than setting iter->old_spte to the last
529 struct tdp_iter *iter,
532 u64 *sptep = rcu_dereference(iter->sptep);
540 WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
547 * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with
551 if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
554 handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
555 new_spte, iter->level, true);
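
The tdp_mmu_set_spte_atomic() fragments above spell out the lock-free contract: a lost try_cmpxchg64() race fails the call with no side effect other than refreshing iter->old_spte, so callers can simply re-examine the SPTE and retry. Below is a minimal sketch of that caller pattern under the shared (read-mode) mmu_lock; example_zap_leafs_shared() is hypothetical, while the iterator macro and helpers are the ones matched in this listing:

static void example_zap_leafs_shared(struct kvm *kvm, struct kvm_mmu_page *root,
				     gfn_t start, gfn_t end)
{
	struct tdp_iter iter;

	lockdep_assert_held_read(&kvm->mmu_lock);

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, start, end) {
retry:
		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * A racing CPU makes this return nonzero and refreshes
		 * iter.old_spte; there are no other side effects, so the
		 * SPTE can simply be re-examined and the zap retried.
		 */
		if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
			goto retry;
	}

	rcu_read_unlock();
}
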
561 struct tdp_iter *iter)
571 ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
575 kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
584 __kvm_tdp_mmu_write_spte(iter->sptep, 0);
623 static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
626 WARN_ON_ONCE(iter->yielded);
627 iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
628 iter->old_spte, new_spte,
629 iter->gfn, iter->level);
652 * If this function yields, iter->yielded is set and the caller must skip to
660 struct tdp_iter *iter,
663 WARN_ON_ONCE(iter->yielded);
666 if (iter->next_last_level_gfn == iter->yielded_gfn)
682 WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn);
684 iter->yielded = true;
687 return iter->yielded;
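
Per the comment above, a true return from tdp_mmu_iter_cond_resched() means the walk yielded, iter->yielded is set, and the caller must skip the current iteration because the paging structures may have changed. Below is a sketch of the write-lock variant used by the zap path further down, assuming the helper issues any requested TLB flush before yielding; example_zap_leafs_exclusive() and its flush bookkeeping are illustrative only:

static bool example_zap_leafs_exclusive(struct kvm *kvm, struct kvm_mmu_page *root,
					gfn_t start, gfn_t end, bool flush)
{
	struct tdp_iter iter;

	lockdep_assert_held_write(&kvm->mmu_lock);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
		/*
		 * When the walk yields, iter.yielded is set and this
		 * iteration must be skipped; any flush that was requested
		 * is assumed to have been issued before yielding, so the
		 * local flag is cleared.
		 */
		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		tdp_mmu_iter_set_spte(kvm, &iter, 0);
		flush = true;
	}

	rcu_read_unlock();

	return flush;
}
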
704 struct tdp_iter iter;
709 for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
711 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
714 if (!is_shadow_present_pte(iter.old_spte))
717 if (iter.level > zap_level)
721 tdp_mmu_iter_set_spte(kvm, &iter, 0);
722 else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
794 struct tdp_iter iter;
802 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
804 tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
809 if (!is_shadow_present_pte(iter.old_spte) ||
810 !is_last_spte(iter.old_spte, iter.level))
813 tdp_mmu_iter_set_spte(kvm, &iter, 0);
954 struct tdp_iter *iter)
956 struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
965 new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
967 wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
968 fault->pfn, iter->old_spte, fault->prefetch, true,
971 if (new_spte == iter->old_spte)
973 else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
975 else if (is_shadow_present_pte(iter->old_spte) &&
976 !is_last_spte(iter->old_spte, iter->level))
977 kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);
992 trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
996 trace_kvm_mmu_set_spte(iter->level, iter->gfn,
997 rcu_dereference(iter->sptep));
1008 * @iter: a tdp_iter instance currently on the SPTE that should be set
1015 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
1022 ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
1026 tdp_mmu_iter_set_spte(kvm, iter, spte);
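
tdp_mmu_link_sp() replaces the SPTE the iterator currently points to with a pointer to a new child page table, taking the atomic path when shared is true. Below is a hedged sketch of how the fault path further down stitches these helpers together; example_ensure_child_sp() is hypothetical, and tdp_mmu_alloc_sp()/tdp_mmu_free_sp() are assumed allocation helpers from the same file that simply do not contain "iter" and so never appear in this listing:

/*
 * Hypothetical helper: make sure the SPTE at 'iter' points to a child page
 * table, installing a freshly allocated one if the entry is non-present or
 * splitting it if it maps a huge page.  Returns 0 on success, or nonzero if
 * a racing update won and the caller should retry the fault.
 */
static int example_ensure_child_sp(struct kvm_vcpu *vcpu, struct tdp_iter *iter)
{
	struct kvm_mmu_page *sp;
	int r;

	/* Already points at a lower-level page table: nothing to do. */
	if (is_shadow_present_pte(iter->old_spte) && !is_large_pte(iter->old_spte))
		return 0;

	sp = tdp_mmu_alloc_sp(vcpu);
	tdp_mmu_init_child_sp(sp, iter);

	if (is_shadow_present_pte(iter->old_spte))
		r = tdp_mmu_split_huge_page(vcpu->kvm, iter, sp, true);
	else
		r = tdp_mmu_link_sp(vcpu->kvm, iter, sp, true);

	/* The link is atomic under the shared mmu_lock; free on failure. */
	if (r)
		tdp_mmu_free_sp(sp);

	return r;
}
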
1034 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1045 struct tdp_iter iter;
1055 tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
1059 disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1065 if (is_removed_spte(iter.old_spte))
1068 if (iter.level == fault->goal_level)
1072 if (is_shadow_present_pte(iter.old_spte) &&
1073 !is_large_pte(iter.old_spte))
1081 tdp_mmu_init_child_sp(sp, &iter);
1085 if (is_shadow_present_pte(iter.old_spte))
1086 r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
1088 r = tdp_mmu_link_sp(kvm, &iter, sp, true);
1100 fault->req_level >= iter.level) {
1112 WARN_ON_ONCE(iter.level == fault->goal_level);
1116 ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
1135 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1143 struct tdp_iter iter;
1153 tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
1154 ret |= handler(kvm, &iter, range);
1170 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
1176 if (!is_accessed_spte(iter->old_spte))
1179 if (spte_ad_enabled(iter->old_spte)) {
1180 iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
1181 iter->old_spte,
1183 iter->level);
1184 new_spte = iter->old_spte & ~shadow_accessed_mask;
1190 if (is_writable_pte(iter->old_spte))
1191 kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte));
1193 new_spte = mark_spte_for_access_track(iter->old_spte);
1194 iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
1195 iter->old_spte, new_spte,
1196 iter->level);
1199 trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
1200 iter->old_spte, new_spte);
1209 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
1212 return is_accessed_spte(iter->old_spte);
1220 static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
1228 if (iter->level != PG_LEVEL_4K ||
1229 !is_shadow_present_pte(iter->old_spte))
1238 tdp_mmu_iter_set_spte(kvm, iter, 0);
1241 new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
1244 tdp_mmu_iter_set_spte(kvm, iter, new_spte);
1274 struct tdp_iter iter;
1282 for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
1284 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1287 if (!is_shadow_present_pte(iter.old_spte) ||
1288 !is_last_spte(iter.old_spte, iter.level) ||
1289 !(iter.old_spte & PT_WRITABLE_MASK))
1292 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1294 if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
1344 struct tdp_iter *iter,
1369 iter->yielded = true;
1383 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1386 const u64 huge_spte = iter->old_spte;
1387 const int level = iter->level;
1405 ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
1417 trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1427 struct tdp_iter iter;
1443 for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
1445 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
1448 if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
1452 sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
1455 trace_kvm_mmu_split_huge_page(iter.gfn,
1456 iter.old_spte,
1457 iter.level, ret);
1461 if (iter.yielded)
1465 tdp_mmu_init_child_sp(sp, &iter);
1467 if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
1520 struct tdp_iter iter;
1525 tdp_root_for_each_leaf_pte(iter, root, start, end) {
1527 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1530 if (!is_shadow_present_pte(iter.old_spte))
1534 spte_ad_need_write_protect(iter.old_spte));
1536 if (!(iter.old_spte & dbit))
1539 if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
1583 struct tdp_iter iter;
1589 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1595 spte_ad_need_write_protect(iter.old_spte));
1597 if (iter.level > PG_LEVEL_4K ||
1598 !(mask & (1UL << (iter.gfn - gfn))))
1601 mask &= ~(1UL << (iter.gfn - gfn));
1603 if (!(iter.old_spte & dbit))
1606 iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
1607 iter.old_spte, dbit,
1608 iter.level);
1610 trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
1611 iter.old_spte,
1612 iter.old_spte & ~dbit);
1613 kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte));
1643 struct tdp_iter iter;
1648 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
1650 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1653 if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
1654 !is_shadow_present_pte(iter.old_spte))
1662 if (is_last_spte(iter.old_spte, iter.level))
1666 * If iter.gfn resides outside of the slot, i.e. the page for
1672 if (iter.gfn < start || iter.gfn >= end)
1676 iter.gfn, PG_LEVEL_NUM);
1677 if (max_mapping_level < iter.level)
1681 if (tdp_mmu_zap_spte_atomic(kvm, &iter))
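
A worked example of the boundary check above, with hypothetical numbers: if the memslot being scanned covers GFNs 0x805 through 0xc04, the walk runs over start = 0x805 and end = 0xc05 with a minimum level of PG_LEVEL_2M. The first 2M-level entry visited has its GFN rounded down to the 512-page boundary, so iter.gfn = 0x800; because 0x800 < start, the huge page would overlap the slot without being contained by it, and the entry is skipped rather than considered for collapsing.
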
1711 struct tdp_iter iter;
1719 for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
1720 if (!is_shadow_present_pte(iter.old_spte) ||
1721 !is_last_spte(iter.old_spte, iter.level))
1724 new_spte = iter.old_spte &
1727 if (new_spte == iter.old_spte)
1730 tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
1767 struct tdp_iter iter;
1774 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1775 leaf = iter.level;
1776 sptes[leaf] = iter.old_spte;
1796 struct tdp_iter iter;
1801 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1802 *spte = iter.old_spte;
1803 sptep = iter.sptep;