Lines matching refs:iterator
1126 * information in the iterator may not be valid.
1153 * Must be used with a valid iterator: e.g. after rmap_get_first().
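The two comment fragments above (lines 1126 and 1153) describe struct rmap_iterator, the small cursor that rmap_get_first() fills in and rmap_get_next() advances. As a rough sketch of how a caller drives it: mmu.c wraps this in a for_each_rmap_spte()-style helper, and everything below other than rmap_get_first(), which line 1153 names, is recalled from the kernel source and may differ by version; rmap_head stands for any struct kvm_rmap_head the caller has looked up (e.g. via gfn_to_rmap(), line 1510).

        u64 *sptep;
        struct rmap_iterator iter;

        /*
         * Walk every shadow PTE chained off one rmap head.
         * rmap_get_next() returns NULL once the chain is exhausted;
         * after sptes are dropped from the chain the iterator must be
         * re-primed with rmap_get_first() (the comment at line 1126).
         */
        for (sptep = rmap_get_first(rmap_head, &iter);
             sptep;
             sptep = rmap_get_next(&iter)) {
                /* inspect or clear *sptep here */
        }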
1505 static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator,
1508 iterator->level = level;
1509 iterator->gfn = iterator->start_gfn;
1510 iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
1511 iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1514 static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1519 iterator->slot = slot;
1520 iterator->start_level = start_level;
1521 iterator->end_level = end_level;
1522 iterator->start_gfn = start_gfn;
1523 iterator->end_gfn = end_gfn;
1525 rmap_walk_init_level(iterator, iterator->start_level);
1528 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1530 return !!iterator->rmap;
1533 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1535 while (++iterator->rmap <= iterator->end_rmap) {
1536 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1538 if (iterator->rmap->val)
1542 if (++iterator->level > iterator->end_level) {
1543 iterator->rmap = NULL;
1547 rmap_walk_init_level(iterator, iterator->level);
1565 struct slot_rmap_walk_iterator iterator;
1569 range->start, range->end - 1, &iterator)
1570 ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
1571 iterator.level, range->arg.pte);
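Lines 1505-1547 are the three pieces of the slot rmap walker: rmap_walk_init_level() positions the cursor on one page-table level, slot_rmap_walk_okay() reports whether anything is left (rmap == NULL means done), and slot_rmap_walk_next() advances within the level, skipping empty rmap heads, then falls through to the next level. Lines 1565-1571 show kvm_handle_gfn_range() driving them. A sketch of that driving loop, reconstructed from these fragments rather than copied verbatim; handle_rmap() and the GFN bounds are placeholders, and mmu.c packages the same shape as a for_each_slot_rmap_range() macro:

        struct slot_rmap_walk_iterator iter;

        for (slot_rmap_walk_init(&iter, slot, PG_LEVEL_4K,
                                 KVM_MAX_HUGEPAGE_LEVEL,
                                 start_gfn, end_gfn);
             slot_rmap_walk_okay(&iter);        /* false once iter.rmap == NULL */
             slot_rmap_walk_next(&iter)) {
                /*
                 * iter.rmap is the rmap head for iter.gfn at iter.level.
                 * slot_rmap_walk_next() only stops on heads whose ->val is
                 * non-zero, but the head each level starts on is not
                 * filtered, so the body may still see empty heads.
                 */
                handle_rmap(kvm, iter.rmap, slot, iter.gfn, iter.level);
        }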
2360 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2364 iterator->addr = addr;
2365 iterator->shadow_addr = root;
2366 iterator->level = vcpu->arch.mmu->root_role.level;
2368 if (iterator->level >= PT64_ROOT_4LEVEL &&
2371 iterator->level = PT32E_ROOT_LEVEL;
2373 if (iterator->level == PT32E_ROOT_LEVEL) {
2380 iterator->shadow_addr
2382 iterator->shadow_addr &= SPTE_BASE_ADDR_MASK;
2383 --iterator->level;
2384 if (!iterator->shadow_addr)
2385 iterator->level = 0;
2389 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2392 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa,
2396 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2398 if (iterator->level < PG_LEVEL_4K)
2401 iterator->index = SPTE_INDEX(iterator->addr, iterator->level);
2402 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2406 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2409 if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
2410 iterator->level = 0;
2414 iterator->shadow_addr = spte & SPTE_BASE_ADDR_MASK;
2415 --iterator->level;
2418 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2420 __shadow_walk_next(iterator, *iterator->sptep);
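Lines 2360-2420 apply the same init/okay/next idiom to the shadow page tables: shadow_walk_init_using_root() starts from a given root (with the 32-bit PAE special case at lines 2373-2385), shadow_walk_okay() computes the index and sptep for the current level and stops below PG_LEVEL_4K, and __shadow_walk_next() descends through a present, non-leaf SPTE. A sketch of the resulting top-down descent, reconstructed from these fragments; the open-coded loop at lines 4083-4088 has the same shape, and mmu.c also wraps it in for_each_shadow_entry()-style macros:

        struct kvm_shadow_walk_iterator it;

        for (shadow_walk_init(&it, vcpu, addr); /* starts from mmu->root.hpa */
             shadow_walk_okay(&it);             /* fills it.index and it.sptep */
             shadow_walk_next(&it)) {           /* descends via *it.sptep */
                u64 spte = *it.sptep;           /* SPTE mapping 'addr' at it.level */

                if (!is_shadow_present_pte(spte))
                        break;                  /* line 2409 ends the walk here anyway */
        }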
3409 struct kvm_shadow_walk_iterator iterator;
3413 for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
3414 sptep = iterator.sptep;
4079 struct kvm_shadow_walk_iterator iterator;
4083 for (shadow_walk_init(&iterator, vcpu, addr),
4084 *root_level = iterator.level;
4085 shadow_walk_okay(&iterator);
4086 __shadow_walk_next(&iterator, spte)) {
4087 leaf = iterator.level;
4088 spte = mmu_spte_get_lockless(iterator.sptep);
4201 struct kvm_shadow_walk_iterator iterator;
4205 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
4206 clear_sp_write_flooding_count(iterator.sptep);
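The walks at lines 3413, 4083-4088 and 4205 run without taking mmu_lock for write, so each SPTE is read exactly once with mmu_spte_get_lockless() and that cached value, not a fresh read of *sptep, is what gets handed to __shadow_walk_next(). A sketch of that lockless shape, reconstructed from the for_each_shadow_entry_lockless() uses above; the begin/end helpers are recalled from mmu.c, are not part of this listing, and may differ by version:

        struct kvm_shadow_walk_iterator it;
        u64 spte;

        walk_shadow_page_lockless_begin(vcpu);

        for (shadow_walk_init(&it, vcpu, addr);
             shadow_walk_okay(&it) &&
             ({ spte = mmu_spte_get_lockless(it.sptep); 1; });
             __shadow_walk_next(&it, spte)) {
                /*
                 * Use the cached 'spte' and it.level here; never re-read
                 * *it.sptep, which may change under a concurrent fault.
                 */
        }

        walk_shadow_page_lockless_end(vcpu);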
5771 struct kvm_shadow_walk_iterator iterator;
5787 for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) {
5788 struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep);
5791 int ret = kvm_sync_spte(vcpu, sp, iterator.index);
5794 mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL);
5796 kvm_flush_remote_tlbs_sptep(vcpu->kvm, iterator.sptep);
5916 struct slot_rmap_walk_iterator iterator;
5921 end_gfn, &iterator) {
5922 if (iterator.rmap)
5923 flush |= fn(kvm, iterator.rmap, slot);
5928 iterator.gfn - start_gfn + 1);
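Lines 5916-5928 come from the slot-wide walker that applies a callback to every rmap head in a memslot and, when it needs to yield, flushes the TLB for only the GFN range it has already covered (start_gfn up to iterator.gfn) before rescheduling. A sketch of that flush-on-yield shape, under assumed helper names recalled from mmu.c rather than taken from this listing:

        bool flush = false;
        struct slot_rmap_walk_iterator iter;

        for_each_slot_rmap_range(slot, start_level, end_level,
                                 start_gfn, end_gfn, &iter) {
                if (iter.rmap)
                        flush |= fn(kvm, iter.rmap, slot);

                if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
                        if (flush) {
                                /* flush only what has been processed so far */
                                kvm_flush_remote_tlbs_range(kvm, start_gfn,
                                                            iter.gfn - start_gfn + 1);
                                flush = false;
                        }
                        cond_resched_rwlock_write(&kvm->mmu_lock);
                }
        }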
6438 * rmap iterator should be restarted because the MMU lock was
6486 * lock was dropped. Either way, restart the iterator to get it