Lines Matching refs:iterator

1001  * information in the iterator may not be valid.
1028  * Must be used with a valid iterator: e.g. after rmap_get_first().
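
The comment fragments at 1001 and 1028 sit above rmap_get_first() and, presumably, its companion rmap_get_next() (whose name is not among the matched lines): the first (re)primes a struct rmap_iterator for one rmap head, the second only advances an iterator that is still valid, which is why both warn about dropped SPTEs. A minimal sketch of the usual pairing, hand-written here rather than copied from the file, with a placeholder loop body:

	struct rmap_iterator iter;
	u64 *sptep;

	/* Start the walk; restart from rmap_get_first() whenever an SPTE is
	 * dropped, since the iterator is then no longer valid. */
	for (sptep = rmap_get_first(rmap_head, &iter); sptep;
	     sptep = rmap_get_next(&iter)) {
		/* read-only per-SPTE work goes here */
	}
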
1397 rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
1399 iterator->level = level;
1400 iterator->gfn = iterator->start_gfn;
1401 iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
1402 iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
1403 iterator->slot);
1407 slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1411 iterator->slot = slot;
1412 iterator->start_level = start_level;
1413 iterator->end_level = end_level;
1414 iterator->start_gfn = start_gfn;
1415 iterator->end_gfn = end_gfn;
1417 rmap_walk_init_level(iterator, iterator->start_level);
1420 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1422 return !!iterator->rmap;
1425 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1427 if (++iterator->rmap <= iterator->end_rmap) {
1428 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1432 if (++iterator->level > iterator->end_level) {
1433 iterator->rmap = NULL;
1437 rmap_walk_init_level(iterator, iterator->level);
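
slot_rmap_walk_init(), slot_rmap_walk_okay() and slot_rmap_walk_next() (1407-1437) form a classic init/okay/next triple: init records the slot, level range and gfn range and seats the walk via rmap_walk_init_level(); okay reports whether there is still an rmap head to visit; next steps to the following gfn at the current level or, once end_rmap is passed, re-seats the walk at the next (larger) page-size level. The call sites at 1485 and 5243 pass &iterator to a wrapping loop macro whose name is not among the matched lines; a sketch of how such a wrapper is typically assembled from the three helpers (the macro name and argument order are assumptions):

	/* Hypothetical wrapper: visit every rmap head of @slot_ between
	 * @start_gfn and @end_gfn at every level in [start_level_, end_level_]. */
	#define for_each_slot_rmap_range(slot_, start_level_, end_level_,	\
					 start_gfn, end_gfn, iter_)		\
		for (slot_rmap_walk_init(iter_, slot_, start_level_,		\
					 end_level_, start_gfn, end_gfn);	\
		     slot_rmap_walk_okay(iter_);				\
		     slot_rmap_walk_next(iter_))
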
1460 struct slot_rmap_walk_iterator iterator;
1485 &iterator)
1486 ret |= handler(kvm, iterator.rmap, memslot,
1487 iterator.gfn, iterator.level, data);
2115 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2119 iterator->addr = addr;
2120 iterator->shadow_addr = root;
2121 iterator->level = vcpu->arch.mmu->shadow_root_level;
2123 if (iterator->level == PT64_ROOT_4LEVEL &&
2126 --iterator->level;
2128 if (iterator->level == PT32E_ROOT_LEVEL) {
2135 iterator->shadow_addr
2137 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
2138 --iterator->level;
2139 if (!iterator->shadow_addr)
2140 iterator->level = 0;
2144 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2147 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
2151 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2153 if (iterator->level < PG_LEVEL_4K)
2156 iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
2157 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2161 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2164 if (is_last_spte(spte, iterator->level)) {
2165 iterator->level = 0;
2169 iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2170 --iterator->level;
2173 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2175 __shadow_walk_next(iterator, *iterator->sptep);
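
The shadow-walk helpers (2115-2175) follow the same init/okay/next pattern but descend the shadow page tables for a single address: shadow_walk_init_using_root() seats the walk at the root (with the PAE special case at 2128-2140), shadow_walk_okay() derives index and sptep for the current level, and __shadow_walk_next() either terminates at a last-level SPTE or follows the SPTE's address one level down. The open-coded loop at 3499-3502 shows this composition directly; the for_each_shadow_entry_lockless() users at 3051 and 3628 presumably wrap the same three steps plus a lockless SPTE read, roughly as follows (a sketch; the real macro body is not part of this listing):

	#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
		for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
		     shadow_walk_okay(&(_walker)) &&				\
			({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
		     __shadow_walk_next(&(_walker), spte))
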
3037 struct kvm_shadow_walk_iterator iterator;
3051 for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
3055 sp = sptep_to_sp(iterator.sptep);
3113 if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte,
3127 trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
3493 struct kvm_shadow_walk_iterator iterator;
3499 for (shadow_walk_init(&iterator, vcpu, addr),
3500 *root_level = iterator.level;
3501 shadow_walk_okay(&iterator);
3502 __shadow_walk_next(&iterator, spte)) {
3503 leaf = iterator.level;
3504 spte = mmu_spte_get_lockless(iterator.sptep);
3624 struct kvm_shadow_walk_iterator iterator;
3628 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
3629 clear_sp_write_flooding_count(iterator.sptep);
5239 struct slot_rmap_walk_iterator iterator;
5243 end_gfn, &iterator) {
5244 if (iterator.rmap)
5245 flush |= fn(kvm, iterator.rmap);
5251 iterator.gfn - start_gfn + 1);
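
The fragment at 5239-5251 is the memslot-range walker built on the same slot rmap iterator: each non-empty rmap head is handed to the fn callback, whose result is OR-ed into flush, and when the walk yields the lock it flushes only the pages covered so far, hence the iterator.gfn - start_gfn + 1 width at 5251. A sketch of the surrounding structure for orientation (only lines containing "iterator" are matched above, so the handler typedef, the flush_on_yield name and the exact helper calls shown here are assumptions):

	/* Hypothetical handler shape: process one rmap head and report
	 * whether a TLB flush is now needed. */
	typedef bool (*slot_level_handler)(struct kvm *kvm,
					   struct kvm_rmap_head *rmap_head);

	for_each_slot_rmap_range(memslot, start_level, end_level,
				 start_gfn, end_gfn, &iterator) {
		if (iterator.rmap)
			flush |= fn(kvm, iterator.rmap);

		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			if (flush && flush_on_yield) {
				/* flush only the gfns walked so far */
				kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
						iterator.gfn - start_gfn + 1);
				flush = false;
			}
			cond_resched_lock(&kvm->mmu_lock);
		}
	}
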