Lines Matching defs:slots
1143 struct kvm_memslots *slots;
1150 slots = kvm_memslots(kvm);
1151 if (!slots || kvm_memslots_empty(slots))
1158 /* mark all the pages in active slots as dirty */
1159 kvm_for_each_memslot(ms, bkt, slots) {
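The matches around lines 1143-1159 appear to come from the migration-mode setup path in arch/s390/kvm/kvm-s390.c, which fetches the active memslot set, rejects an empty set, and then walks every slot to mark its pages dirty. A minimal sketch of that iteration pattern, assuming a kernel with the hash-bucket kvm_for_each_memslot() macro and a caller that already holds kvm->slots_lock; the real function also seeds the per-slot dirty tracking, which is omitted here:

#include <linux/kvm_host.h>

/*
 * Sketch only: iterate every active memslot, as the code around
 * line 1159 does when migration mode is turned on. Counting pages
 * stands in for the real dirty-bitmap handling.
 */
static int count_active_slot_pages(struct kvm *kvm, unsigned long *ram_pages)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	int bkt;

	slots = kvm_memslots(kvm);
	if (!slots || kvm_memslots_empty(slots))
		return -EINVAL;

	*ram_pages = 0;
	/* walk all slots in the active memslot set */
	kvm_for_each_memslot(ms, bkt, slots) {
		if (!ms->dirty_bitmap)	/* dirty logging must be enabled */
			return -EINVAL;
		*ram_pages += ms->npages;
	}
	return 0;
}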
2215 static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
2218 return ____gfn_to_memslot(slots, gfn, true);
2221 static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2224 struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
2226 struct rb_node *mnode = &ms->gfn_node[slots->node_idx];
2232 mnode = rb_first(&slots->gfn_tree);
2234 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2243 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
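The matches at 2215-2243 define gfn_to_memslot_approx(), which resolves a gfn to the nearest slot via ____gfn_to_memslot(slots, gfn, true), and kvm_s390_next_dirty_cmma(), which then walks forward through slots->gfn_tree using each slot's gfn_node[slots->node_idx] rb-node and wraps to rb_first() past the last slot. A sketch of just that traversal step, with the dirty-bit search of the real helper left out:

#include <linux/kvm_host.h>
#include <linux/rbtree.h>

/*
 * Sketch only: advance from the slot covering (or nearest to) a gfn
 * to the next slot in gfn order, wrapping around at the end of the
 * gfn tree, as the code around lines 2226-2243 does.
 */
static struct kvm_memory_slot *next_slot_sketch(struct kvm_memslots *slots,
						struct kvm_memory_slot *ms)
{
	struct rb_node *mnode = rb_next(&ms->gfn_node[slots->node_idx]);

	/* past the highest slot: wrap around to the first one */
	if (!mnode)
		mnode = rb_first(&slots->gfn_tree);

	return container_of(mnode, struct kvm_memory_slot,
			    gfn_node[slots->node_idx]);
}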
2253 struct kvm_memslots *slots = kvm_memslots(kvm);
2256 if (unlikely(kvm_memslots_empty(slots)))
2259 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2265 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2266 mem_end = kvm_s390_get_gfn_end(slots);
2284 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
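The matches at 2253-2284 sit in the CMMA "get" path: it takes the active memslots once, bails out if they are empty, and then repeatedly asks kvm_s390_next_dirty_cmma() for the next dirty gfn until it runs past the end of guest memory or fills the caller's buffer. A sketch of that loop shape, assuming it lives in kvm-s390.c where kvm_s390_next_dirty_cmma() and kvm_s390_get_gfn_end() are visible; the actual per-page copy-out is reduced to a comment:

/*
 * Sketch only: the loop shape of the CMMA "get" path around
 * lines 2253-2284.
 */
static unsigned long walk_dirty_cmma_sketch(struct kvm *kvm,
					    unsigned long start_gfn,
					    unsigned long count)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	unsigned long cur_gfn, mem_end, handled = 0;

	if (unlikely(kvm_memslots_empty(slots)))
		return 0;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, start_gfn);
	mem_end = kvm_s390_get_gfn_end(slots);

	while (handled < count && cur_gfn < mem_end) {
		/* the real code copies the CMMA value of cur_gfn here */
		handled++;
		cur_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	}
	return handled;
}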
5768 /* When we are protected, we should not change the memory slots */
5774 * A few sanity checks. We can have memory slots which have to be
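The final two matches (5768, 5774) come from the memory-region preparation path: memslot changes are refused while the guest is protected, and new slots get basic layout checks (the comment at 5774 is cut off at the match boundary). A sketch of the kind of check that comment introduces, under the assumption that s390 requires slots to start and end on 1 MB segment boundaries; the exact limits enforced in kvm-s390.c may differ:

/*
 * Sketch only (assumption): reject a memslot whose userspace address
 * or size is not aligned to a 1 MB segment boundary.
 */
static int check_slot_alignment_sketch(const struct kvm_memory_slot *new)
{
	unsigned long size = new->npages * PAGE_SIZE;

	if (new->userspace_addr & 0xffffful)	/* 1 MB = 0x100000 */
		return -EINVAL;
	if (size & 0xffffful)
		return -EINVAL;
	return 0;
}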