Lines Matching defs:slot
790 const struct kvm_memory_slot *slot, int level)
794 idx = gfn_to_index(gfn, slot->base_gfn, level);
795 return &slot->arch.lpage_info[level - 2][idx];
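The matches at 790-795 are the per-slot large-page metadata lookup: gfn_to_index() turns a gfn into an index for the given level, and lpage_info has no 4K row, hence the [level - 2] selection. A rough, self-contained sketch of that index arithmetic (the typedef and the HPAGE shift macro below are simplified stand-ins, not the kernel's exact definitions):

#include <stdio.h>

typedef unsigned long long gfn_t;

/* Each level above 4K covers 9 more gfn bits: 2M = 512 pages, 1G = 512 * 512. */
#define HPAGE_GFN_SHIFT(level)  (((level) - 1) * 9)

/* Index of the level-sized block containing @gfn, relative to the slot base. */
static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        return (gfn >> HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> HPAGE_GFN_SHIFT(level));
}

int main(void)
{
        gfn_t base = 0x100000;          /* hypothetical slot base gfn */
        gfn_t gfn  = base + 0x1234;

        /* Level 2 (2M): which 512-page block of the slot holds this gfn. */
        printf("2M-level index: %llu\n", gfn_to_index(gfn, base, 2));   /* prints 9 */
        return 0;
}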
798 static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
805 linfo = lpage_info_slot(gfn, slot, i);
811 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
813 update_gfn_disallow_lpage_count(slot, gfn, 1);
816 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
818 update_gfn_disallow_lpage_count(slot, gfn, -1);
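Lines 798-818 pair up: kvm_mmu_gfn_disallow_lpage() bumps a counter in the lpage_info entry of every huge-page level covering the gfn, and kvm_mmu_gfn_allow_lpage() decrements it; a block may be mapped huge only while its count is zero. A minimal model of that counting for a single block (illustrative only; the real loop indexes lpage_info_slot() per level and WARNs on underflow):

#include <assert.h>

#define PG_LEVEL_2M             2
#define MAX_HUGEPAGE_LEVEL      3       /* 2M and 1G on x86 */

/* One counter per huge-page level for a single block (simplified). */
static int disallow_lpage[MAX_HUGEPAGE_LEVEL + 1];

static void update_disallow_lpage_count(int count)
{
        for (int i = PG_LEVEL_2M; i <= MAX_HUGEPAGE_LEVEL; i++) {
                disallow_lpage[i] += count;
                assert(disallow_lpage[i] >= 0); /* going negative would be a bug */
        }
}

int main(void)
{
        update_disallow_lpage_count(1);         /* first tracker of the block */
        update_disallow_lpage_count(1);         /* a second, independent reason */
        update_disallow_lpage_count(-1);        /* one reason goes away */

        /* Still one outstanding reason: 2M/1G mappings stay forbidden. */
        assert(disallow_lpage[PG_LEVEL_2M] == 1);
        return 0;
}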
824 struct kvm_memory_slot *slot;
830 slot = __gfn_to_memslot(slots, gfn);
834 return __kvm_write_track_add_gfn(kvm, slot, gfn);
836 kvm_mmu_gfn_disallow_lpage(slot, gfn);
838 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
872 struct kvm_memory_slot *slot;
878 slot = __gfn_to_memslot(slots, gfn);
880 return __kvm_write_track_remove_gfn(kvm, slot, gfn);
882 kvm_mmu_gfn_allow_lpage(slot, gfn);
905 struct kvm_memory_slot *slot;
907 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
908 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
910 if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
913 return slot;
1081 const struct kvm_memory_slot *slot)
1085 idx = gfn_to_index(gfn, slot->base_gfn, level);
1086 return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
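The rmap lookup at 1081-1086 reuses the same gfn_to_index() arithmetic sketched above, but slot->arch.rmap does have a 4K row, so the row is picked with [level - PG_LEVEL_4K] (4K maps to row 0) rather than the [level - 2] used for lpage_info. A tiny illustration of the row selection, with placeholder constants matching the x86 values:

#include <stdio.h>

#define PG_LEVEL_4K             1
#define PG_LEVEL_1G             3
#define KVM_NR_PAGE_SIZES       3       /* 4K, 2M, 1G */

int main(void)
{
        /* Stand-in for slot->arch.rmap: one row of rmap heads per level. */
        const char *rmap_rows[KVM_NR_PAGE_SIZES] = { "4K", "2M", "1G" };

        for (int level = PG_LEVEL_4K; level <= PG_LEVEL_1G; level++)
                printf("level %d -> rmap row %d (%s)\n",
                       level, level - PG_LEVEL_4K, rmap_rows[level - PG_LEVEL_4K]);
        return 0;
}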
1092 struct kvm_memory_slot *slot;
1107 slot = __gfn_to_memslot(slots, gfn);
1108 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1277 const struct kvm_memory_slot *slot)
1295 * @slot: slot to protect
1302 struct kvm_memory_slot *slot,
1308 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1309 slot->base_gfn + gfn_offset, mask, true);
1315 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1316 PG_LEVEL_4K, slot);
1328 * @slot: slot to clear D-bit
1335 struct kvm_memory_slot *slot,
1341 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1342 slot->base_gfn + gfn_offset, mask, false);
1348 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1349 PG_LEVEL_4K, slot);
1350 __rmap_clear_dirty(kvm, rmap_head, slot);
1368 struct kvm_memory_slot *slot,
1381 gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
1382 gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
1385 kvm_mmu_try_split_huge_pages(kvm, slot, start, end, PG_LEVEL_4K);
1387 kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
1392 kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
1398 kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1400 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
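Lines 1277-1400 all follow the same dirty-logging pattern: @mask is a bitmap of up to 64 dirty 4K pages starting at slot->base_gfn + gfn_offset, and each handler visits the set bits via __ffs()/__fls(), clearing them one at a time with mask &= mask - 1. A standalone sketch of that bit walk (GCC builtins stand in for the kernel's __ffs/__fls; the gfn math mirrors lines 1315, 1381 and 1382):

#include <stdio.h>
#include <stdint.h>

typedef unsigned long long gfn_t;

/* User-space stand-ins for the kernel's __ffs()/__fls() (0-based bit index). */
static int lowest_bit(uint64_t x)  { return __builtin_ctzll(x); }
static int highest_bit(uint64_t x) { return 63 - __builtin_clzll(x); }

int main(void)
{
        gfn_t base_gfn = 0x1000;        /* hypothetical slot base */
        gfn_t gfn_offset = 0x40;        /* 64-aligned offset passed in by dirty logging */
        uint64_t mask = 0x8005;         /* bits 0, 2 and 15 are dirty */

        /* The PML path first computes the span it may need to split/write-protect. */
        printf("span: %#llx .. %#llx\n",
               base_gfn + gfn_offset + lowest_bit(mask),
               base_gfn + gfn_offset + highest_bit(mask));

        /* Then each handler walks the set bits, lowest first. */
        while (mask) {
                gfn_t gfn = base_gfn + gfn_offset + lowest_bit(mask);

                printf("handle dirty gfn %#llx\n", gfn); /* write-protect or clear D-bit */
                mask &= mask - 1;       /* clear the lowest set bit */
        }
        return 0;
}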
1409 struct kvm_memory_slot *slot, u64 gfn,
1418 rmap_head = gfn_to_rmap(gfn, i, slot);
1425 kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1432 struct kvm_memory_slot *slot;
1434 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1435 return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1439 const struct kvm_memory_slot *slot)
1445 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1448 return __kvm_zap_rmap(kvm, rmap_head, slot);
1452 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1490 const struct kvm_memory_slot *slot;
1510 iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
1511 iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1515 const struct kvm_memory_slot *slot,
1519 iterator->slot = slot;
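The iterator at 1490-1519 (used via for_each_slot_rmap_range at 1568 and 5920) describes a two-dimensional walk: for every level in [start_level, end_level], visit each rmap head covering [start_gfn, end_gfn] within the slot. A compact model of the traversal shape (purely illustrative; the real iterator steps a pointer from gfn_to_rmap(start) to gfn_to_rmap(end) at each level):

#include <stdio.h>

typedef unsigned long long gfn_t;

#define PG_LEVEL_4K     1
#define PG_LEVEL_1G     3
#define HPAGE_GFN_SHIFT(level)  (((level) - 1) * 9)

int main(void)
{
        gfn_t start_gfn = 0x0, end_gfn = 0x7ff; /* 2048 pages, i.e. 8 MiB */

        /* Outer loop: levels; inner count: one rmap head per level-sized block. */
        for (int level = PG_LEVEL_4K; level <= PG_LEVEL_1G; level++) {
                gfn_t first = start_gfn >> HPAGE_GFN_SHIFT(level);
                gfn_t last  = end_gfn >> HPAGE_GFN_SHIFT(level);

                printf("level %d: %llu rmap head(s) to visit\n",
                       level, last - first + 1);
        }
        return 0;
}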
1558 struct kvm_memory_slot *slot, gfn_t gfn,
1568 for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
1570 ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
1587 range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
1607 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1621 struct kvm_memory_slot *slot, gfn_t gfn,
1637 const struct kvm_memory_slot *slot,
1648 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1659 static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
1664 __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
2799 int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
2810 if (kvm_gfn_is_write_tracked(kvm, slot, gfn))
2900 static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
2942 wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
2962 rmap_add(vcpu, slot, sptep, gfn, pte_access);
2976 struct kvm_memory_slot *slot;
2982 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2983 if (!slot)
2986 ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2991 mmu_set_spte(vcpu, slot, start, access, gfn,
3077 const struct kvm_memory_slot *slot)
3095 hva = __gfn_to_hva_memslot(slot, gfn);
3141 const struct kvm_memory_slot *slot, gfn_t gfn,
3149 linfo = lpage_info_slot(gfn, slot, max_level);
3157 host_level = host_pfn_mapping_level(kvm, gfn, slot);
3163 struct kvm_memory_slot *slot = fault->slot;
3174 if (kvm_slot_dirty_track_enabled(slot))
3181 fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
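The cluster at 3077-3181 decides how large a mapping a fault may use: walk down from the requested max level while the slot's lpage_info disallows it, then cap the result by the level at which the host itself maps the backing page; a slot with dirty tracking enabled (line 3174) is kept at 4K. A minimal sketch of that policy (standalone; the disallow table and host level are stand-ins for lpage_info_slot() and host_pfn_mapping_level()):

#include <stdio.h>

#define PG_LEVEL_4K     1
#define PG_LEVEL_2M     2
#define PG_LEVEL_1G     3

static int max_mapping_level(const int *disallow_lpage, int max_level,
                             int host_level, int dirty_logging)
{
        if (dirty_logging)
                return PG_LEVEL_4K;     /* dirty tracking wants 4K granularity */

        /* Walk down while the slot forbids this gfn at the candidate level. */
        while (max_level > PG_LEVEL_4K && disallow_lpage[max_level])
                max_level--;

        /* Never map larger than the host's own mapping of the page. */
        return max_level < host_level ? max_level : host_level;
}

int main(void)
{
        int disallow[PG_LEVEL_1G + 1] = { 0, 0, 0, 1 }; /* 1G disallowed here */

        /* Host backs the page with a 2M mapping; caller asked for up to 1G. */
        printf("level = %d\n",
               max_mapping_level(disallow, PG_LEVEL_1G, PG_LEVEL_2M, 0));       /* prints 2 */
        return 0;
}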
3252 ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
3261 static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
3263 unsigned long hva = gfn_to_hva_memslot(slot, gfn);
3284 kvm_send_hwpoison_signal(fault->slot, fault->gfn);
3381 mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);
3497 * if its slot has dirty logging enabled.
3503 kvm_slot_dirty_track_enabled(fault->slot))
3710 struct kvm_memory_slot *slot;
3736 kvm_for_each_memslot(slot, bkt, slots) {
3743 * The metadata is guaranteed to be freed when the slot
3747 r = memslot_rmap_alloc(slot, slot->npages);
3750 r = kvm_page_track_write_tracking_alloc(slot);
4193 if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn))
4256 struct kvm_memory_slot *slot = fault->slot;
4264 if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
4267 if (!kvm_is_visible_memslot(slot)) {
4270 fault->slot = NULL;
4281 if (slot && slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT &&
4287 fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, &async,
4309 fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, true, NULL,
4330 if (unlikely(!fault->slot))
4360 return fault->slot &&
5907 const struct kvm_memory_slot *slot);
5910 const struct kvm_memory_slot *slot,
5920 for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
5923 flush |= fn(kvm, iterator.rmap, slot);
5939 const struct kvm_memory_slot *slot,
5944 return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
5945 slot->base_gfn, slot->base_gfn + slot->npages - 1,
5950 const struct kvm_memory_slot *slot,
5954 return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
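The walkers at 5907-5954 wrap the same rmap iteration behind a callback: __walk_slot_rmaps() invokes fn on every rmap head in the range and ORs the returned flush requests together (line 5923), while walk_slot_rmaps() simply covers the whole slot, base_gfn through base_gfn + npages - 1. A stripped-down model of that accumulation pattern (names and types are placeholders):

#include <stdbool.h>
#include <stdio.h>

struct rmap_head { int nr_sptes; };

typedef bool (*rmap_fn)(struct rmap_head *rmap);

/* Visit every rmap head, remembering whether any callback asked for a
 * TLB flush -- mirroring "flush |= fn(...)" in the real walker. */
static bool walk_rmaps(struct rmap_head *rmaps, int nr, rmap_fn fn)
{
        bool flush = false;

        for (int i = 0; i < nr; i++)
                flush |= fn(&rmaps[i]);
        return flush;
}

static bool zap_if_populated(struct rmap_head *rmap)
{
        return rmap->nr_sptes > 0;      /* pretend populated heads needed zapping */
}

int main(void)
{
        struct rmap_head rmaps[4] = { {0}, {2}, {0}, {1} };

        printf("flush needed: %d\n", walk_rmaps(rmaps, 4, zap_if_populated));
        return 0;
}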
6111 * not use any resource of the being-deleted slot or all slots
6220 memslot = iter.slot;
6265 const struct kvm_memory_slot *slot)
6369 const struct kvm_memory_slot *slot,
6408 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
6415 const struct kvm_memory_slot *slot,
6446 shadow_mmu_split_huge_page(kvm, slot, huge_sptep);
6455 const struct kvm_memory_slot *slot)
6482 r = shadow_mmu_try_split_huge_page(kvm, slot, huge_sptep);
6500 const struct kvm_memory_slot *slot,
6513 __walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages,
6570 const struct kvm_memory_slot *slot)
6589 sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
6606 const struct kvm_memory_slot *slot)
6612 if (walk_slot_rmaps(kvm, slot, kvm_mmu_zap_collapsible_spte,
6614 kvm_flush_remote_tlbs_memslot(kvm, slot);
6618 const struct kvm_memory_slot *slot)
6622 kvm_rmap_zap_collapsible_sptes(kvm, slot);
6628 kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
6693 struct kvm_memory_slot *slot)
7013 struct kvm_memory_slot *slot;
7053 * back in as 4KiB pages. The NX Huge Pages in this slot will be
7054 * recovered, along with all the other huge pages in the slot,
7070 slot = NULL;
7075 slot = __gfn_to_memslot(slots, sp->gfn);
7076 WARN_ON_ONCE(!slot);
7079 if (slot && kvm_slot_dirty_track_enabled(slot))