Lines matching refs: mmu_lock

2113 		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2120 cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
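
The pair at 2113/2120 (and again at 5925/5931 and 7087/7091 below) is the standard yield pattern for long walks done with mmu_lock held for write. A minimal sketch of that shape; need_resched(), rwlock_needbreak() and cond_resched_rwlock_write() are the real kernel helpers, while the walk helpers are hypothetical stand-ins:

/* Hypothetical helpers standing in for the real walk: */
static bool example_have_more_work(struct kvm *kvm);
static void example_zap_one(struct kvm *kvm);

static void example_zap_walk(struct kvm *kvm)
{
	write_lock(&kvm->mmu_lock);

	while (example_have_more_work(kvm)) {
		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			/* Yield; mmu_lock may be dropped and re-taken
			 * inside, so per-iteration state must be committed
			 * or flushed before this point. */
			cond_resched_rwlock_write(&kvm->mmu_lock);
		}
		example_zap_one(kvm);
	}

	write_unlock(&kvm->mmu_lock);
}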
2570 lockdep_assert_held_write(&kvm->mmu_lock);
2737 write_lock(&kvm->mmu_lock);
2748 write_unlock(&kvm->mmu_lock);
2758 write_lock(&kvm->mmu_lock);
2764 write_unlock(&kvm->mmu_lock);
2831 * run with mmu_lock held for read, not write, and the unsync
2834 * no meaningful penalty if mmu_lock is held for write.
2844 * possible as clearing sp->unsync _must_ hold mmu_lock
2846 * while this CPU holds mmu_lock for read (or write).
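
The comment fragments at 2831-2846 describe an asymmetric rule: sp->unsync may be set while mmu_lock is held for read, but clearing it requires mmu_lock held for write, so a CPU holding the lock for read never sees unsync go from 1 to 0 underneath it. A minimal sketch of that rule, assuming simplified helpers (not the actual KVM functions); lockdep_assert_held{,_write}() and the sp->unsync field are real:

static void example_mark_unsync(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	/* Setting unsync is allowed with mmu_lock held for read or write. */
	lockdep_assert_held(&kvm->mmu_lock);
	WRITE_ONCE(sp->unsync, true);
}

static void example_clear_unsync(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	/* Clearing unsync _must_ hold mmu_lock for write. */
	lockdep_assert_held_write(&kvm->mmu_lock);
	WRITE_ONCE(sp->unsync, false);
}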
3060 * consuming it. In this case, mmu_lock doesn't need to be held during the
3063 * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation
3069 * not required to hold mmu_lock (though it's highly likely the caller will
3070 * want to hold mmu_lock anyways, e.g. to modify SPTEs).
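
The comment at 3060-3070 gives two ways to keep a looked-up pfn usable: hold mmu_lock while ensuring no MMU notifier invalidation is in progress, or hold a reference to the page. A sketch of the first option, in the shape the fault-path match at 3187 implies; mmu_invalidate_retry(), kvm->mmu_invalidate_seq and the RET_PF_* codes are the names recent kernels use, everything else is simplified:

static int example_map_pfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long mmu_seq;

	mmu_seq = kvm->mmu_invalidate_seq;	/* snapshot before the pfn lookup */
	smp_rmb();

	/* ... resolve gfn -> pfn without mmu_lock held (may sleep) ... */

	write_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		/* An invalidation raced with the lookup; retry the fault
		 * rather than consuming a potentially stale pfn. */
		write_unlock(&kvm->mmu_lock);
		return RET_PF_RETRY;
	}
	/* ... safe to install SPTEs for the pfn here ... */
	write_unlock(&kvm->mmu_lock);
	return RET_PF_FIXED;
}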
3187 * mmu_invalidate_retry() was successful and mmu_lock is held, so
3345 * by setting the Writable bit, which can be done out of mmu_lock.
3480 * be made fully writable outside of mmu_lock, e.g. only SPTEs
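
The fragments at 3345 and 3480 refer to the fast page-fault path, which can set the Writable bit without holding mmu_lock because the update is an atomic compare-and-exchange on the SPTE. A minimal sketch of that idea; PT_WRITABLE_MASK and try_cmpxchg64() are real, the wrapper is hypothetical:

/* Sketch: write-enable an SPTE outside mmu_lock.  The cmpxchg fails if
 * the SPTE changed underneath us (e.g. it was zapped), in which case
 * the caller bails out and retries the fault. */
static bool example_fast_make_writable(u64 *sptep, u64 old_spte)
{
	u64 new_spte = old_spte | PT_WRITABLE_MASK;

	return try_cmpxchg64(sptep, &old_spte, new_spte);
}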
3584 write_lock(&kvm->mmu_lock);
3611 write_unlock(&kvm->mmu_lock);
3668 write_lock(&vcpu->kvm->mmu_lock);
3703 write_unlock(&vcpu->kvm->mmu_lock);
3786 * and thus might sleep. Grab the PDPTRs before acquiring mmu_lock.
3803 write_lock(&vcpu->kvm->mmu_lock);
3881 write_unlock(&vcpu->kvm->mmu_lock);
4014 write_lock(&vcpu->kvm->mmu_lock);
4016 write_unlock(&vcpu->kvm->mmu_lock);
4020 write_lock(&vcpu->kvm->mmu_lock);
4031 write_unlock(&vcpu->kvm->mmu_lock);
4388 write_lock(&vcpu->kvm->mmu_lock);
4400 write_unlock(&vcpu->kvm->mmu_lock);
4468 read_lock(&vcpu->kvm->mmu_lock);
4476 read_unlock(&vcpu->kvm->mmu_lock);
5664 write_lock(&vcpu->kvm->mmu_lock);
5693 write_unlock(&vcpu->kvm->mmu_lock);
5786 write_lock(&vcpu->kvm->mmu_lock);
5802 write_unlock(&vcpu->kvm->mmu_lock);
5918 lockdep_assert_held_write(&kvm->mmu_lock);
5925 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
5931 cond_resched_rwlock_write(&kvm->mmu_lock);
6080 cond_resched_rwlock_write(&kvm->mmu_lock)) {
6118 write_lock(&kvm->mmu_lock);
6132 * invalidating TDP MMU roots must be done while holding mmu_lock for
6134 * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
6144 * Note: we need to do this under the protection of mmu_lock,
6151 write_unlock(&kvm->mmu_lock);
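
The cluster at 6118-6151 is the fast-zap path: everything that obsoletes the old roots must happen inside one write-side critical section, before anything can drop mmu_lock and yield. A sketch of that ordering; kvm_zap_obsolete_pages() appears in the matches above, the other helpers are stand-ins for whichever version-specific functions the kernel at hand uses:

/* Hypothetical stand-ins for the version-specific helpers: */
static void example_invalidate_roots(struct kvm *kvm);
static void example_request_root_reload(struct kvm *kvm);

static void example_fast_zap(struct kvm *kvm)
{
	write_lock(&kvm->mmu_lock);

	/* Invalidate roots while still holding mmu_lock for write, in the
	 * same critical section as the reload request, i.e. before
	 * kvm_zap_obsolete_pages() can drop the lock and yield. */
	example_invalidate_roots(kvm);

	/* Ask all vCPUs to drop their obsolete roots; done under mmu_lock
	 * so a vCPU cannot purge a shadow page yet miss the TLB flush. */
	example_request_root_reload(kvm);

	kvm_zap_obsolete_pages(kvm);	/* may drop mmu_lock and yield */

	write_unlock(&kvm->mmu_lock);
}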
6246 write_lock(&kvm->mmu_lock);
6260 write_unlock(&kvm->mmu_lock);
6275 write_lock(&kvm->mmu_lock);
6278 write_unlock(&kvm->mmu_lock);
6282 read_lock(&kvm->mmu_lock);
6284 read_unlock(&kvm->mmu_lock);
6295 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
6320 * capacity so that KVM doesn't have to drop mmu_lock to topup if KVM
6434 write_unlock(&kvm->mmu_lock);
6442 write_lock(&kvm->mmu_lock);
6517 /* Must be called with the mmu_lock held in write-mode. */
6548 write_lock(&kvm->mmu_lock);
6550 write_unlock(&kvm->mmu_lock);
6553 read_lock(&kvm->mmu_lock);
6555 read_unlock(&kvm->mmu_lock);
6621 write_lock(&kvm->mmu_lock);
6623 write_unlock(&kvm->mmu_lock);
6627 read_lock(&kvm->mmu_lock);
6629 read_unlock(&kvm->mmu_lock);
6637 write_lock(&kvm->mmu_lock);
6643 write_unlock(&kvm->mmu_lock);
6647 read_lock(&kvm->mmu_lock);
6649 read_unlock(&kvm->mmu_lock);
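
The pairs at 6548/6550 with 6553/6555, and again across 6621-6649, show the usual split for memslot-wide operations: the shadow MMU (rmap) walk runs under mmu_lock for write, while the TDP MMU walk runs under mmu_lock for read. A sketch of that shape; kvm_memslots_have_rmaps() and tdp_mmu_enabled are the names recent kernels use, and the walk bodies are elided:

static void example_slot_op(struct kvm *kvm,
			    const struct kvm_memory_slot *slot)
{
	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		/* ... walk the shadow MMU rmaps for the slot ... */
		write_unlock(&kvm->mmu_lock);
	}

	if (tdp_mmu_enabled) {
		read_lock(&kvm->mmu_lock);
		/* ... walk the TDP MMU, which tolerates concurrent readers ... */
		read_unlock(&kvm->mmu_lock);
	}
}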
6668 write_lock(&kvm->mmu_lock);
6675 if (cond_resched_rwlock_write(&kvm->mmu_lock))
6684 write_unlock(&kvm->mmu_lock);
6745 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
6755 write_lock(&kvm->mmu_lock);
6766 write_unlock(&kvm->mmu_lock);
7022 write_lock(&kvm->mmu_lock);
7087 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7091 cond_resched_rwlock_write(&kvm->mmu_lock);
7101 write_unlock(&kvm->mmu_lock);