Lines matching refs:mmu_lock (the number on each hit is its line in the searched source file):
2000 if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
2002 cond_resched_lock(&vcpu->kvm->mmu_lock);
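The hits at 2000/2002 (with matching pairs at 5247/5254 and 6043/6045 further down) are KVM's standard lock-break idiom: a long walk under mmu_lock polls need_resched() and spin_needbreak(), and when either fires it drops and retakes the lock with cond_resched_lock(). Guarding the call keeps the common case to two cheap tests and gives the caller a spot to flush pending state before the lock goes away. A minimal sketch of the idiom; the walk and its helpers are illustrative, not taken from the file:

    spin_lock(&kvm->mmu_lock);
    while (have_work(kvm)) {                /* hypothetical walk */
            do_one_unit(kvm);               /* hypothetical work item */

            /*
             * Yield if this task should reschedule or another CPU is
             * spinning on mmu_lock; cond_resched_lock() releases the
             * lock, reschedules, and reacquires it.
             */
            if (need_resched() || spin_needbreak(&kvm->mmu_lock))
                    cond_resched_lock(&kvm->mmu_lock);
    }
    spin_unlock(&kvm->mmu_lock);

Anything cached while the lock was held (iterators, list cursors) must be revalidated after cond_resched_lock() returns, since other mmu_lock holders may have run in the gap.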
2454 spin_lock(&kvm->mmu_lock);
2465 spin_unlock(&kvm->mmu_lock);
2476 spin_lock(&kvm->mmu_lock);
2484 spin_unlock(&kvm->mmu_lock);
2818 * mmu_notifier_retry() was successful and mmu_lock is held, so
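The lone hit at 2818 is the tail of a comment in the fault path describing the mmu_notifier race check: the handler samples the notifier sequence count, pins the host page in sleepable context with mmu_lock dropped, then retakes mmu_lock and confirms no invalidation ran in between before installing the new SPTE. A hedged sketch of that shape, using mmu_notifier_retry() as it existed while mmu_lock was still a spinlock (it was later renamed); install_spte() is a placeholder, not a kernel function:

    unsigned long mmu_seq;
    kvm_pfn_t pfn;

    mmu_seq = vcpu->kvm->mmu_notifier_seq;
    smp_rmb();                              /* read the seq before pinning the page */

    pfn = gfn_to_pfn(vcpu->kvm, gfn);       /* may sleep; mmu_lock not held */

    spin_lock(&vcpu->kvm->mmu_lock);
    if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
            goto out_unlock;                /* an invalidation raced with us: retry the fault */
    install_spte(vcpu, gfn, pfn);           /* placeholder: map only after the check */
    out_unlock:
    spin_unlock(&vcpu->kvm->mmu_lock);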
3178 spin_lock(&kvm->mmu_lock);
3201 spin_unlock(&kvm->mmu_lock);
3222 spin_lock(&vcpu->kvm->mmu_lock);
3225 spin_unlock(&vcpu->kvm->mmu_lock);
3231 spin_unlock(&vcpu->kvm->mmu_lock);
3414 spin_lock(&vcpu->kvm->mmu_lock);
3420 spin_unlock(&vcpu->kvm->mmu_lock);
3424 spin_lock(&vcpu->kvm->mmu_lock);
3438 spin_unlock(&vcpu->kvm->mmu_lock);
3736 spin_lock(&vcpu->kvm->mmu_lock);
3751 spin_unlock(&vcpu->kvm->mmu_lock);
5015 spin_lock(&vcpu->kvm->mmu_lock);
5047 spin_unlock(&vcpu->kvm->mmu_lock);
5247 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
5254 cond_resched_lock(&kvm->mmu_lock);
5407 cond_resched_lock(&kvm->mmu_lock)) {
5441 spin_lock(&kvm->mmu_lock);
5458 * Note: we need to do this under the protection of mmu_lock,
5468 spin_unlock(&kvm->mmu_lock);
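The pair at 5441/5468 brackets what looks like a slot-wide pass over SPTEs, and the note at 5458 is the key constraint: the SPTE updates themselves must happen with mmu_lock held, because fault and zap paths walk the same rmap chains under that lock and must never observe a half-finished pass. A sketch of the shape, with the per-slot work as a placeholder:

    spin_lock(&kvm->mmu_lock);
    /*
     * Every SPTE write in this pass happens under mmu_lock, so
     * concurrent faults and zaps serialize against it.
     */
    write_protect_slot(kvm, memslot);       /* placeholder pass */
    spin_unlock(&kvm->mmu_lock);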
5510 spin_lock(&kvm->mmu_lock);
5534 spin_unlock(&kvm->mmu_lock);
5549 spin_lock(&kvm->mmu_lock);
5554 spin_unlock(&kvm->mmu_lock);
5614 spin_lock(&kvm->mmu_lock);
5620 spin_unlock(&kvm->mmu_lock);
5628 * are related to dirty logging, and do the TLB flush out of mmu_lock.
5643 spin_lock(&kvm->mmu_lock);
5647 spin_unlock(&kvm->mmu_lock);
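The comment at 5628 heads the run of short critical sections that follows (5643/5647, 5665/5670, 5682/5686): the dirty-logging operations do their SPTE pass under mmu_lock but issue the remote TLB flush only after dropping it, presumably because the flush has to kick every vCPU and is too expensive to run under a contended spinlock. A sketch of that split; the pass itself is a placeholder, while kvm_flush_remote_tlbs() is the real flush helper:

    bool flush;

    spin_lock(&kvm->mmu_lock);
    flush = update_dirty_log_sptes(kvm, memslot);   /* placeholder pass */
    spin_unlock(&kvm->mmu_lock);

    /* Flush outside mmu_lock, as the comment at 5628 prescribes. */
    if (flush)
            kvm_flush_remote_tlbs(kvm);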
5665 spin_lock(&kvm->mmu_lock);
5670 spin_unlock(&kvm->mmu_lock);
5682 spin_lock(&kvm->mmu_lock);
5686 spin_unlock(&kvm->mmu_lock);
5699 spin_lock(&kvm->mmu_lock);
5706 if (cond_resched_lock(&kvm->mmu_lock))
5715 spin_unlock(&kvm->mmu_lock);
5765 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
5775 spin_lock(&kvm->mmu_lock);
5786 spin_unlock(&kvm->mmu_lock);
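The comment at 5765 plus the pair at 5775/5786 shows the opposite concession: a racy unlocked read first, the lock only when it looks worthwhile. n_used_mmu_pages read without mmu_lock may be stale, but the caller only uses it as a go/no-go heuristic and re-checks real state once the lock is held. A sketch of that shape; zap_some_pages() is a placeholder:

    /*
     * Racy read: a stale count can make us skip or enter needlessly,
     * which is fine for a heuristic and avoids taking the lock on
     * every pass.
     */
    if (!kvm->arch.n_used_mmu_pages)
            return;

    spin_lock(&kvm->mmu_lock);
    if (kvm->arch.n_used_mmu_pages)         /* re-check under the lock */
            zap_some_pages(kvm);            /* placeholder */
    spin_unlock(&kvm->mmu_lock);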
6019 spin_lock(&kvm->mmu_lock);
6043 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
6045 cond_resched_lock(&kvm->mmu_lock);
6051 spin_unlock(&kvm->mmu_lock);
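The final cluster (6019, 6043/6045, 6051) pairs the lock-break guard from the top of the listing with a list walk, which adds one obligation: once cond_resched_lock() may have dropped mmu_lock, any in-flight iteration over lock-protected state is stale and must restart from the top. The hit at 5706 above is the same idea expressed through the return value, which is nonzero exactly when the lock was dropped. A sketch; prepare_zap() is a placeholder, while active_mmu_pages is the real list of shadow pages:

    struct kvm_mmu_page *sp, *node;

    spin_lock(&kvm->mmu_lock);
    restart:
    list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
            if (prepare_zap(kvm, sp))       /* placeholder: may unlink more entries */
                    goto restart;

            if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
                    cond_resched_lock(&kvm->mmu_lock);
                    goto restart;           /* list may have changed while unlocked */
            }
    }
    spin_unlock(&kvm->mmu_lock);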