Searched refs:mmu_lock (results 1 - 25 of 58), sorted by relevance.
/kernel/linux/linux-6.6/virt/kvm/
  kvm_mm.h
    8      * for the mmu_lock. These macros, for use in common code
    14     #define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock)
    15     #define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock)
    16     #define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock)
    18     #define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock)
    19     #define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock)
    20     #define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock)
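Note: the kvm_mm.h hits above show two alternative definitions of the KVM_MMU_LOCK* wrappers, one backed by an rwlock and one by a plain spinlock. A minimal sketch of how such a compile-time switch fits together follows; the KVM_HAVE_MMU_RWLOCK guard and the surrounding #ifdef structure are assumptions for illustration, only the macro bodies come from the hits.

    /* Sketch only: the guard macro is assumed; the bodies match hits 14-20. */
    #include <linux/spinlock.h>

    #ifdef KVM_HAVE_MMU_RWLOCK
    #define KVM_MMU_LOCK_INIT(kvm)  rwlock_init(&(kvm)->mmu_lock)
    #define KVM_MMU_LOCK(kvm)       write_lock(&(kvm)->mmu_lock)
    #define KVM_MMU_UNLOCK(kvm)     write_unlock(&(kvm)->mmu_lock)
    #else
    #define KVM_MMU_LOCK_INIT(kvm)  spin_lock_init(&(kvm)->mmu_lock)
    #define KVM_MMU_LOCK(kvm)       spin_lock(&(kvm)->mmu_lock)
    #define KVM_MMU_UNLOCK(kvm)     spin_unlock(&(kvm)->mmu_lock)
    #endif

Architectures whose results below take mmu_lock with write_lock() (the 6.6 x86 and arm64 trees) correspond to the rwlock variants; the others fall back to the spinlock forms.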
/kernel/linux/linux-6.6/arch/x86/kvm/mmu/
  page_track.c
    80     lockdep_assert_held_write(&kvm->mmu_lock);    in __kvm_write_track_add_gfn()
    103    lockdep_assert_held_write(&kvm->mmu_lock);    in __kvm_write_track_remove_gfn()
    172    write_lock(&kvm->mmu_lock);    in kvm_page_track_register_notifier()
    174    write_unlock(&kvm->mmu_lock);    in kvm_page_track_register_notifier()
    190    write_lock(&kvm->mmu_lock);    in kvm_page_track_unregister_notifier()
    192    write_unlock(&kvm->mmu_lock);    in kvm_page_track_unregister_notifier()
    268    write_lock(&kvm->mmu_lock);    in kvm_write_track_add_gfn()
    270    write_unlock(&kvm->mmu_lock);    in kvm_write_track_add_gfn()
    298    write_lock(&kvm->mmu_lock);    in kvm_write_track_remove_gfn()
    300    write_unlock(&kvm->mmu_lock);    in kvm_write_track_remove_gfn()
    [all...]
  tdp_mmu.c
    26     lockdep_assert_held_read(&kvm->mmu_lock);    in kvm_lockdep_assert_mmu_lock_held()
    28     lockdep_assert_held_write(&kvm->mmu_lock);    in kvm_lockdep_assert_mmu_lock_held()
    169    * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
    170    * the implication being that any flow that holds mmu_lock for read is
    172    * Holding mmu_lock for write obviates the need for RCU protection as the list
    226    lockdep_assert_held_write(&kvm->mmu_lock);    in kvm_tdp_mmu_get_vcpu_root_hpa()
    294    lockdep_assert_held_write(&kvm->mmu_lock);    in tdp_mmu_unlink_sp()
    356    * mmu_lock ensures the SPTE can't be made present.    in handle_removed_pt()
    368    * modified by a different vCPU outside of mmu_lock.    in handle_removed_pt()
    377    * task can zap/remove the SPTE as mmu_lock i    in handle_removed_pt()
    [all...]
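Note: hits 26 and 28 come from a helper that asserts mmu_lock is held in the correct mode for the current TDP MMU walk. A hedged reconstruction is sketched below; only the function name and the two lockdep assertions are taken from the results, the signature and return value are assumptions.

    #include <linux/kvm_host.h>
    #include <linux/lockdep.h>

    /* Sketch: assert mmu_lock is held for read when the walk is shared,
     * for write otherwise. Assumes mmu_lock is the rwlock_t variant that
     * the write_lock() hits in this tree suggest. */
    static bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, bool shared)
    {
        if (shared)
            lockdep_assert_held_read(&kvm->mmu_lock);
        else
            lockdep_assert_held_write(&kvm->mmu_lock);

        return true;
    }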
  mmu.c
    2113   if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {    in mmu_sync_children()
    2120   cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);    in mmu_sync_children()
    2570   lockdep_assert_held_write(&kvm->mmu_lock);    in __kvm_mmu_prepare_zap_page()
    2737   write_lock(&kvm->mmu_lock);    in kvm_mmu_change_mmu_pages()
    2748   write_unlock(&kvm->mmu_lock);    in kvm_mmu_change_mmu_pages()
    2758   write_lock(&kvm->mmu_lock);    in kvm_mmu_unprotect_page()
    2764   write_unlock(&kvm->mmu_lock);    in kvm_mmu_unprotect_page()
    2831   * run with mmu_lock held for read, not write, and the unsync    in mmu_try_to_unsync_pages()
    2834   * no meaningful penalty if mmu_lock is held for write.    in mmu_try_to_unsync_pages()
    2844   * possible as clearing sp->unsync _must_ hold mmu_lock    in mmu_try_to_unsync_pages()
    [all...]
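Note: hits 2113 and 2120 show how long-running work behaves while holding mmu_lock for write: if a reschedule is due or another CPU is waiting on the lock, the holder drops and retakes it via cond_resched_rwlock_write(). A minimal, hypothetical helper illustrating that check (the function name is invented; it assumes the rwlock_t form of mmu_lock used in this tree):

    #include <linux/kvm_host.h>
    #include <linux/sched.h>

    /* Hypothetical helper: yield mmu_lock if a reschedule is pending or the
     * rwlock is contended. Callers must tolerate the lock being dropped and
     * reacquired, e.g. by flushing any batched TLB work first. */
    static void example_yield_mmu_lock(struct kvm *kvm)
    {
        if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
            cond_resched_rwlock_write(&kvm->mmu_lock);
    }

The 5.10 results further down show the same pattern with spin_needbreak() and cond_resched_lock(), since mmu_lock was still a spinlock there.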
/kernel/linux/linux-6.6/arch/riscv/kvm/
  mmu.c
    297    * If the range is too large, release the kvm->mmu_lock    in gstage_unmap_range()
    301    cond_resched_lock(&kvm->mmu_lock);    in gstage_unmap_range()
    340    spin_lock(&kvm->mmu_lock);    in gstage_wp_memory_region()
    342    spin_unlock(&kvm->mmu_lock);    in gstage_wp_memory_region()
    372    spin_lock(&kvm->mmu_lock);    in kvm_riscv_gstage_ioremap()
    374    spin_unlock(&kvm->mmu_lock);    in kvm_riscv_gstage_ioremap()
    388    spin_lock(&kvm->mmu_lock);    in kvm_riscv_gstage_iounmap()
    390    spin_unlock(&kvm->mmu_lock);    in kvm_riscv_gstage_iounmap()
    428    spin_lock(&kvm->mmu_lock);    in kvm_arch_flush_shadow_memslot()
    430    spin_unlock(&kvm->mmu_lock);    in kvm_arch_flush_shadow_memslot()
    [all...]
/kernel/linux/linux-5.10/arch/arm64/kvm/
  mmu.c
    61     cond_resched_lock(&kvm->mmu_lock);    in stage2_apply_range()
    123    * be called while holding mmu_lock (unless for freeing the stage2 pgd before
    133    assert_spin_locked(&kvm->mmu_lock);    in __unmap_stage2_range()
    167    spin_lock(&kvm->mmu_lock);    in stage2_flush_vm()
    173    spin_unlock(&kvm->mmu_lock);    in stage2_flush_vm()
    459    spin_lock(&kvm->mmu_lock);    in stage2_unmap_vm()
    465    spin_unlock(&kvm->mmu_lock);    in stage2_unmap_vm()
    475    spin_lock(&kvm->mmu_lock);    in kvm_free_stage2_pgd()
    482    spin_unlock(&kvm->mmu_lock);    in kvm_free_stage2_pgd()
    519    spin_lock(&kvm->mmu_lock);    in kvm_phys_addr_ioremap()
    [all...]
/kernel/linux/linux-5.10/arch/powerpc/kvm/
  book3s_mmu_hpte.c
    63     spin_lock(&vcpu3s->mmu_lock);    in kvmppc_mmu_hpte_cache_map()
    92     spin_unlock(&vcpu3s->mmu_lock);    in kvmppc_mmu_hpte_cache_map()
    110    spin_lock(&vcpu3s->mmu_lock);    in invalidate_pte()
    114    spin_unlock(&vcpu3s->mmu_lock);    in invalidate_pte()
    127    spin_unlock(&vcpu3s->mmu_lock);    in invalidate_pte()
    369    spin_lock_init(&vcpu3s->mmu_lock);    in kvmppc_mmu_hpte_init()
  book3s_hv_nested.c
    635    * so we don't need to hold kvm->mmu_lock.    in kvmhv_release_nested()
    652    spin_lock(&kvm->mmu_lock);    in kvmhv_remove_nested()
    663    spin_unlock(&kvm->mmu_lock);    in kvmhv_remove_nested()
    682    spin_lock(&kvm->mmu_lock);    in kvmhv_release_all_nested()
    694    spin_unlock(&kvm->mmu_lock);    in kvmhv_release_all_nested()
    711    spin_lock(&kvm->mmu_lock);    in kvmhv_flush_nested()
    713    spin_unlock(&kvm->mmu_lock);    in kvmhv_flush_nested()
    729    spin_lock(&kvm->mmu_lock);    in kvmhv_get_nested()
    733    spin_unlock(&kvm->mmu_lock);    in kvmhv_get_nested()
    741    spin_lock(&kvm->mmu_lock);    in kvmhv_get_nested()
    [all...]
  book3s_64_mmu_radix.c
    393    /* Called with kvm->mmu_lock held */
    618    spin_lock(&kvm->mmu_lock);    in kvmppc_create_pte()
    754    spin_unlock(&kvm->mmu_lock);    in kvmppc_create_pte()
    842    spin_lock(&kvm->mmu_lock);    in kvmppc_book3s_instantiate_page()
    847    spin_unlock(&kvm->mmu_lock);    in kvmppc_book3s_instantiate_page()
    977    spin_lock(&kvm->mmu_lock);    in kvmppc_book3s_radix_page_fault()
    981    spin_unlock(&kvm->mmu_lock);    in kvmppc_book3s_radix_page_fault()
    997    /* Called with kvm->mmu_lock held */
    1017   /* Called with kvm->mmu_lock held */
    1045   /* Called with kvm->mmu_lock hel
    [all...]
  book3s_hv_rm_mmu.c
    252    arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);    in kvmppc_do_h_enter()
    267    arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);    in kvmppc_do_h_enter()
    281    arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);    in kvmppc_do_h_enter()
    936    arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);    in kvmppc_do_h_page_init_zero()
    948    arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);    in kvmppc_do_h_page_init_zero()
    964    arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);    in kvmppc_do_h_page_init_copy()
    979    arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);    in kvmppc_do_h_page_init_copy()
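Note: the book3s_hv_rm_mmu.c hits take mmu_lock through arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock) rather than spin_lock(). In a non-PREEMPT_RT build a spinlock_t wraps a raw_spinlock (.rlock), which in turn wraps the architecture lock (.raw_lock), so this reaches the lowest-level lock directly and skips the usual wrappers, which this real-mode code apparently avoids. A hypothetical helper showing only the access pattern (the function name and comment are invented; the lock expression matches the hits):

    #include <linux/kvm_host.h>
    #include <linux/spinlock.h>

    /* Hypothetical illustration of the real-mode locking pattern above.
     * Assumes mmu_lock is a spinlock_t (the 5.10 layout) and a
     * non-PREEMPT_RT kernel, where .rlock.raw_lock is the arch lock. */
    static void example_realmode_mmu_lock(struct kvm *kvm)
    {
        arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
        /* ... update the guest hashed page table entry ... */
        arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
    }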
/kernel/linux/linux-6.6/arch/powerpc/kvm/
  book3s_mmu_hpte.c
    63     spin_lock(&vcpu3s->mmu_lock);    in kvmppc_mmu_hpte_cache_map()
    92     spin_unlock(&vcpu3s->mmu_lock);    in kvmppc_mmu_hpte_cache_map()
    110    spin_lock(&vcpu3s->mmu_lock);    in invalidate_pte()
    114    spin_unlock(&vcpu3s->mmu_lock);    in invalidate_pte()
    127    spin_unlock(&vcpu3s->mmu_lock);    in invalidate_pte()
    369    spin_lock_init(&vcpu3s->mmu_lock);    in kvmppc_mmu_hpte_init()
  book3s_hv_nested.c
    728    * so we don't need to hold kvm->mmu_lock.    in kvmhv_release_nested()
    745    spin_lock(&kvm->mmu_lock);    in kvmhv_remove_nested()
    751    spin_unlock(&kvm->mmu_lock);    in kvmhv_remove_nested()
    770    spin_lock(&kvm->mmu_lock);    in kvmhv_release_all_nested()
    780    spin_unlock(&kvm->mmu_lock);    in kvmhv_release_all_nested()
    797    spin_lock(&kvm->mmu_lock);    in kvmhv_flush_nested()
    799    spin_unlock(&kvm->mmu_lock);    in kvmhv_flush_nested()
    814    spin_lock(&kvm->mmu_lock);    in kvmhv_get_nested()
    818    spin_unlock(&kvm->mmu_lock);    in kvmhv_get_nested()
    832    spin_lock(&kvm->mmu_lock);    in kvmhv_get_nested()
    [all...]
  book3s_64_mmu_radix.c
    417    /* Called with kvm->mmu_lock held */
    642    spin_lock(&kvm->mmu_lock);    in kvmppc_create_pte()
    778    spin_unlock(&kvm->mmu_lock);    in kvmppc_create_pte()
    866    spin_lock(&kvm->mmu_lock);    in kvmppc_book3s_instantiate_page()
    871    spin_unlock(&kvm->mmu_lock);    in kvmppc_book3s_instantiate_page()
    1006   spin_lock(&kvm->mmu_lock);    in kvmppc_book3s_radix_page_fault()
    1010   spin_unlock(&kvm->mmu_lock);    in kvmppc_book3s_radix_page_fault()
    1026   /* Called with kvm->mmu_lock held */
    1045   /* Called with kvm->mmu_lock held */
    1073   /* Called with kvm->mmu_lock hel
    [all...]
/kernel/linux/linux-6.6/arch/arm64/kvm/
  mmu.c
    76     cond_resched_rwlock_write(&kvm->mmu_lock);    in stage2_apply_range()
    105    if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))    in need_split_memcache_topup_or_resched()
    122    lockdep_assert_held_write(&kvm->mmu_lock);    in kvm_mmu_split_huge_pages()
    134    write_unlock(&kvm->mmu_lock);    in kvm_mmu_split_huge_pages()
    140    write_lock(&kvm->mmu_lock);    in kvm_mmu_split_huge_pages()
    315    * be called while holding mmu_lock (unless for freeing the stage2 pgd before
    325    lockdep_assert_held_write(&kvm->mmu_lock);    in __unmap_stage2_range()
    359    write_lock(&kvm->mmu_lock);    in stage2_flush_vm()
    365    write_unlock(&kvm->mmu_lock);    in stage2_flush_vm()
    998    write_lock(&kvm->mmu_lock);    in stage2_unmap_vm()
    [all...]
/kernel/linux/linux-5.10/arch/x86/kvm/mmu/
  page_track.c
    187    spin_lock(&kvm->mmu_lock);    in kvm_page_track_register_notifier()
    189    spin_unlock(&kvm->mmu_lock);    in kvm_page_track_register_notifier()
    205    spin_lock(&kvm->mmu_lock);    in kvm_page_track_unregister_notifier()
    207    spin_unlock(&kvm->mmu_lock);    in kvm_page_track_unregister_notifier()
  tdp_mmu.c
    54     lockdep_assert_held(&kvm->mmu_lock);    in tdp_mmu_next_root_valid()
    112    lockdep_assert_held(&kvm->mmu_lock);    in kvm_tdp_mmu_free_root()
    163    spin_lock(&kvm->mmu_lock);    in get_tdp_mmu_vcpu_root()
    169    spin_unlock(&kvm->mmu_lock);    in get_tdp_mmu_vcpu_root()
    179    spin_unlock(&kvm->mmu_lock);    in get_tdp_mmu_vcpu_root()
    429    if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {    in tdp_mmu_iter_cond_resched()
    433    cond_resched_lock(&kvm->mmu_lock);    in tdp_mmu_iter_cond_resched()
    1006   lockdep_assert_held(&kvm->mmu_lock);    in kvm_tdp_mmu_clear_dirty_pt_masked()
    1161   lockdep_assert_held(&kvm->mmu_lock);    in kvm_tdp_mmu_write_protect_gfn()
  mmu.c
    2000   if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {    in mmu_sync_children()
    2002   cond_resched_lock(&vcpu->kvm->mmu_lock);    in mmu_sync_children()
    2454   spin_lock(&kvm->mmu_lock);    in kvm_mmu_change_mmu_pages()
    2465   spin_unlock(&kvm->mmu_lock);    in kvm_mmu_change_mmu_pages()
    2476   spin_lock(&kvm->mmu_lock);    in kvm_mmu_unprotect_page()
    2484   spin_unlock(&kvm->mmu_lock);    in kvm_mmu_unprotect_page()
    2818   * mmu_notifier_retry() was successful and mmu_lock is held, so    in kvm_mmu_hugepage_adjust()
    3178   spin_lock(&kvm->mmu_lock);    in kvm_mmu_free_roots()
    3201   spin_unlock(&kvm->mmu_lock);    in kvm_mmu_free_roots()
    3222   spin_lock(&vcpu->kvm->mmu_lock);    in mmu_alloc_root()
    [all...]
  mmu_internal.h
    101    lockdep_assert_held(&kvm->mmu_lock);    in kvm_mmu_get_root()
    108    lockdep_assert_held(&kvm->mmu_lock);    in kvm_mmu_put_root()
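Note: hits 101 and 108 come from inline helpers that adjust a root page's reference count and assert mmu_lock is held while doing so. A hedged reconstruction is sketched below; only the lockdep_assert_held(&kvm->mmu_lock) calls come from the results, the structure, field name and return convention are assumptions.

    #include <linux/kvm_host.h>
    #include <linux/lockdep.h>

    /* Hypothetical stand-in for the shadow-page structure. */
    struct example_mmu_page {
        int root_count;    /* number of roots referencing this page */
    };

    static inline void example_mmu_get_root(struct kvm *kvm, struct example_mmu_page *sp)
    {
        lockdep_assert_held(&kvm->mmu_lock);    /* refcount changes require mmu_lock */
        ++sp->root_count;
    }

    static inline bool example_mmu_put_root(struct kvm *kvm, struct example_mmu_page *sp)
    {
        lockdep_assert_held(&kvm->mmu_lock);
        --sp->root_count;
        return !sp->root_count;    /* true when the last reference went away */
    }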
/kernel/linux/linux-6.6/arch/mips/kvm/
  mmu.c
    265    * The caller must hold the @kvm->mmu_lock spinlock.
    390    * The caller must hold the @kvm->mmu_lock spinlock.    in BUILD_PTE_RANGE_OP()
    412    * acquire @kvm->mmu_lock.
    521    spin_lock(&kvm->mmu_lock);    in _kvm_mips_map_page_fast()
    556    spin_unlock(&kvm->mmu_lock);    in _kvm_mips_map_page_fast()
    622    * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.    in kvm_mips_map_page()
    638    spin_lock(&kvm->mmu_lock);    in kvm_mips_map_page()
    646    spin_unlock(&kvm->mmu_lock);    in kvm_mips_map_page()
    675    spin_unlock(&kvm->mmu_lock);    in kvm_mips_map_page()
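Note: hit 622 refers to the usual KVM fault-path handshake with the MMU notifiers: sample kvm->mmu_invalidate_seq before the (possibly sleeping) pfn lookup, then recheck it with mmu_invalidate_retry() after taking mmu_lock, and retry if an invalidation slipped in between. A hypothetical skeleton of that pattern follows; names other than mmu_lock, mmu_invalidate_seq, and mmu_invalidate_retry() are invented, and it assumes the spinlock form of mmu_lock used by the MIPS code above.

    #include <linux/kvm_host.h>

    static int example_map_page(struct kvm *kvm, gfn_t gfn)
    {
        unsigned long mmu_seq;

    retry:
        mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();    /* pairs with the write side in the invalidate path */

        /* ... translate gfn to a host pfn; this step may sleep ... */

        spin_lock(&kvm->mmu_lock);
        if (mmu_invalidate_retry(kvm, mmu_seq)) {
            /* An MMU notifier invalidation raced with us; start over. */
            spin_unlock(&kvm->mmu_lock);
            goto retry;
        }
        /* ... install the new guest PTE while mmu_lock is held ... */
        spin_unlock(&kvm->mmu_lock);
        return 0;
    }

The 5.10 MIPS results below show the same handshake under its older name, mmu_notifier_retry().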
/kernel/linux/linux-6.6/drivers/accel/habanalabs/common/
  command_buffer.c
    41     mutex_lock(&hdev->mmu_lock);    in cb_map_mem()
    53     mutex_unlock(&hdev->mmu_lock);    in cb_map_mem()
    62     mutex_unlock(&hdev->mmu_lock);    in cb_map_mem()
    72     mutex_lock(&hdev->mmu_lock);    in cb_unmap_mem()
    75     mutex_unlock(&hdev->mmu_lock);    in cb_unmap_mem()
/kernel/linux/linux-6.6/arch/x86/kvm/
  debugfs.c
    112    write_lock(&kvm->mmu_lock);    in kvm_mmu_rmaps_stat_show()
    132    write_unlock(&kvm->mmu_lock);    in kvm_mmu_rmaps_stat_show()
/kernel/linux/linux-5.10/arch/mips/kvm/
  mmu.c
    266    * The caller must hold the @kvm->mmu_lock spinlock.
    391    * The caller must hold the @kvm->mmu_lock spinlock.    in BUILD_PTE_RANGE_OP()
    413    * acquire @kvm->mmu_lock.
    596    spin_lock(&kvm->mmu_lock);    in _kvm_mips_map_page_fast()
    631    spin_unlock(&kvm->mmu_lock);    in _kvm_mips_map_page_fast()
    697    * chance to grab the mmu_lock without mmu_notifier_retry() noticing.    in kvm_mips_map_page()
    713    spin_lock(&kvm->mmu_lock);    in kvm_mips_map_page()
    721    spin_unlock(&kvm->mmu_lock);    in kvm_mips_map_page()
    750    spin_unlock(&kvm->mmu_lock);    in kvm_mips_map_page()
    1043   spin_lock(&kvm->mmu_lock);    in kvm_mips_handle_mapped_seg_tlb_fault()
    [all...]
/kernel/linux/linux-5.10/drivers/misc/habanalabs/common/
  command_buffer.c
    66     mutex_lock(&ctx->mmu_lock);    in cb_map_mem()
    86     mutex_unlock(&ctx->mmu_lock);    in cb_map_mem()
    103    mutex_unlock(&ctx->mmu_lock);    in cb_map_mem()
    120    mutex_lock(&ctx->mmu_lock);    in cb_unmap_mem()
    132    mutex_unlock(&ctx->mmu_lock);    in cb_unmap_mem()
/kernel/linux/linux-5.10/arch/loongarch/kvm/
  mmu.c
    307    * The caller must hold the @kvm->mmu_lock spinlock.
    432    * The caller must hold the @kvm->mmu_lock spinlock.
    453    * acquire @kvm->mmu_lock.
    490    spin_lock(&kvm->mmu_lock);    in kvm_arch_commit_memory_region()
    497    spin_unlock(&kvm->mmu_lock);    in kvm_arch_commit_memory_region()
    521    spin_lock(&kvm->mmu_lock);    in kvm_arch_flush_shadow_memslot()
    528    spin_unlock(&kvm->mmu_lock);    in kvm_arch_flush_shadow_memslot()
    910    * was successful and we are holding the mmu_lock, so if this    in transparent_hugepage_adjust()
    1021   spin_lock(&kvm->mmu_lock);    in kvm_map_page_fast()
    1070   spin_unlock(&kvm->mmu_lock);    in kvm_map_page_fast()
    [all...]
/kernel/linux/linux-6.6/drivers/accel/habanalabs/common/mmu/
  mmu.c
    50     mutex_init(&hdev->mmu_lock);    in hl_mmu_init()
    94     mutex_destroy(&hdev->mmu_lock);    in hl_mmu_fini()
    572    mutex_lock(&hdev->mmu_lock);    in hl_mmu_get_tlb_info()
    574    mutex_unlock(&hdev->mmu_lock);    in hl_mmu_get_tlb_info()
    674    mutex_lock(&hdev->mmu_lock);    in hl_mmu_prefetch_work_function()
    678    mutex_unlock(&hdev->mmu_lock);    in hl_mmu_prefetch_work_function()
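Note: the habanalabs results use mmu_lock as a mutex rather than a spinlock: it is created in hl_mmu_init(), destroyed in hl_mmu_fini(), and taken around page-table walks and prefetch work. A simplified, hypothetical lifecycle showing the same mutex usage (all names apart from mmu_lock and the mutex API are invented):

    #include <linux/mutex.h>

    struct example_dev {
        struct mutex mmu_lock;    /* serializes device MMU page-table updates */
        /* ... page-table state ... */
    };

    static void example_mmu_init(struct example_dev *dev)
    {
        mutex_init(&dev->mmu_lock);
    }

    static void example_mmu_query(struct example_dev *dev)
    {
        mutex_lock(&dev->mmu_lock);
        /* ... walk the device page tables, e.g. to fill in TLB info ... */
        mutex_unlock(&dev->mmu_lock);
    }

    static void example_mmu_fini(struct example_dev *dev)
    {
        mutex_destroy(&dev->mmu_lock);
    }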
Completed in 41 milliseconds