Home
last modified time | relevance | path

Searched refs:vmas (Results 1 - 17 of 17) sorted by relevance

/kernel/linux/linux-6.6/mm/damon/
vaddr-test.h:17 static int __link_vmas(struct maple_tree *mt, struct vm_area_struct *vmas, in __link_vmas() argument
28 mas_set_range(&mas, vmas[i].vm_start, vmas[i].vm_end - 1); in __link_vmas()
29 if (mas_store_gfp(&mas, &vmas[i], GFP_KERNEL)) in __link_vmas()
71 struct vm_area_struct vmas[] = { in damon_test_three_regions_in_vmas() local
81 if (__link_vmas(&mm.mm_mt, vmas, ARRAY_SIZE(vmas))) in damon_test_three_regions_in_vmas()
/kernel/linux/linux-5.10/mm/
gup.c:976 * @vmas: array of pointers to vmas corresponding to each page.
991 * @vmas are valid only as long as mmap_lock is held.
1030 struct vm_area_struct **vmas, int *locked) in __get_user_pages()
1074 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
1135 if (vmas) { in __get_user_pages()
1136 vmas[i] = vma; in __get_user_pages()
1257 struct vm_area_struct **vmas, in __get_user_pages_locked()
1265 /* if VM_FAULT_RETRY can be returned, vmas become invalid */ in __get_user_pages_locked()
1266 BUG_ON(vmas); in __get_user_pages_locked()
1027 __get_user_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) __get_user_pages() argument
1253 __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, int *locked, unsigned int flags) __get_user_pages_locked() argument
1500 __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, int *locked, unsigned int foll_flags) __get_user_pages_locked() argument
1577 check_dax_vmas(struct vm_area_struct **vmas, long nr_pages) check_dax_vmas() argument
1597 check_and_migrate_cma_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, unsigned int gup_flags) check_and_migrate_cma_pages() argument
1692 check_and_migrate_cma_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, unsigned int gup_flags) check_and_migrate_cma_pages() argument
1707 __gup_longterm_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, unsigned int gup_flags) __gup_longterm_locked() argument
1760 __gup_longterm_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, unsigned int flags) __gup_longterm_locked() argument
1792 __get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) __get_user_pages_remote() argument
1881 get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) get_user_pages_remote() argument
1895 get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) get_user_pages_remote() argument
1903 __get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) __get_user_pages_remote() argument
1928 get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas) get_user_pages() argument
2949 pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) pin_user_pages_remote() argument
2982 pin_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas) pin_user_pages() argument
[all...]
vmacache.c:38 current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma; in vmacache_update()
72 struct vm_area_struct *vma = current->vmacache.vmas[idx]; in vmacache_find()
105 struct vm_area_struct *vma = current->vmacache.vmas[idx]; in vmacache_find_exact()
hugetlb.c:4853 struct page **pages, struct vm_area_struct **vmas, in follow_hugetlb_page()
4971 if (!pages && !vmas && !pfn_offset && in follow_hugetlb_page()
5002 if (vmas) in follow_hugetlb_page()
5003 vmas[i] = vma; in follow_hugetlb_page()
4852 follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, unsigned long *position, unsigned long *nr_pages, long i, unsigned int flags, int *locked) follow_hugetlb_page() argument
nommu.c:637 if (curr->vmacache.vmas[i] == vma) { in delete_vma_from_mm()
/kernel/linux/linux-5.10/include/linux/
vmacache.h:10 memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); in vmacache_flush()
mm_types_task.h:36 struct vm_area_struct *vmas[VMACACHE_SIZE]; member
mm.h:195 * Default maximum number of active map areas, this limits the number of vmas
421 * Special vmas that are non-mergable, non-mlock()able.
1775 struct vm_area_struct **vmas, int *locked);
1779 struct vm_area_struct **vmas, int *locked);
1782 struct vm_area_struct **vmas);
1785 struct vm_area_struct **vmas);
hugetlb.h:224 struct vm_area_struct **vmas, unsigned long *position, in follow_hugetlb_page()
222 follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, unsigned long *position, unsigned long *nr_pages, long i, unsigned int flags, int *nonblocking) follow_hugetlb_page() argument
/kernel/linux/linux-6.6/drivers/gpu/drm/msm/
msm_gem.c:377 list_add_tail(&vma->list, &msm_obj->vmas); in add_vma()
390 list_for_each_entry(vma, &msm_obj->vmas, list) { in lookup_vma()
421 list_for_each_entry(vma, &msm_obj->vmas, list) { in put_iova_spaces()
439 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { in put_iova_vmas()
970 if (!list_empty(&msm_obj->vmas)) { in msm_gem_describe()
972 seq_puts(m, " vmas:"); in msm_gem_describe()
974 list_for_each_entry(vma, &msm_obj->vmas, list) { in msm_gem_describe()
1159 INIT_LIST_HEAD(&msm_obj->vmas); in msm_gem_new_impl()
msm_gem.h:64 struct list_head list; /* node in msm_gem_object::vmas */
102 struct list_head vmas; /* list of msm_gem_vma */ member
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/
msm_gem.c:332 list_add_tail(&vma->list, &msm_obj->vmas); in add_vma()
345 list_for_each_entry(vma, &msm_obj->vmas, list) { in lookup_vma()
371 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { in put_iova()
840 if (!list_empty(&msm_obj->vmas)) { in msm_gem_describe()
842 seq_puts(m, " vmas:"); in msm_gem_describe()
844 list_for_each_entry(vma, &msm_obj->vmas, list) { in msm_gem_describe()
1031 INIT_LIST_HEAD(&msm_obj->vmas); in msm_gem_new_impl()
msm_gem.h:38 struct list_head list; /* node in msm_gem_object::vmas */
80 struct list_head vmas; /* list of msm_gem_vma */ member
/kernel/linux/linux-5.10/drivers/video/fbdev/vermilion/
vermilion.h:210 atomic_t vmas; member
/kernel/linux/linux-6.6/drivers/video/fbdev/vermilion/
vermilion.h:210 atomic_t vmas; member
/kernel/linux/linux-5.10/kernel/debug/
debug_core.c:294 if (!current->vmacache.vmas[i]) in kgdb_flush_swbreak_addr()
296 flush_cache_range(current->vmacache.vmas[i], in kgdb_flush_swbreak_addr()
/kernel/linux/linux-5.10/io_uring/
io_uring.c:9052 struct vm_area_struct **vmas = NULL; in io_sqe_buffer_register() local
9075 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *), in io_sqe_buffer_register()
9077 if (!vmas) in io_sqe_buffer_register()
9087 pages, vmas); in io_sqe_buffer_register()
9089 struct file *file = vmas[0]->vm_file; in io_sqe_buffer_register()
9093 if (vmas[i]->vm_file != file) { in io_sqe_buffer_register()
9099 if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) { in io_sqe_buffer_register()
9110 * if we did partial map, or found file backed vmas, in io_sqe_buffer_register()
9146 kvfree(vmas); in io_sqe_buffer_register()

Completed in 39 milliseconds