/kernel/linux/linux-5.10/include/linux/
mempolicy.h
   131: void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
   151: struct mempolicy **mpol, nodemask_t **nodemask);
   159: struct mempolicy *mpol = get_task_policy(current);  [in policy_nodemask_current(), local]
   161: return policy_nodemask(gfp, mpol);  [in policy_nodemask_current()]
   179: extern int mpol_parse_str(char *str, struct mempolicy **mpol);
   214: struct mempolicy *mpol)  [in mpol_shared_policy_init()]
   255: struct mempolicy **mpol, nodemask_t **nodemask)  [in huge_node()]
   257: *mpol = NULL;  [in huge_node()]
   278: static inline int mpol_parse_str(char *str, struct mempolicy **mpol)  [in mpol_parse_str(), argument]
   213: mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)  [mpol_shared_policy_init(), argument]
   253: huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask)  [huge_node(), argument]

shmem_fs.h
    42: struct mempolicy *mpol;  /* default memory policy for mappings */  [member]

/kernel/linux/linux-6.6/include/linux/
mempolicy.h
   128: void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
   148: struct mempolicy **mpol, nodemask_t **nodemask);
   169: extern int mpol_parse_str(char *str, struct mempolicy **mpol);
   211: struct mempolicy *mpol)  [in mpol_shared_policy_init()]
   252: struct mempolicy **mpol, nodemask_t **nodemask)  [in huge_node()]
   254: *mpol = NULL;  [in huge_node()]
   275: static inline int mpol_parse_str(char *str, struct mempolicy **mpol)  [in mpol_parse_str(), argument]
   210: mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)  [mpol_shared_policy_init(), argument]
   250: huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask)  [huge_node(), argument]

shmem_fs.h
    67: struct mempolicy *mpol;  /* default memory policy for mappings */  [member]

/kernel/linux/linux-5.10/mm/
shmem.c
   113: struct mempolicy *mpol;  [member]
  1471: static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)  [in shmem_show_mpol(), argument]
  1475: if (!mpol || mpol->mode == MPOL_DEFAULT)  [in shmem_show_mpol()]
  1478: mpol_to_str(buffer, sizeof(buffer), mpol);  [in shmem_show_mpol()]
  1480: seq_printf(seq, ",mpol=%s", buffer);  [in shmem_show_mpol()]
  1485: struct mempolicy *mpol = NULL;  [in shmem_get_sbmpol(), local]
  1486: if (sbinfo->mpol) {  [in shmem_get_sbmpol()]
  1488: mpol = sbinfo->mpol;  [in shmem_get_sbmpol()]
  1495: shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)  [shmem_show_mpol(), argument]
  2216: shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)  [shmem_set_policy(), argument]
  [more matches not shown]
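
The shmem_get_sbmpol() hits above are truncated by the index; what they point at is the usual take-a-reference-under-the-superblock-lock idiom, so a concurrent remount cannot free the policy out from under the caller. A minimal sketch of that idiom, assuming (as in mainline 5.10) that sbinfo->stat_lock is the lock protecting sbinfo->mpol; this is a sketch, not a verbatim copy of the file:

	/* Sketch: pin the superblock's default mempolicy before using it.
	 * The caller is expected to drop the reference with mpol_put().
	 */
	static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
	{
		struct mempolicy *mpol = NULL;

		if (sbinfo->mpol) {
			spin_lock(&sbinfo->stat_lock);
			mpol = sbinfo->mpol;
			mpol_get(mpol);		/* take a reference under the lock */
			spin_unlock(&sbinfo->stat_lock);
		}
		return mpol;
	}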

mempolicy.c
   312: /* Slow path of a mpol destructor. */
   982: * Take a refcount on the mpol, lookup_node() …  [in do_get_mempolicy()]
  2000: * huge_node(@vma, @addr, @gfp_flags, @mpol)
  2004: * @mpol: pointer to mempolicy pointer for reference counted mempolicy
  2015: struct mempolicy **mpol, nodemask_t **nodemask)  [in huge_node()]
  2019: *mpol = get_vma_policy(vma, addr);  [in huge_node()]
  2022: if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {  [in huge_node()]
  2023: nid = interleave_nid(*mpol, vma, addr,  [in huge_node()]
  2026: nid = policy_node(gfp_flags, *mpol, numa_node_id());  [in huge_node()]
  2027: if ((*mpol) …  [in huge_node()]
  2014: huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask)  [huge_node(), argument]
  2655: mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)  [mpol_shared_policy_init(), argument]
  2871: mpol_parse_str(char *str, struct mempolicy **mpol)  [mpol_parse_str(), argument]
  [more matches not shown]

hugetlb.c
  1142: struct mempolicy *mpol;  [in dequeue_huge_page_vma(), local]
  1161: nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);  [in dequeue_huge_page_vma()]
  1168: mpol_cond_put(mpol);  [in dequeue_huge_page_vma()]
  1972: struct mempolicy *mpol;  [in alloc_buddy_huge_page_with_mpol(), local]
  1977: nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);  [in alloc_buddy_huge_page_with_mpol()]
  1979: mpol_cond_put(mpol);  [in alloc_buddy_huge_page_with_mpol()]
  2007: struct mempolicy *mpol;  [in alloc_huge_page_vma(), local]
  2014: node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);  [in alloc_huge_page_vma()]
  2016: mpol_cond_put(mpol);  [in alloc_huge_page_vma()]
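
All three 5.10 hugetlb.c call sites above follow the same contract: huge_node() looks up the VMA's mempolicy (taking a reference on it when the policy is reference counted), returns the preferred node id plus an optional nodemask, and the caller must release the reference with mpol_cond_put() once the allocation attempt is done. A minimal sketch of that caller pattern; alloc_from_node_sketch() is a hypothetical placeholder for whatever allocator the real call site uses:

	/* Sketch of the huge_node() / mpol_cond_put() caller contract seen in
	 * dequeue_huge_page_vma() and alloc_buddy_huge_page_with_mpol().
	 * alloc_from_node_sketch() is a stand-in, not a real kernel helper.
	 */
	static struct page *alloc_for_vma_sketch(struct hstate *h, struct vm_area_struct *vma,
						 unsigned long address, gfp_t gfp_mask)
	{
		struct mempolicy *mpol;
		nodemask_t *nodemask;
		struct page *page;
		int nid;

		nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
		page = alloc_from_node_sketch(h, gfp_mask, nid, nodemask);
		mpol_cond_put(mpol);	/* drop the reference taken by huge_node() */
		return page;
	}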

/kernel/linux/linux-6.6/mm/
shmem.c
   115: struct mempolicy *mpol;  [member]
  1553: static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)  [in shmem_show_mpol(), argument]
  1557: if (!mpol || mpol->mode == MPOL_DEFAULT)  [in shmem_show_mpol()]
  1560: mpol_to_str(buffer, sizeof(buffer), mpol);  [in shmem_show_mpol()]
  1562: seq_printf(seq, ",mpol=%s", buffer);  [in shmem_show_mpol()]
  1567: struct mempolicy *mpol = NULL;  [in shmem_get_sbmpol(), local]
  1568: if (sbinfo->mpol) {  [in shmem_get_sbmpol()]
  1570: mpol = sbinfo->mpol;  [in shmem_get_sbmpol()]
  1577: shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)  [shmem_show_mpol(), argument]
  2343: shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)  [shmem_set_policy(), argument]
  4123: struct mempolicy *mpol = NULL;  [in shmem_reconfigure(), local]
  4217: struct mempolicy *mpol;  [in shmem_show_options(), local]
  [more matches not shown]

mempolicy.c
   311: /* Slow path of a mpol destructor. */
   985: * Take a refcount on the mpol, because we are about to …  [in do_get_mempolicy()]
  2020: * huge_node(@vma, @addr, @gfp_flags, @mpol)
  2024: * @mpol: pointer to mempolicy pointer for reference counted mempolicy
  2035: struct mempolicy **mpol, nodemask_t **nodemask)  [in huge_node()]
  2040: *mpol = get_vma_policy(vma, addr);  [in huge_node()]
  2042: mode = (*mpol)->mode;  [in huge_node()]
  2045: nid = interleave_nid(*mpol, vma, addr,  [in huge_node()]
  2048: nid = policy_node(gfp_flags, *mpol, numa_node_id());  [in huge_node()]
  2050: *nodemask = &(*mpol) …  [in huge_node()]
  2034: huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask)  [huge_node(), argument]
  2792: mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)  [mpol_shared_policy_init(), argument]
  3006: mpol_parse_str(char *str, struct mempolicy **mpol)  [mpol_parse_str(), argument]
  [more matches not shown]

hugetlb.c
  1402: struct mempolicy *mpol;  [in dequeue_hugetlb_folio_vma(), local]
  1420: nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);  [in dequeue_hugetlb_folio_vma()]
  1422: if (mpol_is_preferred_many(mpol)) {  [in dequeue_hugetlb_folio_vma()]
  1439: mpol_cond_put(mpol);  [in dequeue_hugetlb_folio_vma()]
  2479: struct mempolicy *mpol;  [in alloc_buddy_hugetlb_folio_with_mpol(), local]
  2484: nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);  [in alloc_buddy_hugetlb_folio_with_mpol()]
  2485: if (mpol_is_preferred_many(mpol)) {  [in alloc_buddy_hugetlb_folio_with_mpol()]
  2497: mpol_cond_put(mpol);  [in alloc_buddy_hugetlb_folio_with_mpol()]
  2525: struct mempolicy *mpol;  [in alloc_hugetlb_folio_vma(), local]
  2532: node = huge_node(vma, address, gfp_mask, &mpol,  [in alloc_hugetlb_folio_vma()]
  4552: struct mempolicy *mpol = get_task_policy(current);  [in policy_mbind_nodemask(), local]
  [more matches not shown]
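
Relative to 5.10, the 6.6 call sites above add a special case for MPOL_PREFERRED_MANY: when mpol_is_preferred_many() is true, the allocator first tries only the preferred nodes and then retries with no nodemask so the allocation can fall back to any allowed node, before dropping the policy reference as usual. A minimal sketch of that two-pass pattern; try_alloc_sketch() is a hypothetical placeholder for the per-call-site allocator:

	/* Sketch of the MPOL_PREFERRED_MANY handling visible in
	 * dequeue_hugetlb_folio_vma() and alloc_buddy_hugetlb_folio_with_mpol().
	 * try_alloc_sketch() is a stand-in, not a real kernel helper.
	 */
	static struct folio *alloc_folio_for_vma_sketch(struct hstate *h, struct vm_area_struct *vma,
							unsigned long addr, gfp_t gfp_mask)
	{
		struct mempolicy *mpol;
		nodemask_t *nodemask;
		struct folio *folio = NULL;
		int nid;

		nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
		if (mpol_is_preferred_many(mpol)) {
			/* First pass: restrict the attempt to the preferred nodes. */
			folio = try_alloc_sketch(h, gfp_mask | __GFP_NOWARN, nid, nodemask);
			/* Second pass (below) may fall back to any allowed node. */
			nodemask = NULL;
		}
		if (!folio)
			folio = try_alloc_sketch(h, gfp_mask, nid, nodemask);
		mpol_cond_put(mpol);	/* drop the reference taken by huge_node() */
		return folio;
	}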