
Searched refs:page (Results 126 - 150 of 6697) sorted by relevance


/kernel/linux/linux-6.6/mm/
gup.c
33 static inline void sanity_check_pinned_pages(struct page **pages, in sanity_check_pinned_pages()
42 * stick around until the page is freed. in sanity_check_pinned_pages()
47 * THP we can assume that either the given page (PTE-mapped THP) or in sanity_check_pinned_pages()
48 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If in sanity_check_pinned_pages()
52 struct page *page = *pages; in sanity_check_pinned_pages() local
53 struct folio *folio = page_folio(page); in sanity_check_pinned_pages()
55 if (is_zero_page(page) || in sanity_check_pinned_pages()
59 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page); in sanity_check_pinned_pages()
71 try_get_folio(struct page *page, int refs) try_get_folio() argument
126 try_grab_folio(struct page *page, int refs, unsigned int flags) try_grab_folio() argument
225 try_grab_page(struct page *page, unsigned int flags) try_grab_page() argument
272 unpin_user_page(struct page *page) unpin_user_page() argument
424 unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, bool make_dirty) unpin_user_page_range_dirty_lock() argument
542 can_follow_write_pte(pte_t pte, struct page *page, struct vm_area_struct *vma, unsigned int flags) can_follow_write_pte() argument
584 struct page *page; follow_page_pte() local
698 struct page *page; follow_pmd_mask() local
749 struct page *page; follow_pud_mask() local
839 struct page *page; follow_page() local
857 get_gate_page(struct mm_struct *mm, unsigned long address, unsigned int gup_flags, struct vm_area_struct **vma, struct page **page) get_gate_page() argument
1201 struct page *page; __get_user_pages() local
2014 struct page *page; get_dump_page() local
2525 struct page *page = pages[--(*nr)]; undo_dev_pagemap() local
2568 struct page *page; gup_pte_range() local
2678 struct page *page = pfn_to_page(pfn); __gup_device_huge() local
2758 record_subpages(struct page *page, unsigned long addr, unsigned long end, struct page **pages) record_subpages() argument
2782 struct page *page; gup_hugepte() local
2856 struct page *page; gup_huge_pmd() local
2900 struct page *page; gup_huge_pud() local
2946 struct page *page; gup_huge_pgd() local
[all...]
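
The gup.c hits above are the page-pinning (FOLL_PIN) API. A minimal sketch of the usual pin/unpin pattern, assuming the 6.6-era pin_user_pages_fast() signature; the helper name and the buf/len/pages parameters are hypothetical driver state:

    #include <linux/mm.h>

    /* Pin a user buffer for long-term DMA; the caller supplies a pages[]
     * array sized for len >> PAGE_SHIFT entries. */
    static int demo_pin_user_buffer(unsigned long buf, unsigned long len,
                                    struct page **pages)
    {
            int nr = len >> PAGE_SHIFT;
            int pinned;

            /* FOLL_LONGTERM: the pin may be held indefinitely (e.g. RDMA) */
            pinned = pin_user_pages_fast(buf, nr, FOLL_WRITE | FOLL_LONGTERM,
                                         pages);
            if (pinned < 0)
                    return pinned;                   /* nothing was pinned */
            if (pinned != nr) {
                    unpin_user_pages(pages, pinned); /* partial pin: undo it */
                    return -EFAULT;
            }
            return 0;
    }

On teardown, unpin_user_pages_dirty_lock(pages, nr, true) both dirties the pages and drops the pins, which is the bulk operation behind the unpin_user_page_range_dirty_lock() hit above.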
highmem.c
13 * Rewrote high memory support to move the page cache into
62 * Determine color of virtual address where the page should be mapped.
64 static inline unsigned int get_pkmap_color(struct page *page) in get_pkmap_color() argument
71 * Get next index for mapping inside PKMAP region for page with given color.
82 * Determine if page index inside PKMAP region (pkmap_nr) of given color
154 struct page *__kmap_to_page(void *vaddr) in __kmap_to_page()
193 struct page *page; in flush_all_zero_pkmaps() local
212 * no-one has the page mapped in flush_all_zero_pkmaps()
234 map_new_virtual(struct page *page) map_new_virtual() argument
296 kmap_high(struct page *page) kmap_high() argument
326 kmap_high_get(struct page *page) kmap_high_get() argument
348 kunmap_high(struct page *page) kunmap_high() argument
392 zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2) zero_user_segments() argument
500 arch_kmap_local_high_get(struct page *page) arch_kmap_local_high_get() argument
564 __kmap_local_page_prot(struct page *page, pgprot_t prot) __kmap_local_page_prot() argument
716 struct page *page; global() member
731 page_slot(const struct page *page) page_slot() argument
742 page_address(const struct page *page) page_address() argument
775 set_page_address(struct page *page, void *virtual) set_page_address() argument
[all...]
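
The highmem.c hits cover the legacy kmap() pool (kmap_high()/kunmap_high()) plus the newer kmap_local infrastructure. A minimal sketch of the preferred temporary-mapping pattern; the helper name is hypothetical and the caller is assumed to pass len <= PAGE_SIZE:

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void demo_fill_page(struct page *page, const void *src, size_t len)
    {
            void *vaddr = kmap_local_page(page); /* CPU-local, may nest */

            memcpy(vaddr, src, len);             /* caller ensures len <= PAGE_SIZE */
            kunmap_local(vaddr);                 /* unmap in reverse order of mapping */
    }

Without CONFIG_HIGHMEM, kmap_local_page() reduces to page_address(), so the pkmap machinery above is only exercised on highmem configurations.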
/kernel/linux/linux-5.10/fs/
mpage.c
39 * If a page does not map to a contiguous run of blocks then it simply falls
42 * Why is this? If a page's completion depends on a number of different BIOs
44 * status of that page is hard. See end_buffer_async_read() for the details.
53 struct page *page = bv->bv_page; in mpage_end_io() local
54 page_endio(page, bio_op(bio), in mpage_end_io()
77 /* Restrict the given (page cache) mask for slab allocations */ in mpage_alloc()
96 * the page, which allows readpage to avoid triggering a duplicate call
100 * them. So when the buffer is up to date and the page size == block size,
101 * this marks the page up to date instead of adding new buffers.
104 map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) map_buffer_to_page() argument
138 struct page *page; global() member
158 struct page *page = args->page; do_mpage_readpage() local
381 struct page *page; mpage_readahead() local
402 mpage_readpage(struct page *page, get_block_t get_block) mpage_readpage() argument
445 clean_buffers(struct page *page, unsigned first_unmapped) clean_buffers() argument
475 clean_page_buffers(struct page *page) clean_page_buffers() argument
480 __mpage_writepage(struct page *page, struct writeback_control *wbc, void *data) __mpage_writepage() argument
726 mpage_writepage(struct page *page, get_block_t get_block, struct writeback_control *wbc) mpage_writepage() argument
[all...]
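
do_mpage_readpage() batches contiguous blocks of a page into a single BIO; a filesystem only has to supply a get_block_t callback. A minimal sketch of the usual wiring under the 5.10 API, with the myfs_* names hypothetical:

    #include <linux/fs.h>
    #include <linux/mpage.h>

    /* hypothetical block-mapping callback: file block -> disk block */
    int myfs_get_block(struct inode *inode, sector_t iblock,
                       struct buffer_head *bh_result, int create);

    static int myfs_readpage(struct file *file, struct page *page)
    {
            return mpage_readpage(page, myfs_get_block);
    }

    static void myfs_readahead(struct readahead_control *rac)
    {
            mpage_readahead(rac, myfs_get_block);
    }

    static const struct address_space_operations myfs_aops = {
            .readpage  = myfs_readpage,
            .readahead = myfs_readahead,
    };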
/kernel/linux/linux-6.6/fs/nilfs2/
page.c
3 * Buffer/page management specific to NILFS
14 #include <linux/page-flags.h>
20 #include "page.h"
29 __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index, in __nilfs_get_page_block() argument
36 if (!page_has_buffers(page)) in __nilfs_get_page_block()
37 create_empty_buffers(page, 1 << blkbits, b_state); in __nilfs_get_page_block()
40 bh = nilfs_page_get_nth_block(page, block - first_block); in __nilfs_get_page_block()
54 struct page *page; in nilfs_grab_buffer() local
76 struct page *page = bh->b_page; nilfs_forget_buffer() local
140 nilfs_page_buffers_clean(struct page *page) nilfs_page_buffers_clean() argument
153 nilfs_page_bug(struct page *page) nilfs_page_bug() argument
394 nilfs_clear_dirty_page(struct page *page, bool silent) nilfs_clear_dirty_page() argument
431 nilfs_page_count_clean_buffers(struct page *page, unsigned int from, unsigned int to) nilfs_page_count_clean_buffers() argument
458 __nilfs_clear_page_dirty(struct page *page) __nilfs_clear_page_dirty() argument
[all...]
/kernel/linux/linux-6.6/include/linux/
pagemap.h
145 * mapping_shrinkable - test if page cache state allows inode reclaim
146 * @mapping: the page cache mapping
171 * inodes before there is highmem pressure from the page in mapping_shrinkable()
184 * head pointer, which allows non-resident page cache entries in mapping_shrinkable()
385 struct address_space *page_mapping(struct page *);
393 * For folios which are in the page cache, return the mapping that this
394 * page belongs to. Folios in the swap cache return the mapping of the
399 * Do not call this for folios which aren't in the page cache or swap cache.
413 * For folios which are in the page cache, return the mapping that this
414 * page belongs to.
429 page_file_mapping(struct page *page) page_file_mapping() argument
504 attach_page_private(struct page *page, void *data) attach_page_private() argument
509 detach_page_private(struct page *page) detach_page_private() argument
890 page_to_index(struct page *page) page_to_index() argument
911 page_to_pgoff(struct page *page) page_to_pgoff() argument
921 page_offset(struct page *page) page_offset() argument
926 page_file_offset(struct page *page) page_file_offset() argument
1028 trylock_page(struct page *page) trylock_page() argument
1073 lock_page(struct page *page) lock_page() argument
1144 wait_on_page_locked(struct page *page) wait_on_page_locked() argument
1157 __set_page_dirty(struct page *page, struct address_space *mapping, int warn) __set_page_dirty() argument
1394 struct page *page; __readahead_batch() local
1407 VM_BUG_ON_PAGE(PageTail(page), page); __readahead_batch() local
1519 page_mkwrite_check_truncate(struct page *page, struct inode *inode) page_mkwrite_check_truncate() argument
1557 i_blocks_per_page(struct inode *inode, struct page *page) i_blocks_per_page() argument
[all...]
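
The trylock_page()/lock_page() hits above are the page-lock discipline used throughout the page cache: lock, recheck that the page still belongs to the mapping (it may have been truncated while we slept), then operate. A minimal sketch, with the helper name hypothetical:

    #include <linux/pagemap.h>

    static int demo_with_locked_page(struct page *page,
                                     struct address_space *mapping)
    {
            lock_page(page);                /* may sleep; see lock_page() above */
            if (page->mapping != mapping) { /* truncated while we waited */
                    unlock_page(page);
                    return -EAGAIN;
            }
            /* page is stable here: flags, index and contents can be trusted */
            unlock_page(page);
            return 0;
    }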
/third_party/ltp/testcases/kernel/mce-test/hwpoison/
tsimpleinj.c
39 void testmem(char *msg, char *page, int write) in testmem() argument
41 printf("%s page %p\n", msg, page); in testmem()
44 if (madvise(page, PS, MADV_POISON) != 0) { in testmem()
49 *page = 2; in testmem()
51 printf("%x\n", *(unsigned char *)page); in testmem()
81 char *page; in main() local
90 page = mmap(NULL, PS, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_POPULATE, 0, 0); in main()
91 testmem("dirty", page, 1); in main()
93 page in main()
[all...]
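
testmem() drives hwpoison injection through madvise(). A minimal standalone sketch of the same idea using the mainline MADV_HWPOISON flag (needs CAP_SYS_ADMIN and CONFIG_MEMORY_FAILURE; the mmap fd/flag details differ slightly from the test above):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long ps = sysconf(_SC_PAGESIZE);
            char *page = mmap(NULL, ps, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);

            if (page == MAP_FAILED)
                    return 1;
            page[0] = 1;                    /* dirty the page first */
            if (madvise(page, ps, MADV_HWPOISON) != 0) {
                    perror("madvise(MADV_HWPOISON)");
                    return 1;
            }
            /* any further access to the page should now raise SIGBUS */
            return 0;
    }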
/third_party/ltp/testcases/kernel/mce-test/tsrc/
tsimpleinj.c
39 void testmem(char *msg, char *page, int write) in testmem() argument
41 printf("%s page %p\n", msg, page); in testmem()
44 if (madvise(page, PS, MADV_POISON) != 0) { in testmem()
49 *page = 2; in testmem()
51 printf("%x\n", *(unsigned char *)page); in testmem()
81 char *page; in main() local
90 page = mmap(NULL, PS, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_POPULATE, 0, 0); in main()
91 testmem("dirty", page, 1); in main()
93 page in main()
[all...]
/kernel/linux/linux-5.10/arch/sparc/include/asm/
cacheflush_64.h
5 #include <asm/page.h>
26 #define flush_cache_page(vma, page, pfn) \
38 void flush_dcache_page_impl(struct page *page);
40 void smp_flush_dcache_page_impl(struct page *page, int cpu);
41 void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
43 #define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
[all...]
/kernel/linux/linux-5.10/fs/f2fs/
node.h
239 static inline nid_t ino_of_node(struct page *node_page) in ino_of_node()
245 static inline nid_t nid_of_node(struct page *node_page) in nid_of_node()
251 static inline unsigned int ofs_of_node(struct page *node_page) in ofs_of_node()
258 static inline __u64 cpver_of_node(struct page *node_page) in cpver_of_node()
264 static inline block_t next_blkaddr_of_node(struct page *node_page) in next_blkaddr_of_node()
270 static inline void fill_node_footer(struct page *page, nid_t nid, in fill_node_footer() argument
273 struct f2fs_node *rn = F2FS_NODE(page); in fill_node_footer()
289 static inline void copy_node_footer(struct page *dst, struct page *src) in copy_node_footer()
296 fill_node_footer_blkaddr(struct page *page, block_t blkaddr) fill_node_footer_blkaddr() argument
309 is_recoverable_dnode(struct page *page) is_recoverable_dnode() argument
391 is_cold_data(struct page *page) is_cold_data() argument
396 set_cold_data(struct page *page) set_cold_data() argument
401 clear_cold_data(struct page *page) clear_cold_data() argument
406 is_node(struct page *page, int type) is_node() argument
416 is_inline_node(struct page *page) is_inline_node() argument
421 set_inline_node(struct page *page) set_inline_node() argument
426 clear_inline_node(struct page *page) clear_inline_node() argument
431 set_cold_node(struct page *page, bool is_dir) set_cold_node() argument
443 set_mark(struct page *page, int mark, int type) set_mark() argument
454 f2fs_inode_chksum_set(F2FS_P_SB(page), page); set_mark() local
[all...]
/kernel/linux/linux-5.10/fs/ntfs/
aops.c
3 * aops.c - NTFS kernel address space operations and page cache handling.
39 * page has been completed and mark the page uptodate or set the error bit on
40 * the page. To determine the size of the records that need fixing up, we
49 struct page *page; in ntfs_end_buffer_async_read() local
54 page = bh->b_page; in ntfs_end_buffer_async_read()
55 vi = page->mapping->host; in ntfs_end_buffer_async_read()
64 file_ofs = ((s64)page->index << PAGE_SHIFT) + in ntfs_end_buffer_async_read()
82 kaddr = kmap_atomic(page); in ntfs_end_buffer_async_read()
164 ntfs_read_block(struct page *page) ntfs_read_block() argument
378 ntfs_readpage(struct file *file, struct page *page) ntfs_readpage() argument
530 ntfs_write_block(struct page *page, struct writeback_control *wbc) ntfs_write_block() argument
899 ntfs_write_mst_block(struct page *page, struct writeback_control *wbc) ntfs_write_mst_block() argument
1336 ntfs_writepage(struct page *page, struct writeback_control *wbc) ntfs_writepage() argument
1712 mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) mark_ntfs_record_dirty() argument
[all...]
compress.c
84 * zero_partial_compressed_page - zero out of bounds compressed page region
86 static void zero_partial_compressed_page(struct page *page, in zero_partial_compressed_page() argument
89 u8 *kp = page_address(page); in zero_partial_compressed_page()
92 ntfs_debug("Zeroing page region outside initialized size."); in zero_partial_compressed_page()
93 if (((s64)page->index << PAGE_SHIFT) >= initialized_size) { in zero_partial_compressed_page()
103 * handle_bounds_compressed_page - test for&handle out of bounds compressed page
105 static inline void handle_bounds_compressed_page(struct page *page, in handle_bounds_compressed_page() argument
108 if ((page in handle_bounds_compressed_page()
462 ntfs_read_compressed_block(struct page *page) ntfs_read_compressed_block() argument
[all...]
/kernel/linux/linux-6.6/fs/ntfs/
aops.c
3 * aops.c - NTFS kernel address space operations and page cache handling.
39 * page has been completed and mark the page uptodate or set the error bit on
40 * the page. To determine the size of the records that need fixing up, we
49 struct page *page; in ntfs_end_buffer_async_read() local
54 page = bh->b_page; in ntfs_end_buffer_async_read()
55 vi = page->mapping->host; in ntfs_end_buffer_async_read()
64 file_ofs = ((s64)page->index << PAGE_SHIFT) + in ntfs_end_buffer_async_read()
82 kaddr = kmap_atomic(page); in ntfs_end_buffer_async_read()
164 ntfs_read_block(struct page *page) ntfs_read_block() argument
380 struct page *page = &folio->page; ntfs_read_folio() local
531 ntfs_write_block(struct page *page, struct writeback_control *wbc) ntfs_write_block() argument
901 ntfs_write_mst_block(struct page *page, struct writeback_control *wbc) ntfs_write_mst_block() argument
1338 ntfs_writepage(struct page *page, struct writeback_control *wbc) ntfs_writepage() argument
1713 mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) mark_ntfs_record_dirty() argument
[all...]
compress.c
84 * zero_partial_compressed_page - zero out of bounds compressed page region
86 static void zero_partial_compressed_page(struct page *page, in zero_partial_compressed_page() argument
89 u8 *kp = page_address(page); in zero_partial_compressed_page()
92 ntfs_debug("Zeroing page region outside initialized size."); in zero_partial_compressed_page()
93 if (((s64)page->index << PAGE_SHIFT) >= initialized_size) { in zero_partial_compressed_page()
103 * handle_bounds_compressed_page - test for&handle out of bounds compressed page
105 static inline void handle_bounds_compressed_page(struct page *page, in handle_bounds_compressed_page() argument
108 if ((page in handle_bounds_compressed_page()
462 ntfs_read_compressed_block(struct page *page) ntfs_read_compressed_block() argument
[all...]
/kernel/linux/linux-5.10/arch/powerpc/mm/
pgtable-frag.c
4 * Handling Page Tables through page fragments
21 struct page *page; in pte_frag_destroy() local
23 page = virt_to_page(pte_frag); in pte_frag_destroy()
26 /* We allow PTE_FRAG_NR fragments from a PTE page */ in pte_frag_destroy()
27 if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) { in pte_frag_destroy()
28 pgtable_pte_page_dtor(page); in pte_frag_destroy()
29 __free_page(page); in pte_frag_destroy()
45 * If we have taken up all the fragments mark PTE page NULL in get_pte_from_cache()
58 struct page *page; in __alloc_for_ptecache() local
111 struct page *page = virt_to_page(table); pte_fragment_free() local
[all...]
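
pte_frag_destroy()/pte_fragment_free() above carve one page into PTE_FRAG_NR page-table fragments and free the backing page only when the shared refcount drains. A minimal sketch of the release step, assuming the powerpc-specific struct page field pt_frag_refcount (a generic illustration of the scheme, not the powerpc code verbatim):

    #include <linux/mm.h>

    /* Drop one fragment's reference; the last fragment tears the page down. */
    static void demo_put_pte_fragment(struct page *page)
    {
            if (atomic_dec_and_test(&page->pt_frag_refcount)) {
                    pgtable_pte_page_dtor(page); /* undo ctor accounting */
                    __free_page(page);
            }
    }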
/kernel/linux/common_modules/xpm/core/
xpm_hck_hooks.c
6 #include <asm/page.h>
85 * A xpm readonly region is an area where any page mapped
104 unsigned long addr, struct page *page, vm_fault_t *ret) in xpm_integrity_check()
106 if (!page) in xpm_integrity_check()
109 /* integrity violation: write a readonly page */ in xpm_integrity_check()
111 PageXPMReadonly(page)) { in xpm_integrity_check()
112 report_integrity_event(INTEGRITY_RO, vma, page); in xpm_integrity_check()
117 /* integrity violation: execute a writetained page */ in xpm_integrity_check()
118 if (PageXPMWritetainted(page) in xpm_integrity_check()
103 xpm_integrity_check(struct vm_area_struct *vma, unsigned int vflags, unsigned long addr, struct page *page, vm_fault_t *ret) xpm_integrity_check() argument
125 xpm_integrity_update(struct vm_area_struct *vma, unsigned int vflags, struct page *page) xpm_integrity_update() argument
140 xpm_integrity_validate(struct vm_area_struct *vma, unsigned int vflags, unsigned long addr, struct page *page, vm_fault_t *ret) xpm_integrity_validate() argument
156 xpm_integrity_equal(struct page *page, struct page *kpage, bool *ret) xpm_integrity_equal() argument
[all...]
/kernel/linux/linux-5.10/fs/erofs/
zdata.h
28 /* I: page offset of start position of decompression */
31 /* L: maximum relative page index in pagevec[] */
63 struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
100 struct page *page) in erofs_page_is_managed()
102 return page->mapping == MNGD_MAPPING(sbi); in erofs_page_is_managed()
110 * waiters (aka. ongoing_packs): # to unlock the page
111 * sub-index: 0 - for partial page, >= 1 full page sub-index
121 static inline unsigned int z_erofs_onlinepage_index(struct page *page) z_erofs_onlinepage_index() argument
99 erofs_page_is_managed(const struct erofs_sb_info *sbi, struct page *page) erofs_page_is_managed() argument
131 z_erofs_onlinepage_init(struct page *page) z_erofs_onlinepage_init() argument
144 z_erofs_onlinepage_fixup(struct page *page, uintptr_t index, bool down) z_erofs_onlinepage_fixup() argument
166 z_erofs_onlinepage_endio(struct page *page) z_erofs_onlinepage_endio() argument
[all...]
/kernel/linux/linux-5.10/mm/
ksm.c
68 * by their contents. Because each such page is write-protected, searching on
73 * mapping from a KSM page to virtual addresses that map this page.
82 * different KSM page copy of that content
106 * take 10 attempts to find a page in the unstable tree, once it is found,
107 * it is secured in the stable tree. (When we scan a new page, we first
146 * @node: rb node of this ksm page in the stable tree
150 * @hlist: hlist head of rmap_items using this ksm page
151 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
473 struct page *page; break_ksm() local
559 struct page *page; get_mergeable_page() local
699 struct page *page; get_ksm_page() local
779 struct page *page; remove_rmap_item_from_tree() local
863 page_stable_node(struct page *page) page_stable_node() argument
868 set_page_stable_node(struct page *page, struct stable_node *stable_node) set_page_stable_node() argument
880 struct page *page; remove_stable_node() local
1026 calc_checksum(struct page *page) calc_checksum() argument
1035 write_protect_page(struct vm_area_struct *vma, struct page *page, pte_t *orig_pte) write_protect_page() argument
1122 replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage, pte_t orig_pte) replace_page() argument
1204 try_to_merge_one_page(struct vm_area_struct *vma, struct page *page, struct page *kpage) try_to_merge_one_page() argument
1282 try_to_merge_with_ksm_page(struct rmap_item *rmap_item, struct page *page, struct page *kpage) try_to_merge_with_ksm_page() argument
1319 try_to_merge_two_pages(struct rmap_item *rmap_item, struct page *page, struct rmap_item *tree_rmap_item, struct page *tree_page) try_to_merge_two_pages() argument
1557 stable_tree_search(struct page *page) stable_tree_search() argument
1929 unstable_tree_search_insert(struct rmap_item *rmap_item, struct page *page, struct page **tree_pagep) unstable_tree_search_insert() argument
2041 cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) cmp_and_merge_page() argument
2230 scan_get_next_rmap_item(struct page **page) scan_get_next_rmap_item() argument
2263 struct page *page; scan_get_next_rmap_item() local
2396 struct page *page; ksm_do_scan() local
2574 ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address) ksm_might_need_to_copy() argument
2609 rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) rmap_walk_ksm() argument
[all...]
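
From userspace, pages only enter the stable/unstable trees described above after being registered with madvise(MADV_MERGEABLE), and with /sys/kernel/mm/ksm/run set to 1. A minimal sketch (helper name hypothetical):

    #include <string.h>
    #include <sys/mman.h>

    static char *alloc_mergeable(size_t len)
    {
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return NULL;
            memset(p, 0x5a, len);            /* identical contents can merge */
            madvise(p, len, MADV_MERGEABLE); /* register with the KSM scanner */
            return p;
    }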
shmem.c
94 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
105 pgoff_t next; /* the next page offset to be fallocated */
140 static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
141 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
144 struct page **pagep, enum sgp_type sgp,
148 struct page **pagep, enum sgp_type sgp,
153 struct page **pagep, enum sgp_type sgp) in shmem_getpage()
433 * Checking page is not enough: by the time a SwapCache page is
528 struct page *page; shmem_unused_huge_shrink() local
672 shmem_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t index, void *expected, gfp_t gfp, struct mm_struct *charge_mm) shmem_add_to_page_cache() argument
682 VM_BUG_ON_PAGE(PageTail(page), page); shmem_add_to_page_cache() local
745 shmem_delete_from_page_cache(struct page *page, void *radswap) shmem_delete_from_page_cache() argument
750 VM_BUG_ON_PAGE(PageCompound(page), page); shmem_delete_from_page_cache() local
789 struct page *page; shmem_partial_swap_usage() local
886 shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end) shmem_punch_compound() argument
931 struct page *page = pvec.pages[i]; shmem_undo_range() local
945 VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page); shmem_undo_range() local
952 VM_BUG_ON_PAGE(PageWriteback(page), page); shmem_undo_range() local
965 struct page *page = NULL; shmem_undo_range() local
980 struct page *page = NULL; shmem_undo_range() local
1008 struct page *page = pvec.pages[i]; shmem_undo_range() local
1035 VM_BUG_ON_PAGE(PageWriteback(page), page); shmem_undo_range() local
1198 struct page *page; shmem_find_swap_entries() local
1248 struct page *page = pvec.pages[i]; shmem_unuse_swap_entries() local
1364 shmem_writepage(struct page *page, struct writeback_control *wbc) shmem_writepage() argument
1372 VM_BUG_ON_PAGE(PageCompound(page), page); shmem_writepage() local
1527 struct page *page; shmem_swapin() local
1545 struct page *page; shmem_alloc_hugepage() local
1567 struct page *page; shmem_alloc_page() local
1581 struct page *page; shmem_alloc_and_acct_page() local
1620 shmem_should_replace_page(struct page *page, gfp_t gfp) shmem_should_replace_page() argument
1706 struct page *page; shmem_swapin_page() local
1807 struct page *page; shmem_getpage_gfp() local
2366 struct page *page; shmem_mfill_atomic_pte() local
2500 struct page *page = NULL; shmem_mfill_zeropage_pte() local
2538 shmem_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) shmem_write_end() argument
2597 struct page *page = NULL; shmem_file_read_iter() local
2689 struct page *page; shmem_seek_hole_data() local
2846 struct page *page; shmem_fallocate() local
3149 struct page *page; shmem_symlink() local
3207 struct page *page = NULL; shmem_get_link() local
4332 struct page *page; shmem_read_mapping_page_gfp() local
[all...]
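
The shmem_getpage()/shmem_writepage() machinery above backs tmpfs and memfd; from userspace the simplest consumer is memfd_create(). A minimal sketch (helper name hypothetical; needs glibc 2.27+):

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <unistd.h>

    static void *demo_shmem_buffer(size_t len)
    {
            int fd = memfd_create("demo", 0); /* anonymous shmem-backed file */
            void *p;

            if (fd < 0)
                    return NULL;
            if (ftruncate(fd, len) != 0) {
                    close(fd);
                    return NULL;
            }
            p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            close(fd);                        /* the mapping keeps the file alive */
            return p == MAP_FAILED ? NULL : p;
    }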
/kernel/linux/linux-5.10/arch/nds32/mm/
cacheflush.c
28 void flush_icache_page(struct vm_area_struct *vma, struct page *page) in flush_icache_page() argument
33 kaddr = (unsigned long)kmap_atomic(page); in flush_icache_page()
39 void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, in flush_icache_user_page() argument
43 kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK); in flush_icache_user_page()
51 struct page *page; in update_mmu_cache() local
65 page = pfn_to_page(pfn); in update_mmu_cache()
67 if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) in update_mmu_cache()
80 aliasing(unsigned long addr, unsigned long page) aliasing() argument
187 clear_user_page(void *addr, unsigned long vaddr, struct page *page) clear_user_page() argument
218 clear_user_highpage(struct page *page, unsigned long vaddr) clear_user_highpage() argument
237 flush_dcache_page(struct page *page) flush_dcache_page() argument
265 copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, void *src, int len) copy_to_user_page() argument
286 copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, void *src, int len) copy_from_user_page() argument
299 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr) flush_anon_page() argument
321 flush_kernel_dcache_page(struct page *page) flush_kernel_dcache_page() argument
[all...]
/kernel/linux/linux-5.10/include/linux/
fscache.h
34 * a page is currently backed by a local disk cache
36 #define PageFsCache(page) PagePrivate2((page))
37 #define SetPageFsCache(page) SetPagePrivate2((page))
38 #define ClearPageFsCache(page) ClearPagePrivate2((page))
39 #define TestSetPageFsCache(page) TestSetPagePrivate2((page))
40 #define TestClearPageFsCache(page) TestClearPagePrivate2((page))
540 fscache_read_or_alloc_page(struct fscache_cookie *cookie, struct page *page, fscache_rw_complete_t end_io_func, void *context, gfp_t gfp) fscache_read_or_alloc_page() argument
624 fscache_alloc_page(struct fscache_cookie *cookie, struct page *page, gfp_t gfp) fscache_alloc_page() argument
674 fscache_write_page(struct fscache_cookie *cookie, struct page *page, loff_t object_size, gfp_t gfp) fscache_write_page() argument
700 fscache_uncache_page(struct fscache_cookie *cookie, struct page *page) fscache_uncache_page() argument
718 fscache_check_page_write(struct fscache_cookie *cookie, struct page *page) fscache_check_page_write() argument
738 fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page) fscache_wait_on_page_write() argument
761 fscache_maybe_release_page(struct fscache_cookie *cookie, struct page *page, gfp_t gfp) fscache_maybe_release_page() argument
[all...]
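
PG_fscache aliases PG_private_2, so a netfs ->releasepage() typically defers to fscache before letting the VM free a page. A minimal sketch under the 5.10 API; demo_cookie_for() is a hypothetical lookup of the file's fscache cookie:

    #include <linux/fscache.h>

    static int demo_releasepage(struct page *page, gfp_t gfp)
    {
            struct fscache_cookie *cookie = demo_cookie_for(page); /* hypothetical */

            if (PageFsCache(page))
                    return fscache_maybe_release_page(cookie, page, gfp);
            return 1;       /* nothing cached against this page: OK to release */
    }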
memcontrol.h
23 #include <linux/page-flags.h>
28 struct page;
32 /* Cgroup-specific page state, on top of universal node page state */
57 static inline bool is_prot_page(struct page *page) in is_prot_page() argument
213 * page cache and RSS per cgroup. We would eventually like to provide
458 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
460 void mem_cgroup_uncharge(struct page *page);
766 __mod_memcg_page_state(struct page *page, int idx, int val) __mod_memcg_page_state() argument
773 mod_memcg_page_state(struct page *page, int idx, int val) mod_memcg_page_state() argument
866 is_file_page(struct page *page) is_file_page() argument
876 __mod_lruvec_page_state(struct page *page, enum node_stat_item idx, int val) __mod_lruvec_page_state() argument
901 mod_lruvec_page_state(struct page *page, enum node_stat_item idx, int val) mod_lruvec_page_state() argument
929 count_memcg_page_event(struct page *page, enum vm_event_item idx) count_memcg_page_event() argument
1043 mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) mem_cgroup_charge() argument
1049 mem_cgroup_uncharge(struct page *page) mem_cgroup_uncharge() argument
1067 mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat) mem_cgroup_page_lruvec() argument
1089 get_mem_cgroup_from_page(struct page *page) get_mem_cgroup_from_page() argument
1171 lock_page_memcg(struct page *page) lock_page_memcg() argument
1180 unlock_page_memcg(struct page *page) unlock_page_memcg() argument
1239 __mod_memcg_page_state(struct page *page, int idx, int nr) __mod_memcg_page_state() argument
1245 mod_memcg_page_state(struct page *page, int idx, int nr) mod_memcg_page_state() argument
1280 __mod_lruvec_page_state(struct page *page, enum node_stat_item idx, int val) __mod_lruvec_page_state() argument
1286 mod_lruvec_page_state(struct page *page, enum node_stat_item idx, int val) mod_lruvec_page_state() argument
1295 struct page *page = virt_to_head_page(p); __mod_lruvec_slab_state() local
1303 struct page *page = virt_to_head_page(p); mod_lruvec_slab_state() local
1336 count_memcg_page_event(struct page *page, int idx) count_memcg_page_event() argument
1362 __inc_memcg_page_state(struct page *page, int idx) __inc_memcg_page_state() argument
1369 __dec_memcg_page_state(struct page *page, int idx) __dec_memcg_page_state() argument
1387 __inc_lruvec_page_state(struct page *page, enum node_stat_item idx) __inc_lruvec_page_state() argument
1393 __dec_lruvec_page_state(struct page *page, enum node_stat_item idx) __dec_lruvec_page_state() argument
1424 inc_memcg_page_state(struct page *page, int idx) inc_memcg_page_state() argument
1431 dec_memcg_page_state(struct page *page, int idx) dec_memcg_page_state() argument
1449 inc_lruvec_page_state(struct page *page, enum node_stat_item idx) inc_lruvec_page_state() argument
1455 dec_lruvec_page_state(struct page *page, enum node_stat_item idx) dec_lruvec_page_state() argument
1484 mem_cgroup_track_foreign_dirty(struct page *page, struct bdi_writeback *wb) mem_cgroup_track_foreign_dirty() argument
1511 mem_cgroup_track_foreign_dirty(struct page *page, struct bdi_writeback *wb) mem_cgroup_track_foreign_dirty() argument
1591 memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) memcg_kmem_charge_page() argument
1599 memcg_kmem_uncharge_page(struct page *page, int order) memcg_kmem_uncharge_page() argument
1634 memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) memcg_kmem_charge_page() argument
1640 memcg_kmem_uncharge_page(struct page *page, int order) memcg_kmem_uncharge_page() argument
1644 __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) __memcg_kmem_charge_page() argument
1650 __memcg_kmem_uncharge_page(struct page *page, int order) __memcg_kmem_uncharge_page() argument
[all...]
/kernel/linux/linux-6.6/fs/erofs/
compress.h
13 struct page **in, **out;
27 struct page **pagepool);
31 /* some special page->private (unsigned long, see below) */
36 * For all pages in a pcluster, page->private should be one of
37 * Type Last 2bits page->private
38 * short-lived page 00 Z_EROFS_SHORTLIVED_PAGE
39 * preallocated page (tryalloc) 00 Z_EROFS_PREALLOCATED_PAGE
40 * cached/managed page 00 pointer to z_erofs_pcluster
41 * online page (file-backed, 01/10/11 sub-index << 2 | count
44 * page
60 z_erofs_is_shortlived_page(struct page *page) z_erofs_is_shortlived_page() argument
69 z_erofs_put_shortlivedpage(struct page **pagepool, struct page *page) z_erofs_put_shortlivedpage() argument
86 erofs_page_is_managed(const struct erofs_sb_info *sbi, struct page *page) erofs_page_is_managed() argument
[all...]
/kernel/linux/linux-6.6/arch/x86/include/asm/
set_memory.h
6 #include <asm/page.h>
32 * operation on other mappings of the same physical page
33 * - Restore default attributes when a page is freed
57 int set_pages_array_uc(struct page **pages, int addrinarray);
58 int set_pages_array_wc(struct page **pages, int addrinarray);
59 int set_pages_array_wb(struct page **pages, int addrinarray);
63 * are provided that work on a "struct page".
65 * memory that the struct page represents, and internally just
76 * get a struct page* that the old API required.
81 int set_pages_uc(struct page *page, int numpages);
[all...]
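
set_pages_uc()/set_pages_wb() change a page's caching attributes across all mappings, which is why they operate on whole pages. A minimal sketch of the usual bracketed usage (x86-only; the helper name and device hand-off are hypothetical):

    #include <asm/set_memory.h>

    static int demo_make_uncached(struct page *page, int numpages)
    {
            int ret = set_pages_uc(page, numpages); /* flush + rewrite PTEs */

            if (ret)
                    return ret;
            /* ... share the memory with a non-coherent device ... */
            return set_pages_wb(page, numpages);    /* restore write-back */
    }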
/kernel/linux/linux-5.10/drivers/xen/
balloon.c
63 #include <asm/page.h>
74 #include <xen/page.h>
121 * Use one extent per PAGE_SIZE to avoid to break down the page into
150 /* We increase/decrease in batches which fit in a page */
163 /* balloon_append: add the given page to the balloon. */
164 static void balloon_append(struct page *page) in balloon_append() argument
166 __SetPageOffline(page); in balloon_append()
169 if (PageHighMem(page)) { in balloon_append()
170 list_add_tail(&page->lru, &ballooned_pages); in balloon_append()
182 struct page *page; balloon_retrieve() local
201 balloon_next_page(struct page *page) balloon_next_page() argument
356 xen_online_page(struct page *page, unsigned int order) xen_online_page() argument
406 struct page *page; increase_reservation() local
445 struct page *page, *tmp; decrease_reservation() local
619 struct page *page; alloc_xenballooned_pages() local
[all...]
/kernel/linux/linux-6.6/drivers/xen/
balloon.c
64 #include <asm/page.h>
75 #include <xen/page.h>
105 * Use one extent per PAGE_SIZE to avoid to break down the page into
134 /* We increase/decrease in batches which fit in a page */
147 /* balloon_append: add the given page to the balloon. */
148 static void balloon_append(struct page *page) in balloon_append() argument
150 __SetPageOffline(page); in balloon_append()
153 if (PageHighMem(page)) { in balloon_append()
154 list_add_tail(&page->lru, &ballooned_pages); in balloon_append()
166 struct page *page; balloon_retrieve() local
185 balloon_next_page(struct page *page) balloon_next_page() argument
340 xen_online_page(struct page *page, unsigned int order) xen_online_page() argument
390 struct page *page; increase_reservation() local
429 struct page *page, *tmp; decrease_reservation() local
603 struct page *page; xen_alloc_ballooned_pages() local
[all...]

