/kernel/linux/linux-5.10/fs/ecryptfs/ |
mmap.c |
	/*
	 * Get one page from cache or lower f/s, return error otherwise.
	 * Returns a locked and up-to-date page (if ok), with increased refcount.
	 */
	struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index)
	{
		struct page *page = read_mapping_page(inode->i_mapping, index, NULL);

		if (!IS_ERR(page))
			lock_page(page);
		return page;
	}
	Other page-path entry points here: ecryptfs_writepage(), ecryptfs_copy_up_encrypted_with_header(), ecryptfs_readpage(), fill_zeros_to_end_of_page(), ecryptfs_write_begin(), ecryptfs_write_end().
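	A hedged caller-side sketch of the helper above; the wrapper function is hypothetical, but the error-pointer check and the unlock/put pair follow the usual page-cache conventions:

	/* Hypothetical caller; only ecryptfs_get_locked_page() is real. */
	static int example_touch_page(struct inode *inode, loff_t index)
	{
		struct page *page = ecryptfs_get_locked_page(inode, index);

		if (IS_ERR(page))
			return PTR_ERR(page);
		/* ... page is locked and up to date here ... */
		unlock_page(page);
		put_page(page);	/* drop the reference read_mapping_page() took */
		return 0;
	}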
/kernel/linux/linux-5.10/mm/ |
internal.h |
	struct page *find_get_entry(struct address_space *mapping, pgoff_t index);
	struct page *find_lock_entry(struct address_space *mapping, pgoff_t index);

	/*
	 * page_evictable - test whether a page is evictable
	 * @page: the page to test
	 *
	 * Test whether the page is evictable, i.e. should be placed on the
	 * active/inactive LRU lists. Reasons a page might not be evictable:
	 * (1) the page's mapping is marked unevictable
	 * (2) the page is part of an mlocked VMA
	 */
	Other page helpers defined here: set_page_refcounted(), buddy_order(), mlock_migrate_page(), vma_address() and vma_address_end() (both assert !PageKsm(), since a KSM page's page->index is unusable), clear_page_mlock(), mlock_vma_page(), is_migrate_highatomic_page().
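	The test itself reduces to the two conditions listed above; a sketch consistent with the 5.10 sources (reconstructed from the comment, so treat details as unverified):

	static inline bool page_evictable(struct page *page)
	{
		bool ret;

		/* Prevent address_space of inode and swap cache from being freed */
		rcu_read_lock();
		ret = !mapping_unevictable(page_mapping(page)) &&
		      !PageMlocked(page);
		rcu_read_unlock();
		return ret;
	}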
gup.c |
	static void hpage_pincount_add(struct page *page, int refs)
	{
		VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
		VM_BUG_ON_PAGE(page != compound_head(page), page);

		atomic_add(refs, compound_pincount_ptr(page));
	}
	hpage_pincount_sub() is the symmetric decrement. Other GUP internals defined here: put_page_refs(), try_get_compound_head(), try_grab_compound_head(), put_compound_head(), try_grab_page(), unpin_user_page(), unpin_user_pages_dirty_lock(), follow_page_pte(), follow_pmd_mask(), follow_pud_mask(), follow_p4d_mask(), follow_page_mask(), follow_page(), get_gate_page(), __get_user_pages(), get_dump_page(), undo_dev_pagemap(), gup_pte_range(), __gup_device_huge(), record_subpages(), gup_hugepte(), gup_huge_pmd(), gup_huge_pud(), gup_huge_pgd().
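	A hedged driver-side fragment using the public pin/unpin API that wraps the internals listed above; the wrapper function, uaddr parameter and NPAGES constant are assumptions:

	#define NPAGES 16
	/* Hypothetical: pin user pages for DMA, then mark dirty and release. */
	static int example_pin_for_dma(unsigned long uaddr)
	{
		struct page *pages[NPAGES];
		int n = pin_user_pages_fast(uaddr, NPAGES, FOLL_WRITE, pages);

		if (n <= 0)
			return n ? n : -EFAULT;
		/* ... program the DMA engine with the pinned pages ... */
		unpin_user_pages_dirty_lock(pages, n, true);	/* dirty + unpin */
		return 0;
	}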
debug_page_ref.c |
	void __page_ref_set(struct page *page, int v)
	{
		trace_page_ref_set(page, v);
	}

	void __page_ref_mod(struct page *page, int v)
	{
		trace_page_ref_mod(page, v);
	}

	void __page_ref_mod_and_test(struct page *page, int v, int ret)
	{
		trace_page_ref_mod_and_test(page, v, ret);
	}
	__page_ref_mod_and_return(), __page_ref_mod_unless(), __page_ref_freeze() and __page_ref_unfreeze() are analogous shims that fire the matching page_ref tracepoint.
huge_memory.c |
	/* Defrag is invoked by khugepaged hugepage allocations and by page faults. */
	struct page *huge_zero_page __read_mostly;
	get_huge_zero_page() allocates the shared huge zero page; mm_get_huge_zero_page() takes a per-mm reference to it. In shrink_huge_zero_page_count(): /* we can free zero page only if last reference remains */, and shrink_huge_zero_page_scan() detaches it with:
		struct page *zero_page = xchg(&huge_zero_page, NULL);
	hugepage_init() also notes that page->mapping and page->index are reused in the second tail page. THP entry points defined here include: get_deferred_split_queue(), prep_transhuge_page(), is_transparent_hugepage(), __do_huge_pmd_anonymous_page(), do_huge_pmd_anonymous_page(), follow_devmap_pmd(), follow_devmap_pud(), do_huge_pmd_wp_page(), follow_trans_huge_pmd(), do_huge_pmd_numa_page(), madvise_free_huge_pmd(), zap_huge_pmd(), __split_huge_pmd_locked(), __split_huge_pmd(), split_huge_pmd_address(), unmap_page(), remap_page(), __split_huge_page(), total_mapcount(), page_trans_huge_mapcount(), can_split_huge_page(), split_huge_page_to_list(), free_transhuge_page(), deferred_split_huge_page(), deferred_split_scan(), split_huge_pages_set(), set_pmd_migration_entry().
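	The "free only on last reference" rule above reduces to a cmpxchg on the refcount; a sketch of that pattern (names per the 5.10 shrinker, simplified: the real scan also clears the cached PFN and returns HPAGE_PMD_NR):

	static unsigned long example_shrink_huge_zero_page(void)
	{
		/* Free only if ours was the last reference (refcount 1 -> 0). */
		if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
			struct page *zero_page = xchg(&huge_zero_page, NULL);

			BUG_ON(zero_page == NULL);
			__free_pages(zero_page, compound_order(zero_page));
			return 1;
		}
		return 0;
	}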
/kernel/linux/linux-5.10/arch/arm/mm/ |
flush.c |
	void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
				   unsigned long len, unsigned int flags)
	{
		...
		flush_pfn_alias(page_to_pfn(page), uaddr);
		...
		flush_icache_alias(page_to_pfn(page), uaddr, len);
		...
	}
	flush_ptrace_access() and flush_uprobe_xol_access() wrap the helper above. The other cache-maintenance entry points are copy_to_user_page(), __flush_dcache_page(), __flush_dcache_aliases(), __sync_icache_dcache(), flush_dcache_page(), flush_kernel_dcache_page() and __flush_anon_page().
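	A short sketch of why flush_dcache_page() exists on aliasing ARM caches; the wrapper function is hypothetical, the kmap/memcpy/kunmap/flush sequence is the canonical pattern:

	/* Hypothetical kernel-side write to a page that userspace may also
	 * have mapped: the explicit flush pushes dirty lines past the alias. */
	static void example_fill_user_page(struct page *page, const void *src,
					   size_t len)
	{
		void *kaddr = kmap(page);

		memcpy(kaddr, src, len);
		kunmap(page);
		flush_dcache_page(page);
	}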
/kernel/linux/linux-5.10/net/ceph/ |
pagelist.c |
	ceph_pagelist_unmap_tail() unmaps the current tail page:
		struct page *page = list_entry(pl->head.prev, struct page, lru);
		kunmap(page);
	ceph_pagelist_release() frees every page on the list:
		struct page *page = list_first_entry(&pl->head, struct page, lru);
		list_del(&page->lru);
		__free_page(page);
	ceph_pagelist_reserve() allocates ahead of time with __page_cache_alloc(GFP_NOFS); see also ceph_pagelist_addpage(), ceph_pagelist_free_reserve() and ceph_pagelist_truncate().
/kernel/linux/linux-6.6/net/ceph/ |
pagelist.c | The linux-6.6 copy carries the same page handling as the linux-5.10 file above: ceph_pagelist_unmap_tail() kunmaps the tail page, ceph_pagelist_release() walks pl->head freeing each page, ceph_pagelist_reserve() allocates with __page_cache_alloc(GFP_NOFS), plus ceph_pagelist_addpage(), ceph_pagelist_free_reserve() and ceph_pagelist_truncate().
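	A hedged in-kernel usage sketch of this API; the wrapper function is hypothetical, while ceph_pagelist_alloc()/append()/release() are the real entry points:

	static int example_build_pagelist(void)
	{
		struct ceph_pagelist *pl = ceph_pagelist_alloc(GFP_NOFS);
		int ret;

		if (!pl)
			return -ENOMEM;
		/* Copies through the kmap'd tail page, growing the list on demand. */
		ret = ceph_pagelist_append(pl, "payload", 7);
		ceph_pagelist_release(pl);	/* kunmaps the tail, frees every page */
		return ret;
	}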
/kernel/linux/linux-5.10/fs/afs/ |
write.c |
	/* Mark a page as having been made dirty and thus needing writeback: */
	int afs_set_page_dirty(struct page *page)
	{
		return __set_page_dirty_nobuffers(page);
	}
	afs_fill_page() partly or wholly fills a page that is under preparation for writing: it kmap()s the page, reads the data in, kunmap()s it, and stashes the page in req->pages[0]. Write-path helpers: afs_write_begin(), afs_write_end(), afs_kill_pages(), afs_redirty_pages(), afs_write_back_from_locked_page(), afs_writepage(), afs_writepages_region(), afs_launder_page().
file.c |
	static int afs_readpage(struct file *file, struct page *page);
	static void afs_invalidatepage(struct page *page, unsigned int offset,
				       unsigned int length);
	static int afs_releasepage(struct page *page, gfp_t gfp_flags);
	afs_file_readpage_read_complete() handles notification that a page was read from the cache. Read-path helpers: afs_page_filler(), afs_readpages_page_done(), afs_readpages_one(), afs_invalidate_dirty().
/kernel/linux/linux-5.10/include/linux/ |
pageblock-flags.h |
	/* Huge page sizes are variable */
	struct page;

	unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					      unsigned long mask);
	void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
				     unsigned long pfn, unsigned long mask);

	#define get_pageblock_skip(page) \
		get_pfnblock_flags_mask(page, page_to_pfn(page), ...)
	#define clear_pageblock_skip(page) ...
	When compaction is not configured, get_pageblock_skip(), clear_pageblock_skip() and set_pageblock_skip() are provided as inline no-op stubs instead.
mm_inline.h |
	/*
	 * page_is_file_lru - should the page be on a file LRU or anon LRU?
	 * @page: the page to test
	 *
	 * Returns 1 if @page is a regular filesystem-backed page cache page or
	 * a lazily freed anonymous page (e.g. via MADV_FREE). Returns 0 if
	 * @page is a normal anonymous page, a tmpfs page or another RAM- or
	 * swap-backed page.
	 */
	LRU list helpers defined here: add_page_to_lru_list(), add_page_to_lru_list_tail(), del_page_from_lru_list(), page_lru_base_type(), page_off_lru(), page_lru().
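	The predicate itself is a one-liner; a sketch consistent with the 5.10 sources (hedged: reconstructed, not copied):

	/* File-LRU pages are exactly the ones that are not swap-backed. */
	static inline int page_is_file_lru(struct page *page)
	{
		return !PageSwapBacked(page);
	}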
/kernel/linux/linux-6.6/include/net/page_pool/ |
helpers.h |
	/*
	 * The page_pool fast path uses one frame per page, but it can fall
	 * back on the regular page allocator APIs. Drivers must call
	 * page_pool_put_page() to free the page, or attach the page to a
	 * page_pool-aware object such as an skb marked for recycling.
	 * API users must call page_pool_put_page() once on a page: it will
	 * either recycle the page or, in case of refcnt > 1, release it.
	 * Drivers that wish to harvest page pool stats and report them to
	 * users can do so via the stats API.
	 *
	 * page_pool_dev_alloc_pages() - allocate a page; gets a page from the
	 * page pool, falling back to the page allocator.
	 */
	Inline helpers defined here: page_pool_fragment_page(), page_pool_defrag_page(), page_pool_put_page(), page_pool_put_full_page(), page_pool_recycle_direct(), page_pool_get_dma_addr(), page_pool_set_dma_addr().
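	A hedged NAPI receive sketch using the API described above; the poll function, pool setup and DMA details are assumptions:

	/* Hypothetical RX path: allocate from the pool, then return the page
	 * to it exactly once. */
	static int example_rx_poll(struct page_pool *pool)
	{
		struct page *page = page_pool_dev_alloc_pages(pool);

		if (!page)
			return -ENOMEM;
		/* ... DMA the frame into the page and build the skb here ... */

		/* Single put: recycles when possible, frees otherwise. */
		page_pool_put_full_page(pool, page, true /* in NAPI context */);
		return 0;
	}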
/foundation/arkui/ace_engine_lite/frameworks/src/core/modules/presets/test/unittest/common/ |
timer_module_tdd_test.cpp |
	TriggerTimer() reads the page object's "value" property:
		if (!JSObject::Has(page, "value")) { ... }
		JSValue value = JSObject::Get(page, "value");
		content = JSObject::GetString(page, "value");
	Each HWTEST_F case reads a JS bundle (e.g. setTimeout_test001.js), evaluates it into a page, drives the timers and tears the page down:
		JSValue page = CreatePage(SET_TIMEOUT_001, strlen(SET_TIMEOUT_001));
		EXPECT_FALSE(JSUndefined::Is(page));
		char *content = TriggerTimer(xrate, yrate, sleepTime, page);
		DestroyPage(page);
	Further cases cover INTERVAL_TEST_003, TIME_OUT_004, ARG_TEST_005, CLEAR_TIME_OUT_006, INTERVAL_TEST_007, CLEAR_TIME_OUT_008, BUNDLE_09 and BUNDLE_10.
/kernel/linux/linux-6.6/mm/kmsan/ |
shadow.c |
	#define shadow_page_for(page) ((page)->kmsan_shadow)
	#define origin_page_for(page) ((page)->kmsan_origin)

	static void *shadow_ptr_for(struct page *page)
	{
		return page_address(shadow_page_for(page));
	}

	static void *origin_ptr_for(struct page *page)
	{
		return page_address(origin_page_for(page));
	}
	Metadata management: page_has_metadata(), set_no_shadow_origin_page(), kmsan_get_metadata(), kmsan_alloc_page(), kmsan_free_page(), kmsan_init_alloc_meta_for_range(), kmsan_setup_meta().
/kernel/linux/linux-5.10/fs/gfs2/ |
aops.c |
	gfs2_page_add_databufs() attaches journaled-data buffers, starting from:
		struct buffer_head *head = page_buffers(page);

	/* gfs2_writepage - write a page for writeback mappings */
	static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
	{
		struct inode *inode = page->mapping->host;
		...
		return iomap_writepage(page, wbc, ...);
	}
	Journaled-data and read paths: gfs2_write_jdata_page(), __gfs2_jdata_writepage(), gfs2_jdata_writepage(), gfs2_write_jdata_pagevec(), stuffed_readpage(), __gfs2_readpage(), gfs2_readpage(), gfs2_internal_read(), jdata_set_page_dirty(), gfs2_invalidatepage(), gfs2_releasepage().
/kernel/linux/linux-6.6/fs/ubifs/ |
file.c |
	/*
	 * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is
	 * set if the page is dirty and is used for optimization purposes -
	 * dirty pages already carry the budget for the page. The @PG_checked
	 * flag is set if full budgeting is required for the page, e.g. when it
	 * corresponds to a file hole or it is beyond the file size. The flags
	 * record information about how the page was budgeted, to make it
	 * possible to release the budget correctly; in 'ubifs_writepage()' we
	 * are only guaranteed that the page is locked.
	 */
	Page paths defined here: do_readpage(), write_begin_slow(), allocate_budget(), ubifs_write_begin(), cancel_budget(), ubifs_write_end(), populate_page(), ubifs_do_bulk_read(), ubifs_bulk_read(), ubifs_read_folio(), do_writepage(), ubifs_writepage(), do_truncation(), ubifs_vm_page_mkwrite().
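	A minimal sketch of the flag convention described above; the helper name and parameters are hypothetical, and the real logic lives in ubifs_write_begin()/ubifs_write_end():

	/* Hypothetical helper, not the actual ubifs code. */
	static void example_mark_budgeted(struct page *page, bool needs_full_budget)
	{
		if (needs_full_budget)
			SetPageChecked(page);	/* hole or beyond i_size */
		if (!PagePrivate(page))
			SetPagePrivate(page);	/* dirty: budget already charged */
	}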
/kernel/linux/linux-5.10/fs/nfs/ |
fscache.h |
	extern void __nfs_fscache_invalidate_page(struct page *, struct inode *);
	extern int nfs_fscache_release_page(struct page *, gfp_t);
	extern void __nfs_readpage_to_fscache(struct inode *, struct page *, int);

	/* Wait for a page to complete writing to the cache: */
	static inline void nfs_fscache_wait_on_page_write(struct nfs_inode *nfsi,
							  struct page *page)
	{
		if (PageFsCache(page))
			fscache_wait_on_page_write(nfsi->fscache, page);
	}
	Wrappers (with no-op stubs when fscache is disabled): nfs_fscache_invalidate_page(), which releases the caching state associated with a page, nfs_readpage_from_fscache(), nfs_readpage_to_fscache(), nfs_fscache_release_page().
/kernel/liteos_a/kernel/base/vm/ |
los_vm_page.c |
	STATIC VOID OsVmPageInit(LosVmPage *page, paddr_t pa, UINT8 segID)
	{
		LOS_ListInit(&page->node);
		page->flags = FILE_PAGE_FREE;
		LOS_AtomicSet(&page->refCounts, 0);
		page->physAddr = pa;
		page->segID = segID;
		page->order = VM_LIST_ORDER_MAX;
		page->nPages = 0;
		LOS_SpinInit(&page->lock);
	}
	See also OsVmPageOrderListInit(), OsVmPageStartup() and LOS_VmPageGet().
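	A hedged lookup sketch; the assumption (taken from the listing, not verified against the headers) is that LOS_VmPageGet() maps a physical address back to its LosVmPage descriptor:

	STATIC VOID ExampleRefPage(paddr_t physAddr)
	{
		LosVmPage *page = LOS_VmPageGet(physAddr);
		if (page != NULL) {
			LOS_AtomicInc(&page->refCounts);	/* take a reference */
		}
	}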
/kernel/linux/linux-6.6/mm/ |
debug_page_ref.c | Identical tracepoint shims to the linux-5.10 copy above: __page_ref_set(), __page_ref_mod(), __page_ref_mod_and_test(), __page_ref_mod_and_return(), __page_ref_mod_unless(), __page_ref_freeze() and __page_ref_unfreeze() each forward to the matching trace_page_ref_*() call.
/kernel/linux/linux-6.6/include/linux/ |
bootmem_info.h |
	/*
	 * Types for free bootmem stored in page->lru.next. These have to be in
	 * some random range in unsigned long space for debugging purposes.
	 */
	void get_page_bootmem(unsigned long info, struct page *page,
			      unsigned long type);
	void put_page_bootmem(struct page *page);

	static inline void free_bootmem_page(struct page *page)
	{
		unsigned long magic = page->index;
		...
		VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
		...
	}
	When bootmem info is not compiled in, put_page_bootmem(), get_page_bootmem() and free_bootmem_page() reduce to trivial stubs.
/kernel/linux/linux-5.10/fs/ |
buffer.c |
	/*
	 * buffer_check_dirty_writeback() reports whether the page has dirty or
	 * writeback buffers. It requires the page lock:
	 */
	void buffer_check_dirty_writeback(struct page *page,
					  bool *dirty, bool *writeback)
	{
		struct buffer_head *head;

		BUG_ON(!PageLocked(page));
		if (!page_has_buffers(page))
			return;
		if (PageWriteback(page))
			*writeback = true;
		head = page_buffers(page);
		...
	}
	end_buffer_write_sync() reports "lost sync page write" on I/O error. As the comments note, it is the page lock which protects the buffers; where the contention cost would be high, the code could TryLock the page to avoid taking private_lock. Buffer-head page paths defined here: __find_get_block_slow(), end_buffer_async_read(), end_buffer_async_write(), __set_page_dirty(), __set_page_dirty_buffers(), alloc_page_buffers(), link_dev_buffers(), init_page_buffers(), grow_dev_page(), mark_buffer_dirty(), set_bh_page(), block_invalidatepage(), create_empty_buffers(), clean_bdev_aliases(), create_page_buffers(), __block_write_full_page(), page_zero_new_buffers(), __block_write_begin_int(), __block_write_begin(), __block_commit_write(), block_write_begin(), block_write_end(), generic_write_end(), block_is_partially_uptodate(), block_read_full_page(), generic_cont_expand_simple(), cont_expand_zero(), block_commit_write(), block_page_mkwrite(), attach_nobh_buffers(), nobh_write_begin(), nobh_write_end(), nobh_writepage(), nobh_truncate_page(), block_truncate_page(), block_write_full_page(), drop_buffers(), try_to_free_buffers().
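	A hedged sketch of how a simple filesystem delegates to the helpers listed above; "myfs" and myfs_get_block() (which would map file blocks to disk blocks) are assumptions:

	static int myfs_get_block(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh_result, int create);

	static int myfs_write_begin(struct file *file, struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned flags,
				    struct page **pagep, void **fsdata)
	{
		/* block_write_begin() finds/creates the page and its buffers. */
		return block_write_begin(mapping, pos, len, flags, pagep,
					 myfs_get_block);
	}

	static const struct address_space_operations myfs_aops = {
		.write_begin	= myfs_write_begin,
		.write_end	= generic_write_end,	/* commits the buffers */
	};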
/kernel/linux/linux-5.10/fs/ceph/ |
cache.h |
	int ceph_readpage_from_fscache(struct inode *inode, struct page *page);
	void ceph_readpage_to_fscache(struct inode *inode, struct page *page);
	void ceph_invalidate_fscache_page(struct inode *inode, struct page *page);

	static inline void ceph_fscache_uncache_page(struct inode *inode,
						     struct page *page)
	{
		struct ceph_inode_info *ci = ceph_inode(inode);
		return fscache_uncache_page(ci->fscache, page);
	}
	With fscache disabled, these plus ceph_release_fscache_page() and ceph_fscache_readpage_cancel() fall back to no-op stubs.
/kernel/linux/linux-6.6/block/ |
blk-sysfs.c |
	static ssize_t queue_var_show(unsigned long var, char *page)
	{
		return sprintf(page, "%lu\n", var);
	}

	static ssize_t queue_var_store(unsigned long *var, const char *page,
				       size_t count)
	{
		unsigned long v;
		int err;

		err = kstrtoul(page, 10, &v);
		...
	}

	static ssize_t queue_requests_show(struct request_queue *q, char *page)
	{
		return queue_var_show(q->nr_requests, page);
	}
	Every queue attribute follows this show/store pattern: queue_ra_show()/queue_ra_store(), queue_max_sectors_show()/queue_max_sectors_store(), queue_discard_max_show()/queue_discard_max_store(), queue_nomerges_show()/queue_nomerges_store(), queue_rq_affinity_show()/queue_rq_affinity_store(), queue_poll_show()/queue_poll_store(), queue_io_timeout_show()/queue_io_timeout_store(), queue_wc_show()/queue_wc_store(), queue_wb_lat_show()/queue_wb_lat_store(), plus many read-only _show()-only attributes and the generic queue_attr_show()/queue_attr_store() dispatchers.
/third_party/ltp/testcases/kernel/mce-test/hwpoison/ |
tinjpage.c |
	munmap_reserve() unmaps the page and immediately re-reserves the address range so it cannot be reused:
		munmap(page, size);
		mmap(page, size, PROT_NONE, MAP_PRIVATE|MAP_FIXED, 0, 0);
	inject_madvise() injects poison with madvise(page, PS, MADV_POISON); page_to_pfn() resolves the PFN by reading /proc/self/pagemap at offset ((u64)page / PS) * sizeof(u64); inject_mce_inject() is slightly racy with page migration because the page is not mlocked. Poison scenarios exercised through poison()/recover()/testmem(): dirty_anonymous, dirty_anonymous_unmap, mlocked_anonymous, do_file_clean, do_file_dirty, file_hole, nonlinear, under_io_dirty, under_io_clean, anonymous_hugepage, file_backed_hugepage, shm_hugepage.
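	A standalone sketch of the madvise injection path (assumptions: root privileges and CONFIG_MEMORY_FAILURE; mainline spells the flag MADV_HWPOISON, which tinjpage aliases as MADV_POISON):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long ps = sysconf(_SC_PAGESIZE);
		char *page = mmap(NULL, ps, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (page == MAP_FAILED)
			return 1;
		memset(page, 1, ps);		/* back it with a real page */
		if (madvise(page, ps, MADV_HWPOISON) != 0)
			perror("madvise(MADV_HWPOISON)");
		return 0;
	}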