/kernel/linux/linux-5.10/drivers/target/ |
target_core_configfs.c | configfs attribute handlers that format into or parse from the page buffer. Excerpt:
    87:  return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s" ...   [target_core_item_version_show()]
    100: return sprintf(page, "%s\n", db_root);                                      [target_core_item_dbroot_show()]
    123: read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);              [target_core_item_dbroot_store()]
    513: static ssize_t _name##_show(struct config_item *item, char *page) \
    515:     return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
    551: static ssize_t _name##_store(struct config_item *item, const char *page, \
    558:     ret = kstrtou32(page, ...
  Show handlers take (struct config_item *item, char *page); store handlers take (struct config_item *item, const char *page, size_t count). Matching handlers, source line in parentheses:
    target_core_item_version_show (84), target_core_item_dbroot_show (97), target_core_item_dbroot_store (103),
    emulate_model_alias_store (624), emulate_write_cache_store (654), emulate_ua_intlck_ctrl_store (676),
    emulate_tas_store (706), emulate_tpu_store (730), emulate_tpws_store (756), pi_prot_type_store (782),
    pi_prot_format_show (846), pi_prot_format_store (851), pi_prot_verify_store (889), force_pr_aptpl_store (918),
    emulate_rest_reord_store (940), unmap_zeroes_data_store (962), queue_depth_store (998),
    optimal_sectors_store (1035), block_size_store (1065), alua_support_show (1099), alua_support_store (1108),
    pgr_support_show (1138), pgr_support_store (1147), target_wwn_vendor_id_show (1322),
    target_wwn_vendor_id_store (1328), target_wwn_product_id_show (1378), target_wwn_product_id_store (1384),
    target_wwn_revision_show (1434), target_wwn_revision_store (1440), target_wwn_vpd_unit_serial_show (1493),
    target_wwn_vpd_unit_serial_store (1500), target_wwn_vpd_protocol_identifier_show (1562),
    target_core_dev_pr_show_spc3_res (1669), target_core_dev_pr_show_spc2_res (1690),
    target_pr_res_holder_show (1709), target_pr_res_pr_all_tgt_pts_show (1729),
    target_pr_res_pr_generation_show (1750), target_pr_res_pr_holder_tg_port_show (1757),
    target_pr_res_pr_registered_i_pts_show (1793), target_pr_res_pr_type_show (1834),
    target_pr_res_type_show (1853), target_pr_res_aptpl_active_show (1867),
    target_pr_res_aptpl_metadata_show (1880), target_pr_res_aptpl_metadata_store (1917),
    target_dev_info_show (2137), target_dev_control_store (2150), target_dev_alias_show (2158),
    target_dev_alias_store (2168), target_dev_udev_path_show (2198), target_dev_udev_path_store (2208),
    target_dev_enable_show (2239), target_dev_enable_store (2246), target_dev_alua_lu_gp_show (2266),
    target_dev_alua_lu_gp_store (2290), target_dev_lba_map_show (2370), target_dev_lba_map_store (2415),
    target_lu_gp_lu_gp_id_show (2578), target_lu_gp_lu_gp_id_store (2587), target_lu_gp_members_show (2619),
    target_tg_pt_gp_alua_access_state_show (2748), target_tg_pt_gp_alua_access_state_store (2755),
    target_tg_pt_gp_alua_access_status_show (2800), target_tg_pt_gp_alua_access_status_store (2808),
    target_tg_pt_gp_alua_access_type_show (2842), target_tg_pt_gp_alua_access_type_store (2848),
    target_tg_pt_gp_alua_write_metadata_show (2902), target_tg_pt_gp_alua_write_metadata_store (2909),
    target_tg_pt_gp_nonop_delay_msecs_show (2932), target_tg_pt_gp_nonop_delay_msecs_store (2938),
    target_tg_pt_gp_trans_delay_msecs_show (2945), target_tg_pt_gp_trans_delay_msecs_store (2951),
    target_tg_pt_gp_implicit_trans_secs_show (2958), target_tg_pt_gp_implicit_trans_secs_store (2964),
    target_tg_pt_gp_preferred_show (2971), target_tg_pt_gp_preferred_store (2977),
    target_tg_pt_gp_tg_pt_gp_id_show (2983), target_tg_pt_gp_tg_pt_gp_id_store (2993),
    target_tg_pt_gp_members_show (3025), target_hba_info_show (3335), target_hba_mode_show (3344),
    target_hba_mode_store (3355) ... (remaining matches truncated in the original listing)
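Each of these handlers follows the same configfs pattern: the kernel hands show() a page-sized buffer to format into, and hands store() the user's string plus its length. Below is a minimal sketch of that pattern for a hypothetical item with a single u32 attribute; the demo_* names and the attribute itself are illustrative, not taken from the driver, while CONFIGFS_ATTR(), the show/store signatures, snprintf() into the page buffer and kstrtou32() mirror what the excerpts show.

/* Sketch only: hypothetical "demo" configfs item with one u32 attribute. */
#include <linux/configfs.h>
#include <linux/kernel.h>       /* kstrtou32(), container_of(), PAGE_SIZE via deps */

struct demo_item {
        struct config_item item;
        u32 queue_depth;
};

static inline struct demo_item *to_demo(struct config_item *item)
{
        return container_of(item, struct demo_item, item);
}

/* show: format the current value into the page-sized buffer configfs provides */
static ssize_t demo_queue_depth_show(struct config_item *item, char *page)
{
        return snprintf(page, PAGE_SIZE, "%u\n", to_demo(item)->queue_depth);
}

/* store: parse the user-supplied string from the same page buffer */
static ssize_t demo_queue_depth_store(struct config_item *item,
                                      const char *page, size_t count)
{
        u32 val;
        int ret;

        ret = kstrtou32(page, 0, &val);
        if (ret < 0)
                return ret;
        to_demo(item)->queue_depth = val;
        return count;
}

CONFIGFS_ATTR(demo_, queue_depth);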
/kernel/linux/linux-5.10/fs/f2fs/ |
recovery.c | excerpt from recover_dentry():
    147: static int recover_dentry(struct inode *inode, struct page *ipage, ...
    155:     struct page *page;
    177:     de = __f2fs_find_entry(dir, &fname, &page);
    202:     f2fs_delete_entry(de, page, dir, einode);
    205: } else if (IS_ERR(page)) {
    206:     err = PTR_ERR(page);
    216:     f2fs_put_page(page, 0);
  Other functions with struct page arguments or locals: recover_quota_data (228), recover_inode (267), find_fsync_dnodes (334), do_recover_data (559), recover_data (715) ... (remaining matches truncated)
/kernel/linux/linux-6.6/fs/ntfs/ |
aops.h | matching lines:
    3:  * aops.h - Defines for NTFS kernel address space operations and page cache ...
    21: * ntfs_unmap_page - release a page that was mapped using ntfs_map_page()
    22: * @page: the page to release
    24: * Unpin, unmap and release a page that was obtained from ntfs_map_page().
    26: static inline void ntfs_unmap_page(struct page *page)
    28:     kunmap(page);
    29:     put_page(page);
    33: * ntfs_map_page - map a page int... (line truncated)
    75: struct page *page = read_mapping_page(mapping, index, NULL);   [in ntfs_map_page()]
  (remaining matches truncated)
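The doc comment above pairs ntfs_map_page() with ntfs_unmap_page(): the former pins and kmaps a page-cache page, the latter undoes both steps. The following is a simplified reconstruction of that pairing built only from the generic calls visible in the excerpt (read_mapping_page(), kmap(), kunmap(), put_page()); demo_map_page()/demo_unmap_page() are hypothetical stand-ins, not the verbatim NTFS helpers.

#include <linux/pagemap.h>
#include <linux/highmem.h>

static inline struct page *demo_map_page(struct address_space *mapping,
                                         unsigned long index)
{
        /* read_mapping_page() returns a referenced, uptodate page or an ERR_PTR */
        struct page *page = read_mapping_page(mapping, index, NULL);

        if (!IS_ERR(page))
                kmap(page);             /* make the page addressable */
        return page;
}

static inline void demo_unmap_page(struct page *page)
{
        kunmap(page);                   /* undo the kmap() */
        put_page(page);                 /* drop the page-cache reference */
}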
/kernel/linux/linux-5.10/fs/ext4/ |
readpage.c | matching lines (the header comment lists when the fast path must fall back):
    21: *  - encountering a page which has buffers
    22: *  - encountering a page which has a non-hole after a hole
    23: *  - encountering a page with non-contiguous blocks
    26: * It does handle a page which has holes at the end - that is a common case: ...
  Excerpt from __read_end_io():
    72: struct page *page;
    77: page = bv->bv_page;
    80: if (bio->bi_status || PageError(page)) {
    81:     ClearPageUptodate(page);
    83:     ClearPageError(page);
  Other matches: ext4_mpage_readpages(struct inode *inode, struct readahead_control *rac, struct page *page) (224) ... (remaining matches truncated)
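The __read_end_io() excerpt shows the usual shape of a read-completion handler: walk every segment of the finished bio and mark its page uptodate or error before unlocking it. A hedged sketch of that loop follows; demo_read_end_io() is a hypothetical name, while the bio iteration and page-flag calls are standard 5.10-era APIs.

#include <linux/bio.h>
#include <linux/pagemap.h>

/* sketch of a read-completion handler in the style of __read_end_io() */
static void demo_read_end_io(struct bio *bio)
{
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bv, bio, iter_all) {
                struct page *page = bv->bv_page;

                if (bio->bi_status) {
                        ClearPageUptodate(page);        /* the read failed */
                        SetPageError(page);
                } else {
                        SetPageUptodate(page);          /* data landed in the page */
                }
                unlock_page(page);
        }
        bio_put(bio);
}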
/kernel/linux/linux-5.10/fs/udf/ |
file.c | matching lines:
    41: static void __udf_adinicb_readpage(struct page *page)
    43:     struct inode *inode = page->mapping->host;
    52:     kaddr = kmap_atomic(page);
    55:     flush_dcache_page(page);
    56:     SetPageUptodate(page);
    60: static int udf_adinicb_readpage(struct file *file, struct page *page)
    62:     BUG_ON(!PageLocked(page));
    63:     __udf_adinicb_readpage(page);
  Other matches: udf_adinicb_writepage (69), udf_adinicb_write_begin (93, local struct page *page), udf_adinicb_write_end (113) ... (remaining matches truncated)
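__udf_adinicb_readpage() copies data stored inside the inode (in-ICB data) straight into the page and publishes it without any block I/O. Below is a sketch of that copy sequence, assuming the caller has already located the in-inode data; demo_adinicb_readpage() and its src/len parameters are hypothetical.

#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/string.h>

static void demo_adinicb_readpage(struct page *page, const void *src, size_t len)
{
        char *kaddr = kmap_atomic(page);        /* short-lived kernel mapping */

        memcpy(kaddr, src, len);                /* copy the in-inode data */
        memset(kaddr + len, 0, PAGE_SIZE - len);/* zero the tail beyond EOF */
        kunmap_atomic(kaddr);
        flush_dcache_page(page);                /* keep the D-cache coherent */
        SetPageUptodate(page);                  /* publish the filled page */
}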
/kernel/linux/linux-6.6/mm/ |
purgeable.c | matching lines:
    6:  #include <asm/page.h>
    61: struct page *page = NULL;                        [in mm_clear_uxpgd()]
    69: page = radix_tree_delete(mm->uxpgd, iter.index);
    70: put_page(page);
    79: static struct page *lookup_uxpte_page(struct vm_area_struct *vma,
    83: struct page *page = NULL;                        [in lookup_uxpte_page()]
    85: struct page *new_page = NULL;
    107: ... page ...                                     [in lookup_uxpte_page(), line truncated]
  Other locals named page: lookup_uxpte (144), do_uxpte_page_fault (236), __mm_purg_pages_info (258) ... (remaining matches truncated)
khugepaged.c | matching lines:
    82:  * it would have happened if the vma was large enough during page ...
    366: * register it here without waiting a page fault that ...       [in hugepage_madvise()]
    496: static void release_pte_page(struct page *page)
    498:     release_pte_folio(page_folio(page));
    527: static bool is_refcount_suitable(struct page *page)
    531:     expected_refcount = total_mapcount(page);
    532:     if (PageSwapCache(page))
    533:         expected_refcount += compound_nr(page);
    646: VM_BUG_ON_PAGE(PageLRU(page), page);             [in __collapse_huge_page_isolate()]
    774: __collapse_huge_page_copy(pte_t *pte, struct page *page, pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma, unsigned long address, spinlock_t *ptl, struct list_head *compound_pagelist)
  Locals named page: __collapse_huge_page_isolate (544), hpage_collapse_scan_pmd (1249), collapse_pte_mapped_thp (1546, 1598), collapse_file (1794), hpage_collapse_scan_file (2219) ... (remaining matches truncated)
/third_party/node/deps/v8/src/heap/ |
new-spaces.cc | excerpt from InitializePage():
    24: Page* page = static_cast<Page*>(chunk);
    25: page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
    26: page->list_node().Initialize();
    28: page->AllocateYoungGenerationBitmap();
    32: ...->ClearLiveness(page);
    34: page->InitializationMemoryFence();
    35: return page;
  Comments: "Clear new space flags to avoid this page being treated as a new space page that is potentially being swept." (70-71, in EnsureCurrentCapacity()); "Duplicate the flags that was set on the old page" (199, in GrowTo()).
  Other matches: RemovePage(Page* page) (261), PrependPage(Page* page) (276), MovePageToTheEnd(Page* page) (289), AddRangeToActiveSystemPages (332), AssertValidRange (417), AddParkedAllocationBuffer (617), MakeLinearAllocationAreaIterable (688), Verify (776) ... (remaining matches truncated)
/kernel/linux/linux-5.10/arch/s390/include/asm/ |
page.h | matching lines:
    18:  /* PAGE_SHIFT determines the page size */
    49:  #define clear_page(page) memset((page), 0, PAGE_SIZE)
    53:  * bypass caches when copying a page. Especially when copying huge pages ...
    68:  #define clear_user_page(page, vaddr, pg) clear_page(page)
    140: struct page;
    141: void arch_free_page(struct page *page, int order);
    142: void arch_alloc_page(struct page *pag... (line truncated)
  (remaining matches truncated)
/kernel/linux/linux-5.10/mm/ |
cleancache.c | matching lines:
    168: * successful, use it to fill the specified page with data and return 0. ...
    176: int __cleancache_get_page(struct page *page)
    187:     VM_BUG_ON_PAGE(!PageLocked(page), page);
    188:     pool_id = page->mapping->host->i_sb->cleancache_poolid;
    192:     if (cleancache_get_key(page->mapping->host, &key) < 0)
    195:     ret = cleancache_ops->get_page(pool_id, key, page->index, page);
    206: * "Put" data from a page t... (line truncated)
    215: __cleancache_put_page(struct page *page)
    243: __cleancache_invalidate_page(struct address_space *mapping, struct page *page)
  (remaining matches truncated)
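The comment at line 168 states the cleancache contract: on a hit the page has been filled and 0 is returned, so the caller can skip real I/O. A hedged sketch of how a read path consults it follows, using the cleancache_get_page() wrapper that <linux/cleancache.h> exports in 5.10; demo_readpage() and demo_submit_read() are hypothetical placeholders for a filesystem's own paths.

#include <linux/cleancache.h>
#include <linux/pagemap.h>
#include <linux/errno.h>

static int demo_submit_read(struct page *page)
{
        /* hypothetical: issue the real read I/O for this page here */
        return -EIO;
}

static int demo_readpage(struct file *file, struct page *page)
{
        /*
         * On a cleancache hit the page is already filled, so mark it
         * uptodate and unlock it without touching the backing store.
         */
        if (cleancache_get_page(page) == 0) {
                SetPageUptodate(page);
                unlock_page(page);
                return 0;
        }
        return demo_submit_read(page);  /* miss: fall back to a normal read */
}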
z3fold.c | matching lines (the header comments describe the layout):
    11: * can store up to three compressed pages per page which improves the ...
    13: * storing an integral number of objects per page) and simplicity.
    16: * number of object per page) when reclaim is used.
    32: #include <linux/page-flags.h>
    53: * in the beginning of an allocated page are occupied by z3fold header, so ...
    55: * which shows the max number of free chunks in z3fold page, also there will ...
    99: * struct z3fold_header - z3fold page metadata occupying first chunks of each
    100: * z3fold page, except for HEADLESS pages
    101: * @buddy: links the z3fold page into the relevant list in the ...
    103: * @page_lock: per-page loc... (line truncated)
  Functions taking a struct page argument: init_z3fold_page (397), free_z3fold_page (434), z3fold_page_isolate (1577), z3fold_page_migrate (1616), z3fold_page_putback (1682).
  Locals named page (mostly via virt_to_page(zhdr)): put_z3fold_header (300), __release_z3fold_page (524), free_pages_work (582), z3fold_compact_page (774), do_compact_page (822), __z3fold_alloc (878), z3fold_alloc (1094), z3fold_free (1215), z3fold_reclaim_page (1332), z3fold_map (1505), z3fold_unmap (1550).
  Also: VM_BUG_ON_PAGE(PageIsolated(page), page) (1583) ... (remaining matches truncated)
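The header comments describe the layout: a page is split into fixed-size chunks, the first chunks hold the z3fold header, and at most three compressed objects share the remainder. A small user-space back-of-the-envelope calculation of that layout follows; the 4 KiB page size, the 104-byte header size and the chunk order of 6 are illustrative assumptions, not values read from the file.

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096;           /* assumed PAGE_SIZE */
        unsigned long nchunks_order = 6;          /* assumed chunk order */
        unsigned long chunk_size = page_size >> nchunks_order;   /* 64 bytes */
        unsigned long hdr_size = 104;             /* illustrative header size */
        /* the header is rounded up to a whole number of chunks */
        unsigned long hdr_chunks = (hdr_size + chunk_size - 1) / chunk_size;
        unsigned long free_chunks = (page_size / chunk_size) - hdr_chunks;

        printf("chunk size        : %lu bytes\n", chunk_size);
        printf("chunks per page   : %lu\n", page_size / chunk_size);
        printf("header chunks     : %lu\n", hdr_chunks);
        printf("chunks for objects: %lu (shared by at most 3 buddies)\n",
               free_chunks);
        return 0;
}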
purgeable.c | matching lines (5.10 copy of the file also listed above under linux-6.6/mm/):
    6:  #include <asm/page.h>
    61: struct page *page = NULL;                        [in mm_clear_uxpgd()]
    69: page = radix_tree_delete(mm->uxpgd, iter.index);
    70: put_page(page);
    79: static struct page *lookup_uxpte_page(struct vm_area_struct *vma,
    83: struct page *page = NULL;                        [in lookup_uxpte_page()]
    84: struct page *new_page = NULL;
    106: ... page ...                                     [in lookup_uxpte_page(), line truncated]
  Other locals named page: lookup_uxpte (142), do_uxpte_page_fault (234), __mm_purg_pages_info (256) ... (remaining matches truncated)
readahead.c | matching lines:
    42: * see if a page needs releasing upon read_cache_pages() failure
    48: static void read_cache_pages_invalidate_page(struct address_space *mapping, struct page *page)
    51:     if (page_has_private(page)) {
    52:         if (!trylock_page(page))
    54:         page->mapping = mapping;
    55:         do_invalidatepage(page, 0, PAGE_SIZE);
    56:         page->mapping = NULL;
    57:         unlock_page(page);
    59:     put_page(page);
    199: struct page *page = xa_load(&mapping->i_pages, index + i);   [in page_cache_ra_unbounded()]
    581: page_cache_async_ra(struct readahead_control *ractl, struct file_ra_state *ra, struct page *page, unsigned long req_count)
  Locals named page: read_cache_pages (92), read_pages (121) ... (remaining matches truncated)
/kernel/linux/linux-6.6/mm/kmsan/ |
init.c | matching lines:
    28:  * the page allocator becomes available. ...
    97:  struct page *shadow, *origin;
    114: bool kmsan_memblock_free_pages(struct page *page, unsigned int order)
    116:     struct page *shadow, *origin;
    119:     held_back[order].shadow = page;
    123:     held_back[order].origin = page;
    128:     kmsan_setup_meta(page, shadow, origin, order);
    137: struct page *items[MAX_BLOCKS];
    147: static void smallstack_push(struct smallstack *stack, struct page *page... (line truncated)
    168: struct page *page, *shadow, *origin;             [in do_collection()]
    185: struct page *page;                               [in collect_split()]
  (remaining matches truncated)
/kernel/linux/linux-5.10/arch/powerpc/mm/ |
pgtable_32.c | matching lines:
    3:   * This file contains the routines setting up the linux page tables. ...
    139: static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
    144:     BUG_ON(PageHighMem(page));
    145:     address = (unsigned long)page_address(page);
    152:     __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
    158: * Change the page attributes of an page in the linear mapping. ...
    162: static int change_page_attr(struct page *page, ...
    183: struct page *page = virt_to_page(_sinittext);    [in mark_initmem_nx()]
    196: struct page *page;                               [in mark_rodata_ro()]
    226: __kernel_map_pages(struct page *page, int numpages, int enable)
  (remaining matches truncated)
/kernel/linux/linux-6.6/kernel/power/ |
snapshot.c | matching lines:
    81:  * The calls to set_direct_map_*() should not fail because remapping a page ...
    86:  static inline void hibernate_map_page(struct page *page)
    89:      int ret = set_direct_map_default_noflush(page);
    92:      pr_warn_once("Failed to remap page\n");
    94:      debug_pagealloc_map_pages(page, 1);
    98:  static inline void hibernate_unmap_page(struct page *page)
    101:     unsigned long addr = (unsigned long)page_address(page);
    102:     int ret = set_direct_map_invalid_noflush(page);
  Functions taking a struct page argument: swsusp_set_page_free (1040), swsusp_page_is_free (1046), swsusp_unset_page_free (1052), swsusp_set_page_forbidden (1058), swsusp_page_is_forbidden (1064), swsusp_unset_page_forbidden (1070), clear_or_poison_free_page (1188), get_highmem_page_buffer (2513, 2596).
  Locals named page: alloc_image_page (227), free_image_page (255), mark_free_pages (1251), saveable_highmem_page (1321), saveable_page (1385), swsusp_free (1624), preallocate_image_pages (1660), free_unnecessary_pages (1760), alloc_highmem_pages (2062), swsusp_alloc (2101), snapshot_read_next (2268), prepare_highmem_image (2476), get_buffer (2727) ... (remaining matches truncated)
/kernel/linux/linux-5.10/include/linux/ |
buffer_head.h | matching lines:
    46: struct page;
    53: * within a page, and of course as the unit of I/O through the ...
    57: * a page (via a page_mapping) and for wrapping bio submission ...
    62: struct buffer_head *b_this_page;   /* circular list of page's buffers */
    63: struct page *b_page;               /* the page this bh is mapped to */
    67: char *b_data;                      /* pointer to data within the page */
    76: spinlock_t b_uptodate_lock;        /* Used by the first bh in a page, to
    78:                                     * buffers in the page */
    174: /* If we *know* page... (line truncated)
    438: try_to_free_buffers(struct page *page)
  (remaining matches truncated)
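The b_this_page field noted at line 62 links all buffer_heads of a page into a circular list, so walks are do/while loops that stop when they return to the first buffer. A sketch of that idiom follows; demo_count_dirty_buffers() is a hypothetical helper, while page_has_buffers(), page_buffers() and buffer_dirty() are real accessors this header provides.

#include <linux/buffer_head.h>

static unsigned int demo_count_dirty_buffers(struct page *page)
{
        struct buffer_head *head, *bh;
        unsigned int dirty = 0;

        if (!page_has_buffers(page))    /* nothing attached to this page */
                return 0;

        head = page_buffers(page);      /* first buffer_head of the page */
        bh = head;
        do {
                if (buffer_dirty(bh))   /* test the BH_Dirty state bit */
                        dirty++;
                bh = bh->b_this_page;   /* circular list of the page's buffers */
        } while (bh != head);

        return dirty;
}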
/kernel/linux/linux-5.10/kernel/ |
kexec_core.c | matching lines:
    41:  #include <asm/page.h>
    101: * others it is still a simple predictable page table to setup. ...
    114: * page of memory is necessary, but some architectures require more. ...
    127: * destination page in its final resting place (if it happens ...
    132: * - allocating a page table with the control code buffer identity ...
    144: static struct page *kimage_alloc_page(struct kimage *image, ...
  Excerpt from sanity_check_segment_list():
    161: * Since the kernel does everything in page size chunks ensure
    162: * the destination addresses are page aligned. Too many ...
    165: * simply because addresses are changed to page size ...
  Other matches: kimage_alloc_page (299, declaration truncated), kimage_free_pages (326), kimage_free_page_list (342), kimage_add_entry (541), kimage_add_page(struct kimage *image, unsigned long page) (572), kimage_free_entry (613), kimage_dst_used(struct kimage *image, unsigned long page) (665), kimage_alloc_page (706), kimage_load_normal_segment (804), kimage_load_crash_segment (872) ... (remaining matches truncated)
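The sanity_check_segment_list() comment says every segment destination must be page aligned because kexec copies the image in page-sized chunks. Below is a hedged sketch of that check against the in-kernel struct kexec_segment's mem/memsz fields; demo_check_segment() is a hypothetical wrapper, and the real function applies several more checks (overlap, segment count limits) on top of this one.

#include <linux/kexec.h>
#include <linux/errno.h>

static int demo_check_segment(const struct kexec_segment *seg)
{
        unsigned long mstart = seg->mem;
        unsigned long mend   = mstart + seg->memsz;

        /* both ends of the destination range must be page aligned */
        if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                return -EADDRNOTAVAIL;
        return 0;
}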
/kernel/linux/linux-5.10/arch/arm64/mm/ |
flush.c | matching lines:
    33: static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, ...
    42: * Copy user data from/to a page which is mapped into a different processes ...
    46: void copy_to_user_page(struct vm_area_struct *vma, struct page *page, ...
    51:     flush_ptrace_access(vma, page, uaddr, dst, len);
    56: struct page *page = pte_page(pte);               [in __sync_icache_dcache()]
    58: if (!test_bit(PG_dcache_clean, &page->flags)) {
    59:     sync_icache_aliases(page_address(page), page_siz... (line truncated)
    70: flush_dcache_page(struct page *page)
  (remaining matches truncated)
/kernel/linux/linux-5.10/arch/arm/include/asm/ |
page.h | matching lines:
    3:   * arch/arm/include/asm/page.h
    10:  /* PAGE_SHIFT determines the page size */
    19:  #include <asm/page-nommu.h>
    30:  * page-based copying and clearing for user space for the particular ...
    107: struct page;
    111: void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
    112: void (*cpu_copy_user_highpage)(struct page *to, struct page *from, ...
    127: extern void __cpu_clear_user_highpage(struct page *pag... (line truncated)
  (remaining matches truncated)
/kernel/linux/linux-5.10/include/trace/events/ |
page_pool.h | matching lines (tracepoint definition):
    45: const struct page *page, u32 release),           [TP_PROTO]
    47: TP_ARGS(pool, page, release),
    51: __field(const struct page *, page)
    58: __entry->page = page;
    60: __entry->pfn = page_to_pfn(page);
    63: TP_printk("page_pool=%p page=%p pfn=%lu release=%u",
    64:     __entry->pool, __entry->page, __entr... (line truncated)
  (remaining matches truncated)
kmem.h | matching lines (mm page tracepoints):
    154: TP_PROTO(struct page *page, unsigned int order),
    156: TP_ARGS(page, order),
    164: __entry->pfn = page_to_pfn(page);
    168: TP_printk("page=%p pfn=%lu order=%d", ...
    176: TP_PROTO(struct page *page),
    178: TP_ARGS(page),
    185: __entry->pfn = page_to_pfn(page);
    188: TP_printk("page... (line truncated)
  (remaining matches truncated)
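Both the page_pool and kmem excerpts follow the standard TRACE_EVENT() skeleton: TP_PROTO declares the tracepoint arguments, TP_ARGS names them, TP_STRUCT__entry and TP_fast_assign capture fields into the trace ring buffer, and TP_printk formats them. A hedged skeleton of the same pattern follows; the "demo" trace system, the event name and the header location are hypothetical, and a real header must additionally be included once from a .c file with CREATE_TRACE_POINTS defined.

/* assumed to live at include/trace/events/demo.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/tracepoint.h>
#include <linux/mm.h>           /* page_to_pfn() */

TRACE_EVENT(demo_page_event,

        TP_PROTO(const struct page *page, unsigned int order),

        TP_ARGS(page, order),

        TP_STRUCT__entry(
                __field(const struct page *, page)
                __field(unsigned long, pfn)
                __field(unsigned int, order)
        ),

        TP_fast_assign(
                __entry->page  = page;
                __entry->pfn   = page_to_pfn(page);
                __entry->order = order;
        ),

        TP_printk("page=%p pfn=0x%lx order=%u",
                  __entry->page, __entry->pfn, __entry->order)
);

#endif /* _TRACE_DEMO_H */

/* This part must be outside protection */
#include <trace/define_trace.h>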
/kernel/linux/linux-6.6/include/linux/ |
memremap.h | matching lines:
    38: * important to remember that there are certain points at which the struct page
    39: * must be treated as an opaque object, rather than a "normal" struct page. ...
    48: * type. Any page of a process can be migrated to such memory. However no one ...
    53: * coherent and supports page pinning. In support of coordinating page ...
    55: * wakeup event whenever a page is unpinned and becomes idle. This ...
    61: * coherent and supports page pinning. This is for example used by DAX devices ...
    79: * Called once the page refcount reaches 0. The reference count will be ...
    81: * for handing out the page again.
    83: void (*page_free)(struct page *pag... (line truncated)
    159: is_device_private_page(const struct page *page)
    171: is_pci_p2pdma_page(const struct page *page)
    178: is_device_coherent_page(const struct page *page)
  (remaining matches truncated)
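The comment around lines 79-81 describes the page_free hook: it runs when a ZONE_DEVICE page's refcount reaches zero so the owning driver can hand the page out again. A hedged sketch of how a driver might wire that callback into a dev_pagemap follows; all demo_* names are hypothetical, and a real MEMORY_DEVICE_PRIVATE pagemap also needs a physical range, an owner, and registration through memremap_pages().

#include <linux/memremap.h>
#include <linux/mm.h>

static void demo_page_free(struct page *page)
{
        /* reclaim the device backing storage for this page here */
}

static vm_fault_t demo_migrate_to_ram(struct vm_fault *vmf)
{
        /* migrate the device-private page back to system RAM on CPU fault */
        return VM_FAULT_SIGBUS;         /* placeholder */
}

static const struct dev_pagemap_ops demo_pagemap_ops = {
        .page_free      = demo_page_free,
        .migrate_to_ram = demo_migrate_to_ram,
};

static struct dev_pagemap demo_pagemap = {
        .type = MEMORY_DEVICE_PRIVATE,
        .ops  = &demo_pagemap_ops,
};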
/kernel/linux/linux-6.6/include/trace/events/ |
page_pool.h | matching lines (same tracepoint as the 5.10 header above; the pfn is printed in hex here):
    45: const struct page *page, u32 release),           [TP_PROTO]
    47: TP_ARGS(pool, page, release),
    51: __field(const struct page *, page)
    58: __entry->page = page;
    60: __entry->pfn = page_to_pfn(page);
    63: TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u",
    64:     __entry->pool, __entry->page, __entr... (line truncated)
  (remaining matches truncated)
kmem.h | matching lines (mm page tracepoints):
    138: TP_PROTO(struct page *page, unsigned int order),
    140: TP_ARGS(page, order),
    148: __entry->pfn = page_to_pfn(page);
    152: TP_printk("page=%p pfn=0x%lx order=%d", ...
    160: TP_PROTO(struct page *page),
    162: TP_ARGS(page),
    169: __entry->pfn = page_to_pfn(page);
    172: TP_printk("page... (line truncated)
  (remaining matches truncated)