/kernel/linux/linux-6.6/fs/verity/verify.c
    17:  * @hpage, has already been verified.                 (in is_hash_block_verified)
    19: static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,   (argument)
    36:         return PageChecked(hpage);
    53:         if (PageChecked(hpage)) {
    62:         if (PageChecked(hpage)) {
    74:         SetPageChecked(hpage);
   156:         struct page *hpage;                           (local in verify_data_block)
   179:         hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode,
   182:         if (IS_ERR(hpage)) {
   185:                 PTR_ERR(hpage), hpage_idx);
   207:         struct page *hpage = hblocks[level - 1].page; (local in verify_data_block)
   [all...]

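The hits above show fs/verity's fast path: once every hash block in a Merkle-tree page has been verified, the page is flagged with the Checked bit so later reads skip re-hashing. A minimal sketch of that idiom, assuming whole-page granularity (the real is_hash_block_verified() also consults a per-block bitmap when tree blocks are smaller than a page; the helper names here are hypothetical):

#include <linux/page-flags.h>

/* Fast path: has this Merkle-tree page already been verified? */
static bool hash_page_verified(struct page *hpage)
{
	return PageChecked(hpage);
}

/* Slow path succeeded: record it so future readers skip re-hashing. */
static void mark_hash_page_verified(struct page *hpage)
{
	SetPageChecked(hpage);
}
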
/kernel/linux/linux-5.10/fs/verity/verify.c
    43: static void extract_hash(struct page *hpage, unsigned int hoffset,   (argument)
    46:         void *virt = kmap_atomic(hpage);
   121:         struct page *hpage;                           (local in verify_page)
   128:         hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode, hindex,
   130:         if (IS_ERR(hpage)) {
   131:                 err = PTR_ERR(hpage);
   138:         if (PageChecked(hpage)) {
   139:                 extract_hash(hpage, hoffset, hsize, _want_hash);
   141:                 put_page(hpage);
   148:         hpages[level] = hpage;
   158:         struct page *hpage = hpages[level - 1];       (local in verify_page)
   [all...]

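Lines 43 and 46 show only the head of extract_hash(); a plausible reconstruction of the full helper, assuming the standard kmap_atomic()/memcpy()/kunmap_atomic() sequence for copying a hash out of a (possibly highmem) tree page:

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy the hsize-byte hash at offset hoffset within hpage into out. */
static void extract_hash(struct page *hpage, unsigned int hoffset,
			 unsigned int hsize, u8 *out)
{
	void *virt = kmap_atomic(hpage);	/* short-lived atomic mapping */

	memcpy(out, virt + hoffset, hsize);
	kunmap_atomic(virt);
}
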
/kernel/linux/linux-5.10/mm/khugepaged.c
   863: static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)   (argument)
   865:         if (IS_ERR(*hpage)) {
   870:                 *hpage = NULL;
   872:         } else if (*hpage) {
   873:                 put_page(*hpage);
   874:                 *hpage = NULL;
   881: khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)   (argument)
   883:         VM_BUG_ON_PAGE(*hpage, *hpage);
   885:         *hpage ...                                    (truncated hit in khugepaged_alloc_page)
   915:         struct page *hpage;                           (local in khugepaged_alloc_hugepage)
   933: khugepaged_prealloc_page(struct page **hpage, bool *wait)   (argument, !CONFIG_NUMA variant)
   957: khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)   (argument, !CONFIG_NUMA variant)
  1061: collapse_huge_page(struct mm_struct *mm, unsigned long address,
            struct page **hpage, int node, int referenced, int unmapped)   (argument)
  1230: khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
            unsigned long address, struct page **hpage)   (argument)
  1445:         struct page *hpage;                          (local in collapse_pte_mapped_thp)
  1675: collapse_file(struct mm_struct *mm, struct file *file, pgoff_t start,
            struct page **hpage, int node)   (argument)
  2012: khugepaged_scan_file(struct mm_struct *mm, struct file *file, pgoff_t start,
            struct page **hpage)   (argument)
  2088: khugepaged_scan_file(struct mm_struct *mm, struct file *file, pgoff_t start,
            struct page **hpage)   (argument, !CONFIG_SHMEM stub)
  2240:         struct page *hpage = NULL;                   (local in khugepaged_do_scan)
   [all...]

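The allocation statement at line 885 is cut off in the listing. Assuming the usual __alloc_pages_node() call from the surrounding 5.10 code, the NUMA allocation path sketched from these hits looks roughly like this; note how failure stashes ERR_PTR(-ENOMEM) in *hpage so khugepaged_prealloc_page() can see it and decide whether to back off before retrying:

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);		/* slot must start empty */

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);	/* signals the prealloc path */
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
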
/kernel/linux/linux-5.10/mm/hwpoison-inject.c
    18:         struct page *hpage;                           (local in hwpoison_inject)
    28:         hpage = compound_head(p);
    33:         shake_page(hpage, 0);
    37:         if (!PageLRU(hpage) && !PageHuge(p))
    45:         err = hwpoison_filter(hpage);

/kernel/linux/linux-5.10/mm/memory-failure.c
   813:         struct page *hpage = compound_head(p);        (local in me_huge_page)
   816:         if (!PageHuge(hpage))
   819:         mapping = page_mapping(hpage);
   821:                 res = truncate_error_page(hpage, pfn, mapping);
   823:                 unlock_page(hpage);
   829:                 if (PageAnon(hpage))
   830:                         put_page(hpage);
   833:                 lock_page(hpage);
   997:         struct page *hpage = *hpagep;                 (local in hwpoison_user_mappings)
   998:         bool mlocked = PageMlocked(hpage);
  1329:         struct page *hpage;                           (local in memory_failure)
  1792:         struct page *hpage = compound_head(page);     (local in __soft_offline_page)
  1862:         struct page *hpage = compound_head(page);     (local in soft_offline_in_use_page)
   [all...]

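A recurring idiom across these hits: the poison handlers first normalize whatever struct page they were given to its compound head, because an error in a tail page must be handled at the huge-page level. A minimal sketch of the me_huge_page() flow (the helper name is hypothetical; locking and the MF_* result codes of the real function are simplified away):

/* Condensed sketch of the compound_head()-then-mapping pattern above. */
static int handle_huge_page_error(struct page *p, unsigned long pfn)
{
	struct page *hpage = compound_head(p);	/* tail page -> head page */
	struct address_space *mapping;

	if (!PageHuge(hpage))
		return -EINVAL;			/* page was split under us */

	mapping = page_mapping(hpage);
	if (!mapping)
		return -EBUSY;			/* nothing to truncate from */

	return truncate_error_page(hpage, pfn, mapping);
}
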
/kernel/linux/linux-5.10/mm/migrate.c
  1292-1294: unmap_and_move_huge_page(new_page_t get_new_page, free_page_t put_new_page,
            unsigned long private, struct page *hpage, int force,
            enum migrate_mode mode, int reason)   (argument)
  1310:         if (!hugepage_migration_supported(page_hstate(hpage))) {
  1311:                 putback_active_hugepage(hpage);
  1315:         new_hpage = get_new_page(hpage, private);
  1319:         if (!trylock_page(hpage)) {
  1329:                 lock_page(hpage);
  1337:         if (page_private(hpage) && !page_mapping(hpage)) {
  1342:         if (PageAnon(hpage))
  1343:                 anon_vma = page_get_anon_vma(hpage);
   [all...]

/kernel/linux/linux-5.10/mm/hugetlb.c
  1638: struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)   (argument)
  1640:         struct address_space *mapping = page_mapping(hpage);

/kernel/linux/linux-6.6/mm/khugepaged.c
   890: static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,   (argument)
   893:         *hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
   894:         if (unlikely(!*hpage)) {
   899:         folio_prep_large_rmappable((struct folio *)*hpage);
  1058: static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,   (argument)
  1066:         if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
  1069:         folio = page_folio(*hpage);
  1072:                 *hpage = NULL;
  1075:         count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
  1088:         struct page *hpage;                           (local in collapse_huge_page)
  1437: set_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp,
            struct page *hpage)   (argument)
  1476:         struct page *hpage;                           (local in collapse_pte_mapped_thp)
  1793:         struct page *hpage;                           (local in collapse_file)
   [all...]

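By 6.6 the freshly allocated huge page is immediately wrapped in a folio and charged to the memcg before anything else touches it. A sketch of alloc_charge_hpage() assembled from the hits; the gfp/node computation is not shown in the listing, so the values below are placeholder assumptions:

static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
{
	gfp_t gfp = GFP_TRANSHUGE;	/* assumption: real flags depend on defrag mode */
	int node = 0;			/* assumption: real node comes from the scan */
	struct folio *folio;

	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
		return SCAN_ALLOC_HUGE_PAGE_FAIL;

	folio = page_folio(*hpage);
	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);	/* drop the page we just allocated */
		*hpage = NULL;
		return SCAN_CGROUP_CHARGE_FAIL;
	}

	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
	return SCAN_SUCCEED;
}
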
/kernel/linux/linux-6.6/mm/hwpoison-inject.c
    18:         struct page *hpage;                           (local in hwpoison_inject)
    28:         hpage = compound_head(p);
    33:         shake_page(hpage);
    37:         if (!PageLRU(hpage) && !PageHuge(p) && !is_free_buddy_page(p))
    45:         err = hwpoison_filter(hpage);

/kernel/linux/linux-6.6/mm/memory-failure.c
  1185:         struct page *hpage = compound_head(p);        (local in me_huge_page)
  1189:         mapping = page_mapping(hpage);
  1191:                 res = truncate_error_page(hpage, page_to_pfn(p), mapping);
  1194:                 unlock_page(hpage);
  1196:                 unlock_page(hpage);
  1202:                         put_page(hpage);
  1549-1550: hwpoison_user_mappings(struct page *p, unsigned long pfn,
            int flags, struct page *hpage)   (argument)
  1552:         struct folio *folio = page_folio(hpage);
  1558:         bool mlocked = PageMlocked(hpage);
  1566:         if (!(PageLRU(hpage) || PageHuge(p)))
  2173:         struct page *hpage;                           (local in memory_failure)
  2647:         struct page *hpage = compound_head(page);     (local in soft_offline_in_use_page)
   [all...]

/kernel/linux/linux-6.6/mm/hugetlb.c
  2100: struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)   (argument)
  2102:         struct address_space *mapping = page_mapping(hpage);

/kernel/linux/linux-6.6/include/trace/events/huge_memory.h
   210:         TP_PROTO(struct mm_struct *mm, struct page *hpage, pgoff_t index,
   213:         TP_ARGS(mm, hpage, index, addr, is_shmem, file, nr, result),
   227:                 __entry->hpfn = hpage ? page_to_pfn(hpage) : -1;

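The guarded assignment at line 227 exists because hpage can be NULL on the failure paths that still fire the tracepoint. A self-contained sketch of a tracepoint with that shape (the event name is hypothetical; the real mm_khugepaged events carry the extra fields listed at line 213):

#include <linux/tracepoint.h>

TRACE_EVENT(hpage_collapse_sketch,	/* hypothetical event name */

	TP_PROTO(struct page *hpage, pgoff_t index),

	TP_ARGS(hpage, index),

	TP_STRUCT__entry(
		__field(unsigned long, hpfn)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		/* hpage may be NULL when allocation or collapse failed */
		__entry->hpfn = hpage ? page_to_pfn(hpage) : -1;
		__entry->index = index;
	),

	TP_printk("hpfn=0x%lx index=%lu",
		  __entry->hpfn, (unsigned long)__entry->index)
);
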
/kernel/linux/linux-6.6/io_uring/rsrc.c
   815-816: headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
            int nr_pages, struct page *hpage)   (argument)
   824:                 if (compound_head(pages[i]) == hpage)
   835:                         if (compound_head(imu->bvec[j].bv_page) == hpage)
   854:                 struct page *hpage;                   (local in io_buffer_account_pin)
   856:                 hpage = compound_head(pages[i]);
   857:                 if (hpage == *last_hpage)
   859:                 *last_hpage = hpage;
   860:                 if (headpage_already_acct(ctx, pages, i, hpage))
   862:                 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;

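The accounting idiom here: every subpage of a compound (huge) page shares one head page, so buffer registration charges each huge page exactly once, with *last_hpage caching the previous head for the common case of consecutive subpages. A sketch of the loop from io_buffer_account_pin() (the wrapper function name is hypothetical; the types are the ones in the hits):

/* Charge each distinct compound head once (condensed from the hits above). */
static void account_pinned_pages(struct io_ring_ctx *ctx,
				 struct io_mapped_ubuf *imu,
				 struct page **pages, int nr_pages,
				 struct page **last_hpage)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *hpage = compound_head(pages[i]);

		if (hpage == *last_hpage)
			continue;	/* same huge page as the previous subpage */
		*last_hpage = hpage;
		if (headpage_already_acct(ctx, pages, i, hpage))
			continue;	/* head already charged for another buffer */
		/* charge the whole huge page once, in base-page units */
		imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
	}
}
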
/kernel/linux/linux-5.10/arch/powerpc/kvm/book3s_pr.c
   663:         struct page *hpage;                           (local in kvmppc_patch_dcbz)
   668:         hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
   669:         if (is_error_page(hpage))
   676:         get_page(hpage);
   677:         page = kmap_atomic(hpage);
   685:         put_page(hpage);

/kernel/linux/linux-6.6/arch/powerpc/kvm/book3s_pr.c
   647:         struct page *hpage;                           (local in kvmppc_patch_dcbz)
   652:         hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
   653:         if (is_error_page(hpage))
   660:         get_page(hpage);
   661:         page = kmap_atomic(hpage);
   669:         put_page(hpage);

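In both trees kvmppc_patch_dcbz() follows the same skeleton: resolve the guest frame to a host page, map it atomically, rewrite the offending dcbz instructions in place, then unmap and drop the reference. A sketch with the opcode scan elided (the helper name and the loop body are placeholders):

/* Skeleton of the map-patch-unmap sequence from kvmppc_patch_dcbz(). */
static void patch_guest_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;			/* guest frame not backed by a page */

	get_page(hpage);
	page = kmap_atomic(hpage);	/* atomic mapping: no sleeping here */

	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) {
		/* real code: match dcbz opcodes and patch them to trap */
	}

	kunmap_atomic(page);
	put_page(hpage);
}
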
/kernel/linux/linux-5.10/include/linux/hugetlb.h
   158: struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
   203-204: hugetlb_page_mapping_lock_write(struct page *hpage)   (argument, static inline stub)

/kernel/linux/linux-6.6/include/linux/hugetlb.h
   178: struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
   300-301: hugetlb_page_mapping_lock_write(struct page *hpage)   (argument, static inline stub)

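Both trees pair the real declaration with a static inline stub so callers still compile when hugetlb support is configured out. The pattern, simplified from include/linux/hugetlb.h:

#ifdef CONFIG_HUGETLB_PAGE
struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
#else /* !CONFIG_HUGETLB_PAGE */
static inline struct address_space *
hugetlb_page_mapping_lock_write(struct page *hpage)
{
	return NULL;	/* no hugetlb pages exist in this configuration */
}
#endif
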
/kernel/linux/linux-5.10/arch/s390/kvm/kvm-s390.c
   178: static int hpage;                                     (variable)
   179: module_param(hpage, int, 0444);
   180: MODULE_PARM_DESC(hpage, "1m huge page backing support");
   553:                 if (hpage && !kvm_is_ucontrol(kvm))   (in kvm_vm_ioctl_check_extension)
   763:                 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))   (in kvm_vm_ioctl_enable_cap)
  5088:         if (nested && hpage) {                        (in kvm_s390_init)

/kernel/linux/linux-6.6/arch/s390/kvm/kvm-s390.c
   200: static int hpage;                                     (variable)
   201: module_param(hpage, int, 0444);
   202: MODULE_PARM_DESC(hpage, "1m huge page backing support");
   591:                 if (hpage && !kvm_is_ucontrol(kvm))   (in kvm_vm_ioctl_check_extension)
   845:                 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))   (in kvm_vm_ioctl_enable_cap)
  5859:         if (nested && hpage) {                        (in kvm_s390_init)

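In the s390 hits, hpage is not a struct page at all but a read-only module parameter gating 1M huge-page backing for guests: world-readable in sysfs (mode 0444), consulted by the capability checks, and rejected at init when combined with nested virtualization. A condensed sketch (the wrapper function is hypothetical; the conditions mirror the hits):

static int hpage;			/* enabled with kvm.hpage=1 at module load */
module_param(hpage, int, 0444);		/* read-only: visible in sysfs, not writable */
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* Hypothetical wrapper condensing the per-VM checks at lines 591/845. */
static bool hpage_backing_allowed(struct kvm *kvm)
{
	/* huge-page backing excludes ucontrol VMs and CMMA-based migration */
	return hpage && !kvm_is_ucontrol(kvm) && !kvm->arch.use_cmma;
}
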
/kernel/linux/linux-5.10/io_uring/io_uring.c
  8987-8988: headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
            int nr_pages, struct page *hpage)   (argument)
  8996:                 if (compound_head(pages[i]) == hpage)
  9007:                         if (compound_head(imu->bvec[j].bv_page) == hpage)
  9026:                 struct page *hpage;                   (local in io_buffer_account_pin)
  9028:                 hpage = compound_head(pages[i]);
  9029:                 if (hpage == *last_hpage)
  9031:                 *last_hpage = hpage;
  9032:                 if (headpage_already_acct(ctx, pages, i, hpage))
  9034:                 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;

/kernel/linux/linux-6.6/kernel/trace/ring_buffer.c
  2027:                 struct buffer_page *hpage = rb_set_head_page(cpu_buffer);   (local in rb_insert_pages)
  2029:                 if (!hpage)
  2031:                 head_page = &hpage->list;