Lines matching refs: entry

380 	/* In this loop, we essentially handle an entry for the range
401 /* Add an entry for last_accounted_offset -> rg->from, and
511 * fail; region_chg will always allocate at least 1 entry and a region_add for
512 * 1 page will only require at most 1 entry.
579 * zero. -ENOMEM is returned if a new file_region structure or cache entry
654 * may be a "placeholder" entry in the map which is of the form
666 * Check for an entry in the cache before dropping
690 /* New entry for end of split region */
698 /* Original entry is trimmed */
740 * these counts, the reserve map entry which could not be deleted will
741 * appear as a "reserved" entry instead of simply dangling with incorrect
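These references appear to come from the kernel's mm/hugetlb.c. The fragments above (380-741) all touch the resv_map's list of struct file_region entries. Below is a minimal, self-contained model of the split described at 690-698, where punching a hole in the middle of a region trims the original entry and links in a new one for the tail. This is not kernel code: the real list uses struct list_head and a pre-allocated cache entry (see 666), and the names here are hypothetical.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's struct file_region. */
struct file_region {
	long from, to;			/* covers [from, to) in huge pages */
	struct file_region *next;
};

/*
 * Model of the middle-of-region case in region_del(): remove [f, t)
 * from an entry that fully covers it, splitting it in two.
 */
static void region_del_middle(struct file_region *rg, long f, long t)
{
	/* New entry for end of split region */
	struct file_region *nrg = malloc(sizeof(*nrg));

	if (!nrg)
		return;
	nrg->from = t;
	nrg->to = rg->to;
	nrg->next = rg->next;

	/* Original entry is trimmed */
	rg->to = f;
	rg->next = nrg;
}

int main(void)
{
	struct file_region rg = { 0, 10, NULL };

	region_del_middle(&rg, 3, 7);
	for (struct file_region *p = &rg; p; p = p->next)
		printf("[%ld, %ld)\n", p->from, p->to);
	return 0;
}

Deleting [3, 7) from [0, 10) leaves [0, 3) and [7, 10): exactly the trim-plus-new-entry shape the comments at 690 and 698 describe.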
1634 * Upon entry, the page is locked which means that page_mapping() is
2214 * 1 page, and that adding to resv map a 1 page entry can only
2252 * entry is in the reserve map, it means a reservation exists.
2253 * If an entry exists in the reserve map, it means the
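Per 2252-2253, the presence of an entry in the reserve map is what signals an existing reservation. Reusing struct file_region from the sketch above, the check amounts to the following (a model only; the kernel derives this from region_chg()'s return value rather than an explicit lookup helper like this one):

/* Model only: does any reserve-map entry cover page index idx? */
static int reservation_exists(struct file_region *head, long idx)
{
	for (struct file_region *rg = head; rg; rg = rg->next)
		if (idx >= rg->from && idx < rg->to)
			return 1;	/* entry found: reservation exists */
	return 0;			/* no entry: no reservation */
}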
3759 pte_t entry;
3762 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3765 entry = huge_pte_wrprotect(mk_huge_pte(page,
3768 entry = pte_mkyoung(entry);
3769 entry = pte_mkhuge(entry);
3770 entry = arch_make_huge_pte(entry, vma, page, writable);
3772 return entry;
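Rejoined (the search output truncates wrapped lines), 3759-3772 are the body of make_huge_pte(); in mm/hugetlb.c of roughly this vintage the full function reads:

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
			   int writable)
{
	pte_t entry;

	if (writable) {
		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
					 vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_huge_pte(page,
					   vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);
	entry = arch_make_huge_pte(entry, vma, page, writable);

	return entry;
}

The writable/read-only split at 3762/3765 is the only branch; everything after it marks the entry young and huge and lets the architecture adjust it.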
3778 pte_t entry;
3780 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3781 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
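Likewise, 3778-3781 come from set_huge_ptep_writable(), which in full is roughly:

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
		update_mmu_cache(vma, address, ptep);
}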
3814 pte_t *src_pte, *dst_pte, entry, dst_entry;
3868 entry = huge_ptep_get(src_pte);
3870 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3872 * Skip if src entry none. Also, skip in the
3873 * unlikely case dst entry !none as this implies
3877 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3878 is_hugetlb_entry_hwpoisoned(entry))) {
3879 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3887 entry = swp_entry_to_pte(swp_entry);
3889 entry, sz);
3891 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3903 entry = huge_ptep_get(src_pte);
3904 ptepage = pte_page(entry);
3907 set_huge_pte_at(dst, addr, dst_pte, entry);
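The cluster at 3814-3907 is copy_hugetlb_page_range(). Rejoining the truncated lines around 3877-3891, the migration/hwpoison branch reads approximately as below (cow, src, dst, addr, src_pte, dst_pte, and sz are locals of the enclosing function):

} else if (unlikely(is_hugetlb_entry_migration(entry) ||
		    is_hugetlb_entry_hwpoisoned(entry))) {
	swp_entry_t swp_entry = pte_to_swp_entry(entry);

	if (is_write_migration_entry(swp_entry) && cow) {
		/*
		 * COW mappings require pages in both
		 * parent and child to be set to read.
		 */
		make_migration_entry_read(&swp_entry);
		entry = swp_entry_to_pte(swp_entry);
		set_huge_swap_pte_at(src, addr, src_pte,
				     entry, sz);
	}
	set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
}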
4424 * sure there really is no pte entry.
4459 * don't have hwpoisoned swap entry for errored virtual address.
4556 pte_t *ptep, entry;
4575 entry = huge_ptep_get(ptep);
4576 if (unlikely(is_hugetlb_entry_migration(entry))) {
4579 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
4612 entry = huge_ptep_get(ptep);
4613 if (huge_pte_none(entry))
4623 * entry could be a migration/hwpoison entry at this point, so this
4629 if (!pte_present(entry))
4640 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4656 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4660 * hugetlb_cow() requires page locks of pte_page(entry) and
4664 page = pte_page(entry);
4674 if (!huge_pte_write(entry)) {
4679 entry = huge_pte_mkdirty(entry);
4681 entry = pte_mkyoung(entry);
4682 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
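4556-4682 are from hugetlb_fault(). Its tail, rejoined from the fragments above, looks roughly like this (a sketch: hugetlb_cow()'s argument list varies across kernel versions):

	if (flags & FAULT_FLAG_WRITE) {
		if (!huge_pte_write(entry)) {
			/* needs the page locks taken above; see 4660 */
			ret = hugetlb_cow(mm, vma, haddr, ptep,
					  pagecache_page, ptl);
			goto out_put_page;
		}
		entry = huge_pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
				       flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, haddr, ptep);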
5082 swp_entry_t entry = pte_to_swp_entry(pte);
5084 if (is_write_migration_entry(entry)) {
5087 make_migration_entry_read(&entry);
5088 newpte = swp_entry_to_pte(entry);
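5082-5088 show hugetlb_change_protection() downgrading a writable migration entry to a read-only one so the page can no longer be written through it while it is in flight; rejoined (ptep, pte, address, h, and pages are locals of the enclosing loop):

	swp_entry_t entry = pte_to_swp_entry(pte);

	if (is_write_migration_entry(entry)) {
		pte_t newpte;

		make_migration_entry_read(&entry);
		newpte = swp_entry_to_pte(entry);
		set_huge_swap_pte_at(mm, address, ptep,
				     newpte, huge_page_size(h));
		pages++;
	}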
5109 * may have cleared our pud entry and done put_page on the page table:
5550 * entry at address @addr
5552 * Return: Pointer to page table entry (PUD or PMD) for
5553 * address @addr, or NULL if a !p*d_present() entry is encountered and the
5578 /* must have a valid entry and size to go further */
5649 * hwpoisoned entry is treated as no_page_table in
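Finally, 5550-5578 are the kernel-doc and body of huge_pte_offset(); the whole walk, roughly as it appears in kernels of this era (the note at 5649 about hwpoisoned entries belongs to the follow_huge_*() helpers further down), is:

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (sz == PUD_SIZE)
		/* must be pud huge, non-present or none */
		return (pte_t *)pud;
	if (!pud_present(*pud))
		return NULL;
	/* must have a valid entry and size to go further */

	pmd = pmd_offset(pud, addr);
	/* must be pmd huge, valid or none */
	return (pte_t *)pmd;
}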