Lines Matching defs:folio

646 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
890 struct folio **prealloc, struct page *page)
892 struct folio *new_folio;
927 struct folio **prealloc)
933 struct folio *folio;
937 folio = page_folio(page);
938 if (page && folio_test_anon(folio)) {
945 folio_get(folio);
948 folio_put(folio);
954 folio_get(folio);
967 VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page));
984 static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
987 struct folio *new_folio;
1016 struct folio *prealloc = NULL;
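
The prealloc hits at 890-1016 are the fork-time copy path: a destination folio is allocated and charged up front so the copy code never has to allocate while holding the page table lock. A minimal sketch of that pattern, assuming the vma_alloc_folio()/mem_cgroup_charge() pairing seen elsewhere in this listing; the helper name is invented and the trailing vma_alloc_folio() argument varies between kernel versions.

#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/swap.h>

/* Sketch only: preallocate a charged folio for a later COW copy at fork. */
static struct folio *prealloc_cow_folio(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *new_folio;

	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
	if (!new_folio)
		return NULL;

	if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL)) {
		/* Charge failed: drop the only reference and report failure. */
		folio_put(new_folio);
		return NULL;
	}
	folio_throttle_swaprate(new_folio, GFP_KERNEL);

	return new_folio;
}
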
2932 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
2949 folio_lock(folio);
2950 if (!folio->mapping) {
2951 folio_unlock(folio);
2956 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2969 struct folio *folio = page_folio(vmf->page);
2973 dirtied = folio_mark_dirty(folio);
2974 VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
2976 * Take a local copy of the address_space - folio.mapping may be zeroed
2981 mapping = folio_raw_mapping(folio);
2982 folio_unlock(folio);
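
The do_page_mkwrite() hits at 2932-2956 show the re-lock-and-revalidate step: if the filesystem's ->page_mkwrite() returned without VM_FAULT_LOCKED, the folio is locked here and folio->mapping is rechecked, since truncation may have detached the folio while it was unlocked (the comment at 2976 makes the same point for the dirty-marking path). A condensed sketch of that step; the flag save/restore and the error mask follow the usual shape of this function and are assumptions here.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch: call ->page_mkwrite() and revalidate the folio afterwards. */
static vm_fault_t page_mkwrite_sketch(struct vm_fault *vmf, struct folio *folio)
{
	unsigned int old_flags = vmf->flags;
	vm_fault_t ret;

	vmf->flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE;
	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
	vmf->flags = old_flags;

	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
		return ret;

	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
		folio_lock(folio);
		if (!folio->mapping) {
			/* Truncated while unlocked: drop the lock, retry the fault. */
			folio_unlock(folio);
			return 0;
		}
		ret |= VM_FAULT_LOCKED;
	} else {
		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	}
	return ret;
}
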
3067 struct folio *old_folio = NULL;
3068 struct folio *new_folio = NULL;
3158 pr_info("set wp new folio %lx purgeable\n", folio_pfn(new_folio));
3292 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
3298 folio_get(folio);
3305 folio_put(folio);
3310 tmp = do_page_mkwrite(vmf, folio);
3313 folio_put(folio);
3318 folio_unlock(folio);
3319 folio_put(folio);
3324 folio_lock(folio);
3327 folio_put(folio);
3359 struct folio *folio = NULL;
3379 folio = page_folio(vmf->page);
3395 return wp_page_shared(vmf, folio);
3402 if (folio && folio_test_anon(folio)) {
3411 * We have to verify under folio lock: these early checks are
3412 * just an optimization to avoid locking the folio and freeing
3415 * KSM doesn't necessarily raise the folio refcount.
3417 if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
3419 if (!folio_test_lru(folio))
3425 if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
3427 if (!folio_trylock(folio))
3429 if (folio_test_swapcache(folio))
3430 folio_free_swap(folio);
3431 if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
3432 folio_unlock(folio);
3436 * Ok, we've got the only folio reference from our mapping
3437 * and the folio is locked, it's dark out, and we're wearing
3441 folio_unlock(folio);
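
Lines 3402-3441 are the anonymous COW reuse decision in the write-protect fault path: cheap unlocked checks first (KSM, refcount, LRU batches), then the same checks repeated under the folio lock, where the answer is finally stable, as the comments at 3411-3415 say. Read as a standalone helper it looks roughly like the sketch below; the helper name is invented and the refcount thresholds are taken straight from the listed lines.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

/* Sketch: may this anonymous folio be reused in place for a write fault? */
static bool can_reuse_anon_folio(struct folio *folio)
{
	/*
	 * Unlocked fast path: a KSM folio, or one with more references than
	 * page table + swap cache + LRU batch can account for, is shared.
	 */
	if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
		return false;
	if (!folio_test_lru(folio))
		/* Flush per-CPU LRU add batches that may hold a reference. */
		lru_add_drain();
	if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
		return false;
	if (!folio_trylock(folio))
		return false;
	if (folio_test_swapcache(folio))
		folio_free_swap(folio);
	if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
		folio_unlock(folio);
		return false;
	}
	/* Sole reference, folio locked: safe to map it writable again. */
	folio_unlock(folio);
	return true;
}
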
3460 if (folio)
3461 folio_get(folio);
3465 if (folio && folio_test_ksm(folio))
3500 * unmap_mapping_folio() - Unmap single folio from processes.
3501 * @folio: The locked folio to be unmapped.
3503 * Unmap this folio from any userspace process which still has it mmaped.
3506 * truncation or invalidation holds the lock on a folio, it may find that
3510 void unmap_mapping_folio(struct folio *folio)
3512 struct address_space *mapping = folio->mapping;
3517 VM_BUG_ON(!folio_test_locked(folio));
3519 first_index = folio->index;
3520 last_index = folio_next_index(folio) - 1;
3523 details.single_folio = folio;
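
unmap_mapping_folio() at 3510-3523 derives the unmap range from the folio itself: first_index/last_index cover every page of the folio, and details.single_folio restricts the zap to exactly this folio. A trimmed sketch of the whole function, assuming the usual zap_details fields and the internal i_mmap walk (unmap_mapping_range_tree()) that the listing does not show.

/* Sketch: unmap one locked folio from every process that maps it. */
void unmap_mapping_folio(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	struct zap_details details = { };
	pgoff_t first_index, last_index;

	VM_BUG_ON(!folio_test_locked(folio));

	/* The folio may span several pages; unmap its whole index range. */
	first_index = folio->index;
	last_index = folio_next_index(folio) - 1;

	details.even_cows = false;
	details.single_folio = folio;

	/* Walk every VMA mapping this file range and zap the matching PTEs. */
	i_mmap_lock_read(mapping);
	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
					 last_index, &details);
	i_mmap_unlock_read(mapping);
}
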
3604 struct folio *folio = page_folio(vmf->page);
3610 * We need a reference to lock the folio because we don't hold
3612 * entry and unmap it. If the folio is free the entry must
3617 if (!folio_try_get(folio))
3620 ret = folio_lock_or_retry(folio, vmf);
3622 folio_put(folio);
3637 folio_unlock(folio);
3638 folio_put(folio);
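
Lines 3604-3638 show the speculative-reference idiom used when the fault handler does not yet own a reference: folio_try_get() fails if the folio is already on its way to being freed, and folio_lock_or_retry() either takes the lock or backs out of the fault. A bare sketch of the idiom; the wrapper name is invented and the PTE revalidation in the middle is elided.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch: pin and lock the folio behind a fault without a prior reference. */
static vm_fault_t lock_faulted_folio(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	vm_fault_t ret;

	/* The folio may be freed concurrently; only proceed if we can pin it. */
	if (!folio_try_get(folio))
		return 0;

	/* May drop mmap_lock and ask the caller to retry instead of blocking. */
	ret = folio_lock_or_retry(folio, vmf);
	if (ret) {
		folio_put(folio);
		return ret;
	}

	/* ... revalidate the PTE and restore the mapping here ... */

	folio_unlock(folio);
	folio_put(folio);
	return 0;
}
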
3644 static inline bool should_try_to_free_swap(struct folio *folio,
3648 if (!folio_test_swapcache(folio))
3650 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
3651 folio_test_mlocked(folio))
3659 return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
3660 folio_ref_count(folio) == 2;
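
Lines 3644-3660 are most of should_try_to_free_swap(); filling the two gaps, the predicate plausibly reads as below: drop the swap-cache copy when swap is under memcg pressure or the folio is mlocked, or when a write fault is about to take exclusive ownership anyway (a refcount of two meaning page table plus swap cache).

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/memcontrol.h>

static inline bool should_try_to_free_swap(struct folio *folio,
					   struct vm_area_struct *vma,
					   unsigned int fault_flags)
{
	if (!folio_test_swapcache(folio))
		return false;
	if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
	    folio_test_mlocked(folio))
		return true;
	/*
	 * For a write fault, freeing the swap cache reference likely leaves
	 * this mapping as the sole owner, so the reuse path can avoid a copy.
	 */
	return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
		folio_ref_count(folio) == 2;
}
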
3741 struct folio *swapcache, *folio = NULL;
3806 folio = swap_cache_get_folio(entry, vma, vmf->address);
3807 if (folio)
3808 page = folio_file_page(folio, swp_offset(entry));
3809 swapcache = folio;
3811 if (!folio) {
3829 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
3831 page = &folio->page;
3832 if (folio) {
3833 __folio_set_locked(folio);
3834 __folio_set_swapbacked(folio);
3836 if (mem_cgroup_swapin_charge_folio(folio,
3846 workingset_refault(folio, shadow);
3848 folio_add_lru(folio);
3851 folio->swap = entry;
3853 folio->private = NULL;
3859 folio = page_folio(page);
3860 swapcache = folio;
3863 if (!folio) {
3889 ret |= folio_lock_or_retry(folio, vmf);
3901 if (unlikely(!folio_test_swapcache(folio) ||
3918 folio = page_folio(page);
3926 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
3927 !folio_test_ksm(folio) && !folio_test_lru(folio))
3931 folio_throttle_swaprate(folio, GFP_KERNEL);
3941 if (unlikely(!folio_test_uptodate(folio))) {
3954 BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
3955 BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
3961 if (!folio_test_ksm(folio)) {
3963 if (folio != swapcache) {
3969 } else if (exclusive && folio_test_writeback(folio) &&
3998 arch_swap_restore(entry, folio);
4006 if (should_try_to_free_swap(folio, vma, vmf->flags))
4007 folio_free_swap(folio);
4019 if (!folio_test_ksm(folio) &&
4020 (exclusive || folio_ref_count(folio) == 1)) {
4035 if (unlikely(folio != swapcache && swapcache)) {
4037 folio_add_lru_vma(folio, vma);
4042 VM_BUG_ON(!folio_test_anon(folio) ||
4047 folio_unlock(folio);
4048 if (folio != swapcache && swapcache) {
4084 folio_unlock(folio);
4086 folio_put(folio);
4087 if (folio != swapcache && swapcache) {
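
The do_swap_page() hits from 3741 to 4087 trace swap-in: look the entry up in the swap cache, and on a miss build a fresh folio that is locked, swap-backed and charged before the data is read back. A compressed sketch of the miss branch, using the same locals and error labels as the listed function (entry, vma, vmf, page, swapcache, shadow, ret, out_page); the shadow lookup and the actual swap read are paraphrased because those lines are not in the listing.

	folio = swap_cache_get_folio(entry, vma, vmf->address);
	if (folio)
		page = folio_file_page(folio, swp_offset(entry));
	swapcache = folio;

	if (!folio) {
		/* Swap cache miss: synchronous swap-in into a private folio. */
		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
					vmf->address, false);
		page = &folio->page;
		if (folio) {
			__folio_set_locked(folio);
			__folio_set_swapbacked(folio);

			if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
							   GFP_KERNEL, entry)) {
				ret = VM_FAULT_OOM;
				goto out_page;
			}

			/* Carry over any workingset shadow entry for this slot. */
			shadow = get_shadow_from_swap_cache(entry);
			if (shadow)
				workingset_refault(folio, shadow);
			folio_add_lru(folio);

			folio->swap = entry;
			/* ... read the swapped-out data into the folio here ... */
			folio->private = NULL;
		}
	}
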
4107 struct folio *folio;
4161 folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
4162 if (!folio)
4165 if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
4167 folio_throttle_swaprate(folio, GFP_KERNEL);
4174 __folio_mark_uptodate(folio);
4176 entry = mk_pte(&folio->page, vma->vm_page_prot);
4197 folio_put(folio);
4202 folio_add_new_anon_rmap(folio, vma, vmf->address);
4205 folio_set_purgeable(folio);
4207 folio_add_lru_vma(folio, vma);
4224 folio_put(folio);
4227 folio_put(folio);
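
Lines 4107-4227 are do_anonymous_page(): allocate a zeroed folio, charge and throttle it, mark it uptodate, then add it to the anon rmap and LRU before the PTE is installed. A condensed sketch of the success path; the zero-page fast path, the PTE lock and recheck, the write-permission bits and the purgeable marking at 4205 are left out or reduced to comments.

	/* Sketch of the success path of an anonymous write/read fault. */
	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
	if (!folio)
		return VM_FAULT_OOM;
	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) {
		folio_put(folio);
		return VM_FAULT_OOM;
	}
	folio_throttle_swaprate(folio, GFP_KERNEL);

	/* The allocator returned a zeroed folio, so it is uptodate already. */
	__folio_mark_uptodate(folio);

	entry = mk_pte(&folio->page, vma->vm_page_prot);
	/* ... mark the PTE young, and writable for VM_WRITE mappings ... */

	/* ... take the PTE lock and recheck that the PTE is still empty ... */

	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
	folio_add_new_anon_rmap(folio, vma, vmf->address);
	folio_add_lru_vma(folio, vma);
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
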
4379 * set_pte_range - Set a range of PTEs to point to pages in a folio.
4381 * @folio: The folio that contains @page.
4386 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
4410 VM_BUG_ON_FOLIO(nr != 1, folio);
4411 folio_add_new_anon_rmap(folio, vma, addr);
4412 folio_add_lru_vma(folio, vma);
4415 folio_add_file_rmap_range(folio, page, nr, vma, false);
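
set_pte_range() at 4379-4415 is where a freshly faulted folio is wired into the address space: a new anonymous folio gets folio_add_new_anon_rmap() plus folio_add_lru_vma() (and must be a single page, hence the VM_BUG_ON at 4410), while a file folio gets folio_add_file_rmap_range() across all nr pages. A sketch of that rmap half, with the PTE construction omitted and the anon-vs-file test simplified to the usual write/private check.

	if (write && !(vma->vm_flags & VM_SHARED)) {
		/* Private write fault: a brand-new anon folio, one page only. */
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
		VM_BUG_ON_FOLIO(nr != 1, folio);
		folio_add_new_anon_rmap(folio, vma, addr);
		folio_add_lru_vma(folio, vma);
	} else {
		/* File-backed: account and map all nr pages of the folio. */
		add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
		folio_add_file_rmap_range(folio, page, nr, vma, false);
	}
	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
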
4488 struct folio *folio = page_folio(page);
4490 set_pte_range(vmf, folio, page, 1, vmf->address);
4609 struct folio *folio;
4632 folio = page_folio(vmf->page);
4633 folio_unlock(folio);
4635 folio_put(folio);
4687 struct folio *folio;
4698 folio = page_folio(vmf->page);
4705 folio_unlock(folio);
4706 tmp = do_page_mkwrite(vmf, folio);
4709 folio_put(folio);
4717 folio_unlock(folio);
4718 folio_put(folio);
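
The hits at 4687-4718 come from do_shared_fault(): __do_fault() returns a locked, referenced folio, the folio is unlocked so ->page_mkwrite() can be called through do_page_mkwrite(), and every failure path drops both the lock and the reference. A hedged sketch of that sequence, following the shape the listed lines suggest.

static vm_fault_t do_shared_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	vm_fault_t ret, tmp;
	struct folio *folio;

	ret = __do_fault(vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		return ret;

	folio = page_folio(vmf->page);

	/* Let the filesystem prepare the folio for becoming writable. */
	if (vma->vm_ops->page_mkwrite) {
		folio_unlock(folio);
		tmp = do_page_mkwrite(vmf, folio);
		if (unlikely(!tmp ||
			     (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
			folio_put(folio);
			return tmp;
		}
	}

	ret |= finish_fault(vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
			    VM_FAULT_RETRY))) {
		folio_unlock(folio);
		folio_put(folio);
		return ret;
	}

	ret |= fault_dirty_shared_page(vmf);
	return ret;
}
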
6059 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
6100 int copy_user_large_folio(struct folio *dst, struct folio *src,
6119 long copy_folio_from_user(struct folio *dst_folio,