Lines matching defs:false
119 return false;
153 * default, "false" means prefaulted entries will be 'young'.
155 return false;
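The two hits at 153/155 fit the arch_wants_old_prefaulted_pte() default: an arch-overridable hook whose stock version returns false, meaning prefaulted PTEs are installed 'young'. A sketch of that weak-default pattern, reconstructed from memory of the v6.x source (the listing itself does not name the file, though the line numbers fit mm/memory.c):

    #ifndef arch_wants_old_prefaulted_pte
    static inline bool arch_wants_old_prefaulted_pte(void)
    {
            /*
             * Transitioning a PTE from 'old' to 'young' can be expensive
             * on some architectures, even if done in hardware. By
             * default, "false" means prefaulted entries will be 'young'.
             */
            return false;
    }
    #endif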
564 * And for normal mappings this is false.
832 BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));
946 if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
955 page_dup_file_rmap(page, false);
989 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
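The boolean in the four calls above is not the same parameter everywhere: in the rmap helpers it is compound (false = the page is mapped at PTE granularity, not as a THP), while in vma_alloc_folio() it is hugepage (false = skip THP-style NUMA placement). page_try_dup_anon_rmap() can fail when the page may be pinned, which is why line 946 checks its result while line 832 (device-private pages, which cannot be pinned) BUG_ON()s it. The v6.x-era declarations, quoted from memory as a reference sketch:

    /* include/linux/rmap.h and include/linux/gfp.h, ~v6.4 */
    int page_try_dup_anon_rmap(struct page *page, bool compound,
                               struct vm_area_struct *vma);
    void page_dup_file_rmap(struct page *page, bool compound);
    void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
                          bool compound);
    struct folio *vma_alloc_folio(gfp_t gfp, int order,
                                  struct vm_area_struct *vma,
                                  unsigned long addr, bool hugepage);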
1239 * false when we can speed up fork() by allowing lazy page faults later until
1266 return false;
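Lines 1239 and 1266 are the doc comment and tail of the fork-time decision helper (vma_needs_copy() in v6.x): page tables are copied eagerly only when later faults could not rebuild them. A condensed sketch, assuming that context:

    static bool vma_needs_copy(struct vm_area_struct *dst_vma,
                               struct vm_area_struct *src_vma)
    {
            if (userfaultfd_wp(dst_vma))
                    return true;   /* uffd-wp state lives in the pgtable */

            if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                    return true;   /* no struct page to re-fault from */

            if (src_vma->anon_vma)
                    return true;   /* anon pages must be CoW-shared now */

            /*
             * Otherwise skip the copy and let page faults repopulate
             * the tables lazily; fork() becomes much cheaper.
             */
            return false;
    }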
1372 return false;
1465 page_remove_rmap(page, vma, false);
1492 page_remove_rmap(page, vma, false);
1561 __split_huge_pmd(vma, pmd, addr, false, NULL);
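In __split_huge_pmd() the fourth argument is freeze: false requests a plain remap of the PMD as PTEs, while true (used while splitting a folio) replaces the entries with migration entries. The v6.x signature, from memory:

    void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                          unsigned long address, bool freeze,
                          struct folio *folio);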
1777 unmap_single_vma(&tlb, vma, address, end, details, false);
1853 page_add_file_rmap(page, vma, false);
2228 false);
2270 return false;
2303 * result in pfn_t_has_page() == false.
2322 return __vm_insert_mixed(vma, addr, pfn, false);
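Line 2322 is the exported wrapper, and the trailing boolean is mkwrite. A sketch of the wrapper pair as it looks in v6.x:

    vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
                                unsigned long addr, pfn_t pfn)
    {
            return __vm_insert_mixed(vma, addr, pfn, false);
    }

    /*
     * The _mkwrite variant passes true, so an existing read-only PTE
     * for the same pfn is upgraded to writable instead of rejected.
     */
    vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
                                        unsigned long addr, pfn_t pfn)
    {
            return __vm_insert_mixed(vma, addr, pfn, true);
    }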
2781 return __apply_to_page_range(mm, addr, size, fn, data, false);
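Same wrapper pattern at 2781, where the final boolean is create: with false, page-table levels that don't already exist are not allocated, which is the difference between apply_to_existing_page_range() and apply_to_page_range(). Sketched from memory:

    /*
     * Visits only PTEs that already exist; apply_to_page_range()
     * passes create == true and allocates missing levels instead.
     */
    int apply_to_existing_page_range(struct mm_struct *mm,
                                     unsigned long addr, unsigned long size,
                                     pte_fn_t fn, void *data)
    {
            return __apply_to_page_range(mm, addr, size, fn, data, false);
    }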
3087 vmf->address, false);
3195 page_remove_rmap(vmf->page, vma, false);
3522 details.even_cows = false;
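even_cows controls whether private CoW copies are zapped along with the pagecache: false is the invalidation case (a process's private copies survive), whereas file truncation sets it to true. A hedged sketch of the zap_details setup this line belongs to:

    struct zap_details details = { };

    /*
     * Invalidating pagecache only: do not touch private CoW copies.
     * Truncation paths would set even_cows = true instead.
     */
    details.even_cows = false;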
3649 return false;
3745 bool need_clear_cache = false;
3746 bool exclusive = false;
3830 vma, vmf->address, false);
3989 exclusive = false;
4052 * (to avoid false positives from pte_same). For
4274 page->index, 1, false);
4415 folio_add_file_rmap_range(folio, page, nr, vma, false);
4592 /* Return true if we should do read fault-around, false otherwise */
4597 return false;
4600 return false;
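Lines 4592-4600 are the fault-around gate. A condensed sketch of should_fault_around(), assuming the v6.x layout:

    static inline bool should_fault_around(struct vm_fault *vmf)
    {
            /* No ->map_pages?  No way to fault around... */
            if (!vmf->vma->vm_ops->map_pages)
                    return false;

            if (uffd_disable_fault_around(vmf->vma))
                    return false;

            /* A single page implies no faulting 'around' at all. */
            return fault_around_bytes >> PAGE_SHIFT > 1;
    }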
4800 bool writable = false;
4873 writable = false;
4945 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
5114 hugepage_vma_check(vma, vm_flags, false, true, true)) {
5148 hugepage_vma_check(vma, vm_flags, false, true, true)) {
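The three booleans at 5114/5148 are positional and easy to misread; in the v6.x prototype they are smaps, in_pf and enforce_sysfs, so both call sites ask "may this VMA use THP in a page fault, honoring sysfs policy, not for smaps reporting". The declaration, from memory:

    bool hugepage_vma_check(struct vm_area_struct *vma,
                            unsigned long vm_flags, bool smaps,
                            bool in_pf, bool enforce_sysfs);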
5258 current->in_lru_fault = false;
5356 mem_cgroup_oom_synchronize(false);
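The argument to mem_cgroup_oom_synchronize() is handle: false only clears the task's memcg-OOM state without invoking the OOM killer, which is what the fault-completion path wants when the fault did not end in VM_FAULT_OOM. The call site, sketched from memory:

    if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
            mem_cgroup_oom_synchronize(false);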
5376 return false;
5392 return false;
5401 return false;
6167 return false;