Lines Matching defs:end

690 			/* New entry for end of split region */
721 } else { /* Trim end of region */
1200 * next node from which to allocate, handling wrap at end of node
1923 * if we would end up overcommitting the surpluses. Abuse
3692 unsigned long reserve, start, end;
3699 end = vma_hugecache_offset(h, vma, vma->vm_end);
3701 reserve = (end - start) - region_count(resv, start, end);
3702 hugetlb_cgroup_uncharge_counter(resv, start, end);
3923 unsigned long start, unsigned long end,
3939 BUG_ON(end & ~huge_page_mask(h));
3952 end);
3953 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3956 for (; address < end; address += sz) {
4042 unsigned long end, struct page *ref_page)
4044 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
4060 unsigned long end, struct page *ref_page)
4065 unsigned long tlb_end = end;
4069 * start/end for worst case tlb flushing.
4079 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
4268 mmu_notifier_invalidate_range(mm, range.start, range.end);
4375 * once for faults beyond end of file.
4799 * to leave any page mapped (as page_mapped()) beyond the end
4802 * page in the radix tree in the vm_shared case beyond the end
5035 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
5039 unsigned long address, unsigned long end, pgprot_t newprot)
5052 * start/end. Set range.start/range.end to cover the maximum possible
5056 0, vma, mm, start, end);
5057 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5059 BUG_ON(address >= end);
5060 flush_cache_range(vma, range.start, range.end);
5064 for (; address < end; address += huge_page_size(h)) {
5115 flush_hugetlb_tlb_range(vma, range.start, range.end);
5117 flush_hugetlb_tlb_range(vma, start, end);
5292 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
5306 chg = region_del(resv_map, start, end);
5310 * allocated. If end == LONG_MAX, it will not fail.
5359 unsigned long end = base + PUD_SIZE;
5364 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
5370 * Determine if start,end range within vma could be mapped by shared pmd.
5371 * If yes, adjust start and end to cover range associated with possible
5375 unsigned long *start, unsigned long *end)
5381 * vma needs to span at least one aligned PUD size and the start,end range
5385 (*end <= v_start) || (*start >= v_end))
5392 if (*end < v_end)
5393 *end = ALIGN(*end, PUD_SIZE);
5512 unsigned long *start, unsigned long *end)
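
The matches from source lines 5359 through 5393 above all belong to the PMD-sharing range adjustment: when a shareable VMA may map huge pages through shared PMDs, the start,end window passed to unmap or protection changes is widened to PUD_SIZE boundaries so the shared page-table page is fully covered. A minimal sketch of that rounding, reconstructed from the comment fragments and the ALIGN call visible above (the function name sketch_adjust_range and the exact guard ordering are assumptions, not verbatim kernel code; PUD_SIZE, ALIGN, ALIGN_DOWN and VM_MAYSHARE come from the usual kernel headers):

	/*
	 * Sketch only: widen [*start, *end) to PUD_SIZE boundaries when the
	 * VMA is shareable and the aligned window lies inside the VMA.
	 */
	static void sketch_adjust_range(struct vm_area_struct *vma,
					unsigned long *start, unsigned long *end)
	{
		unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE);
		unsigned long v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);

		/*
		 * The VMA must span at least one aligned PUD and the
		 * start,end range must overlap that aligned window,
		 * otherwise no shared PMD can be involved.
		 */
		if (!(vma->vm_flags & VM_MAYSHARE) || v_start >= v_end ||
		    *end <= v_start || *start >= v_end)
			return;

		/* Round start down and end up to PUD_SIZE within the VMA. */
		if (*start > v_start)
			*start = ALIGN_DOWN(*start, PUD_SIZE);
		if (*end < v_end)
			*end = ALIGN(*end, PUD_SIZE);
	}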