Lines Matching defs:start
385 /* Skip irrelevant regions that start before our range. */
3692 unsigned long reserve, start, end;
3698 start = vma_hugecache_offset(h, vma, vma->vm_start);
3701 reserve = (end - start) - region_count(resv, start, end);
3702 hugetlb_cgroup_uncharge_counter(resv, start, end);
3923 unsigned long start, unsigned long end,
3938 BUG_ON(start & ~huge_page_mask(h));
3951 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
3953 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3955 address = start;
4041 struct vm_area_struct *vma, unsigned long start,
4044 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
4059 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
4064 unsigned long tlb_start = start;
4069 * start/end for worst case tlb flushing.
4079 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
4268 mmu_notifier_invalidate_range(mm, range.start, range.end);
5042 unsigned long start = address;
5052 * start/end. Set range.start/range.end to cover the maximum possible
5056 0, vma, mm, start, end);
5057 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5060 flush_cache_range(vma, range.start, range.end);
5115 flush_hugetlb_tlb_range(vma, range.start, range.end);
5117 flush_hugetlb_tlb_range(vma, start, end);
5292 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
5306 chg = region_del(resv_map, start, end);
5370 * Determine if start,end range within vma could be mapped by shared pmd.
5371 * If yes, adjust start and end to cover range associated with possible
5375 unsigned long *start, unsigned long *end)
5381  * vma needs to span at least one aligned PUD size, and the start,end range
5385 (*end <= v_start) || (*start >= v_end))
5389 if (*start > v_start)
5390 *start = ALIGN_DOWN(*start, PUD_SIZE);
5512 unsigned long *start, unsigned long *end)
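
The fragments at lines 385 and 3692-3702 above (which appear to come from the kernel's mm/hugetlb.c) compute an outstanding reservation as the range length minus the pages already covered by regions in the reserve map: reserve = (end - start) - region_count(resv, start, end). Below is a minimal userspace sketch of that arithmetic only; the struct region, the plain array, and this region_count are simplified stand-ins for the kernel's resv_map machinery, not its implementation.

    /*
     * Sketch of the arithmetic at lines 3698-3701: the outstanding reservation
     * for a range is its length minus the pages already covered by regions in
     * the reserve map.  Everything here is a simplified stand-in, not kernel code.
     */
    #include <stdio.h>

    struct region { long from, to; };   /* half-open [from, to), in huge pages */

    /* Count how many pages of [start, end) are covered by existing regions. */
    static long region_count(const struct region *regions, int nr,
                             long start, long end)
    {
            long covered = 0;

            for (int i = 0; i < nr; i++) {
                    /*
                     * Clamp each region to [start, end); regions entirely
                     * outside the range contribute nothing (cf. the "skip
                     * irrelevant regions" comment at line 385).
                     */
                    long lo = regions[i].from > start ? regions[i].from : start;
                    long hi = regions[i].to < end ? regions[i].to : end;

                    if (hi > lo)
                            covered += hi - lo;
            }
            return covered;
    }

    int main(void)
    {
            /* Regions standing in for pages whose reservation was already consumed. */
            struct region used[] = { { 0, 2 }, { 5, 8 } };
            long start = 0, end = 10;

            /* Mirrors "reserve = (end - start) - region_count(resv, start, end)". */
            long reserve = (end - start) - region_count(used, 2, start, end);

            printf("outstanding reservation: %ld pages\n", reserve);    /* prints 5 */
            return 0;
    }

In the kernel the reserve map is a sorted, lock-protected list rather than an array, but the clamp-and-sum arithmetic is the same.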
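
Lines 3938 and 3955 show the unmap path asserting that start has no bits below the huge page boundary (BUG_ON(start & ~huge_page_mask(h))) and then walking from start in huge-page steps. A small sketch of that mask check and walk follows; the fixed 2 MiB HPAGE_SIZE/HPAGE_MASK are assumptions standing in for the kernel's per-hstate huge_page_size()/huge_page_mask().

    /*
     * Sketch of the alignment check at line 3938 and the walk beginning at
     * line 3955.  HPAGE_SIZE is a fixed 2 MiB stand-in for the kernel's
     * per-hstate huge page size; assumes 64-bit unsigned long.
     */
    #include <assert.h>
    #include <stdio.h>

    #define HPAGE_SIZE  (2UL << 20)
    #define HPAGE_MASK  (~(HPAGE_SIZE - 1))

    /* Visit every huge page in [start, end); both ends must be aligned. */
    static void walk_huge_range(unsigned long start, unsigned long end)
    {
            unsigned long address;

            /* Same idea as BUG_ON(start & ~huge_page_mask(h)): no stray low bits. */
            assert((start & ~HPAGE_MASK) == 0);
            assert((end & ~HPAGE_MASK) == 0);

            for (address = start; address < end; address += HPAGE_SIZE)
                    printf("unmap huge page at 0x%lx\n", address);
    }

    int main(void)
    {
            walk_huge_range(2UL << 20, 8UL << 20);  /* three 2 MiB pages */
            return 0;
    }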
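
The comment and alignment fragments at lines 5370-5390 describe adjust_range_if_pmd_sharing_possible: when a shared-PMD mapping is possible, start/end are widened to PUD boundaries so that TLB flushes and mmu notifier ranges cover the worst case (the same concern behind the "worst case tlb flushing" and "maximum possible" comments at lines 4069 and 5052). A minimal sketch of that widening arithmetic follows; the 1 GiB PUD_SIZE, the one-argument ALIGN helpers, and struct fake_vma are assumptions, not the kernel definitions.

    /*
     * Sketch of the widening arithmetic at lines 5375-5390.  PUD_SIZE (1 GiB),
     * the one-argument ALIGN helpers, and struct fake_vma are simplified
     * assumptions; assumes 64-bit unsigned long.
     */
    #include <stdio.h>

    #define PUD_SIZE       (1UL << 30)
    #define PUD_MASK       (~(PUD_SIZE - 1))
    #define ALIGN_DOWN(x)  ((x) & PUD_MASK)
    #define ALIGN_UP(x)    (((x) + PUD_SIZE - 1) & PUD_MASK)

    struct fake_vma {                    /* stands in for vm_area_struct */
            unsigned long vm_start, vm_end;
            int may_share;               /* stands in for VM_MAYSHARE */
    };

    /* Widen [*start, *end) to PUD boundaries when PMD sharing is possible. */
    static void adjust_range_for_sharing(const struct fake_vma *vma,
                                         unsigned long *start, unsigned long *end)
    {
            unsigned long v_start = ALIGN_UP(vma->vm_start);
            unsigned long v_end = ALIGN_DOWN(vma->vm_end);

            /*
             * The vma must be shareable, span at least one aligned PUD, and
             * overlap the requested range; otherwise leave start/end alone.
             */
            if (!vma->may_share || v_end <= v_start ||
                *end <= v_start || *start >= v_end)
                    return;

            if (*start > v_start)
                    *start = ALIGN_DOWN(*start);    /* cf. line 5390 */
            if (*end < v_end)
                    *end = ALIGN_UP(*end);
    }

    int main(void)
    {
            struct fake_vma vma = { .vm_start = 1UL << 30, .vm_end = 5UL << 30,
                                    .may_share = 1 };
            unsigned long start = (2UL << 30) + (4UL << 20);
            unsigned long end = (3UL << 30) + (4UL << 20);

            adjust_range_for_sharing(&vma, &start, &end);
            printf("widened range: 0x%lx-0x%lx\n", start, end);    /* 2 GiB-4 GiB */
            return 0;
    }

The callers in the listing then flush or notify over the widened range while still operating on the original start/end, which is why lines 5115 and 5117 choose between range.start/range.end and start/end.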