Lines Matching refs:from
246 /* Helper that removes a struct file_region from the resv_map cache and returns
250 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
260 nrg->from = from;
338 if (&prg->link != &resv->regions && prg->to == rg->from &&
350 if (&nrg->link != &resv->regions && nrg->from == rg->to &&
352 nrg->from = rg->from;
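The matches above come from the code that pulls a struct file_region out of the resv_map cache and then merges it with neighbouring entries whose endpoints touch (prg->to == rg->from extends the previous entry, nrg->from == rg->to extends the next one). A minimal user-space model of that coalescing step is sketched below; the array layout and the coalesce() helper are illustrative stand-ins for the kernel's list_head based code, not the real implementation.

    #include <stdio.h>

    /* Toy model of the resv_map entries: [from, to) page ranges. */
    struct file_region { long from, to; };

    /* Merge region i with its neighbours when their endpoints touch,
     * mirroring the prg->to == rg->from and nrg->from == rg->to checks. */
    static int coalesce(struct file_region *r, int n, int i)
    {
        if (i > 0 && r[i - 1].to == r[i].from) {      /* previous absorbs us */
            r[i - 1].to = r[i].to;
            for (int j = i; j < n - 1; j++)
                r[j] = r[j + 1];
            n--;
            i--;
        }
        if (i + 1 < n && r[i + 1].from == r[i].to) {  /* next absorbs us */
            r[i + 1].from = r[i].from;
            for (int j = i; j < n - 1; j++)
                r[j] = r[j + 1];
            n--;
        }
        return n;   /* regions remaining after the merge */
    }

    int main(void)
    {
        struct file_region r[] = { {0, 2}, {2, 5}, {5, 9} };
        int n = coalesce(r, 3, 1);
        for (int i = 0; i < n; i++)
            printf("[%ld, %ld)\n", r[i].from, r[i].to);   /* prints [0, 9) */
        return 0;
    }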
381 * [last_accounted_offset, rg->from), at every iteration, with some
386 if (rg->from < f) {
398 if (rg->from > t)
401 /* Add an entry for last_accounted_offset -> rg->from, and
404 if (rg->from > last_accounted_offset) {
405 add += rg->from - last_accounted_offset;
408 resv, last_accounted_offset, rg->from);
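These matches appear to belong to the walk that fills in the reservation map for [f, t): at each existing region the code accounts the gap [last_accounted_offset, rg->from) and, when asked, pulls a cache entry to cover it. The sketch below only models the counting side of that walk (the 'add' total); the uncovered_in_range() name and the array representation are assumptions for illustration.

    #include <stdio.h>

    struct file_region { long from, to; };

    /* Count pages in [f, t) not covered by any existing region.
     * Regions are assumed sorted by 'from' and non-overlapping. */
    static long uncovered_in_range(const struct file_region *r, int n,
                                   long f, long t)
    {
        long add = 0, last_accounted_offset = f;

        for (int i = 0; i < n && last_accounted_offset < t; i++) {
            if (r[i].to <= last_accounted_offset)  /* entirely behind us */
                continue;
            if (r[i].from > t)                     /* past the range: stop */
                break;
            if (r[i].from > last_accounted_offset) /* gap before this region */
                add += r[i].from - last_accounted_offset;
            last_accounted_offset = r[i].to;
        }
        if (last_accounted_offset < t)             /* gap after the last region */
            add += t - last_accounted_offset;
        return add;
    }

    int main(void)
    {
        struct file_region r[] = { {3, 5}, {8, 10} };
        /* gaps [0,3), [5,8) and [10,12): 3 + 3 + 2 = 8 */
        printf("%ld\n", uncovered_in_range(r, 2, 0, 12));
        return 0;
    }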
499 * map. Regions will be taken from the cache to fill in this range.
628 * Delete the specified range [f, t) from the reserve map. If the
633 * Returns the number of huge pages deleted from the reserve map.
653 * ranges are normally of the form [from, to). However, there
655 * (from, to) with from == to. Check for placeholder entries
658 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
661 if (rg->from >= t)
664 if (f > rg->from && t < rg->to) { /* Must split region */
691 nrg->from = t;
706 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
707 del += rg->to - rg->from;
709 rg->to - rg->from, true);
715 if (f <= rg->from) { /* Trim beginning of region */
717 t - rg->from, false);
719 del += t - rg->from;
720 rg->from = t;
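The cluster above covers the [f, t) deletion cases: a range strictly inside a region forces a split (the nrg->from = t entry), a fully covered region is removed whole, and a range overlapping only one end trims that end. A self-contained model of the per-region case analysis, under the assumption of a single region and an illustrative del_from_region() helper, might look like:

    #include <stdio.h>

    struct file_region { long from, to; };

    /* Remove [f, t) from *rg and return the number of pages deleted.
     * If the range splits the region, the upper half goes into *split. */
    static long del_from_region(struct file_region *rg, long f, long t,
                                struct file_region *split, int *did_split)
    {
        *did_split = 0;

        if (rg->to <= f || rg->from >= t)        /* no overlap at all */
            return 0;

        if (f > rg->from && t < rg->to) {        /* must split region */
            split->from = t;                     /* upper piece, cf. nrg->from = t */
            split->to = rg->to;
            rg->to = f;                          /* lower piece keeps the front */
            *did_split = 1;
            return t - f;
        }

        if (f <= rg->from && t >= rg->to) {      /* remove entire region */
            long del = rg->to - rg->from;
            rg->from = rg->to = 0;               /* mark it empty */
            return del;
        }

        if (f <= rg->from) {                     /* trim beginning of region */
            long del = t - rg->from;
            rg->from = t;
            return del;
        }

        /* trim end of region */
        long del = rg->to - f;
        rg->to = f;
        return del;
    }

    int main(void)
    {
        struct file_region rg = { 0, 10 }, upper;
        int did_split;
        long del = del_from_region(&rg, 3, 7, &upper, &did_split);

        printf("deleted %ld, low [%ld,%ld)", del, rg.from, rg.to);
        if (did_split)
            printf(", high [%ld,%ld)", upper.from, upper.to);
        printf("\n");     /* deleted 4, low [0,3), high [7,10) */
        return 0;
    }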
738 * and removed from the page cache. This routine will adjust the subpool
782 if (rg->from >= t)
785 seg_from = max(rg->from, f);
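The seg_from = max(rg->from, f) line is the usual interval-overlap clamp used when totting up how much of [f, t) the existing regions already cover. A tiny sketch of that arithmetic (the count_overlap() name is illustrative):

    #include <stdio.h>

    struct file_region { long from, to; };

    /* Sum the overlap between [f, t) and each region in a sorted list. */
    static long count_overlap(const struct file_region *r, int n, long f, long t)
    {
        long chg = 0;

        for (int i = 0; i < n; i++) {
            if (r[i].to <= f)
                continue;                               /* ends before the range */
            if (r[i].from >= t)
                break;                                  /* starts after the range */
            long seg_from = r[i].from > f ? r[i].from : f;  /* max(rg->from, f) */
            long seg_to   = r[i].to   < t ? r[i].to   : t;  /* min(rg->to, t)  */
            chg += seg_to - seg_from;
        }
        return chg;
    }

    int main(void)
    {
        struct file_region r[] = { {0, 4}, {6, 12} };
        printf("%ld\n", count_overlap(r, 2, 2, 8));     /* [2,4) + [6,8) = 4 */
        return 0;
    }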
914 * reservations are to be un-charged from here.
1014 * coming from reserved pool in releasing step. Currently, we
1049 * Very Subtle - The value of chg comes from a previous
1198 * returns the previously saved node ["this node"] from which to
1200 * next node from which to allocate, handling wrap at end of node
1218 * node ["this node"] from which to free a huge page. Advance the
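The node-selection matches describe a round-robin walk: remember the node used last time, hand out the next allowed node, and wrap at the end of the node list. Below is a plain modulo walk over an allowed-node array; the helper and the array-based mask are illustrative stand-ins for the kernel's nodemask-based helpers.

    #include <stdio.h>

    /* Advance to the next allowed node after 'node', wrapping at the end. */
    static int next_allowed(int node, const int *allowed, int nr)
    {
        for (int i = 0; i < nr; i++) {
            int candidate = (node + 1 + i) % nr;
            if (allowed[candidate])
                return candidate;
        }
        return node;   /* no other node allowed: stay where we are */
    }

    int main(void)
    {
        int allowed[4] = { 1, 0, 1, 1 };   /* node 1 excluded from the mask */
        int node = 3;

        for (int i = 0; i < 4; i++) {
            node = next_allowed(node, allowed, 4);
            printf("%d ", node);           /* prints: 0 2 3 0 */
        }
        printf("\n");
        return 0;
    }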
1450 * Can't pass hstate in here because it is called from the
1473 * remove the reserved page from the subpool.
1500 /* remove the page from active list */
1513 * As free_huge_page() can be called from a non-task context, we have
1587 * hugepages and clear the PG_reserved bit from all tail pages
1764 * Free huge page from pool from next node to free.
1853 * Move PageHWPoison flag from head page to the raw error page,
1874 * make specified memory blocks removable from the system.
1900 * Allocates a fresh surplus page from the page allocator.
1965 * Use the VMA's mpolicy to allocate a huge page from the buddy.
2081 * process from stealing the pages as they are added to the pool but
2256 * value returned from reserve map manipulation routines above.
2316 * consumed. This may prevent the task from
2400 * from the global free pool (global change). gbl_chg == 0 indicates
2519 * in order to fix confusing memory reports from free(1) and
2755 /* Bail for signals. Probably ctrl-c from user */
3099 * Unregister hstate attributes from a single node device.
3421 * allocated here from bootmem allocator.
3644 * Apart from cpuset, we also have memory policy mechanism that
3645 * also determines from which node the kernel will allocate memory
4086 * from other VMAs and let the children be SIGKILLed if they are faulting the
4099 * from page cache lookup which is in HPAGE_SIZE units.
4126 * Unmap the page from other VMAs without their own reserves.
4130 * from the time of fork. This would look like data corruption
4143 * Keep the pte_same checks anyway to make transition from the mutex easier.
4196 * reliability, unmap the page from child processes. The child
4334 * set page dirty so that it will not be removed from cache/file
4363 * original mapper has unmapped pages from the child due to a failed
4405 * operation should be careful from here.
4417 * tasks from racing to fault in the same page which
4587 * 1) It prevents huge_pmd_unshare from being called elsewhere
4624 * check prevents the kernel from going below assuming that we have
4702 * the page is not used after unlocked before returning from the current
4703 * page fault. So we are safe from accessing freed page, even if we wait
4910 * caller from accessing to them.) In order to do this, we use
4914 * directly from any kind of swap entries.
4990 * tail pages from being rearranged in any way. So this
5131 long from, long to,
5143 if (from > to) {
5170 chg = region_chg(resv_map, from, to, &regions_needed);
5178 chg = to - from;
5236 add = region_add(resv_map, from, to, regions_needed, h, h_cg);
5286 region_abort(resv_map, from, to, regions_needed);
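The reservation matches above show a two-phase reserve-map protocol: region_chg() first estimates how many pages in [from, to) still need reserving (private mappings simply use to - from), the surrounding accounting is charged, and only then does region_add() commit the range, with region_abort() undoing the tentative charge on failure. The control flow is sketched below with stubbed helpers; the stub names, the fixed limit, and reserve_pages() itself are assumptions for illustration.

    #include <stdio.h>

    /* Stubs standing in for the reserve map and accounting operations. */
    static long region_chg_stub(long from, long to)   { return to - from; }
    static long region_add_stub(long from, long to)   { return to - from; }
    static void region_abort_stub(long from, long to) { (void)from; (void)to; }
    static int  accounting_ok(long pages)             { return pages <= 100; }

    static int reserve_pages(long from, long to)
    {
        if (from > to)
            return -1;                           /* invalid range */

        long chg = region_chg_stub(from, to);    /* phase 1: estimate the charge */
        if (!accounting_ok(chg)) {
            region_abort_stub(from, to);         /* undo the tentative charge */
            return -1;
        }

        long add = region_add_stub(from, to);    /* phase 2: commit the range */
        if (add < chg) {
            /* a racing reservation consumed part of the range between the
             * two phases; only the difference needs adjusting afterwards */
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", reserve_pages(0, 10));    /* fits the limit: 0  */
        printf("%d\n", reserve_pages(0, 1000));  /* over the limit: -1 */
        return 0;
    }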
5407 * NOTE: This routine is only called from huge_pte_alloc. Some callers of
5634 * prevents the head page and tail pages from being rearranged