Lines matching refs: mlock
3 * linux/mm/mlock.c
140 /* else assume that was the last mlock: reclaim will fix it if not */
182 * mlock. We could use three separate folio batches instead, but one feels
189 unsigned long mlock;
195 mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO);
196 folio = (struct folio *)((unsigned long)folio - mlock);
199 if (mlock & LRU_FOLIO)
201 else if (mlock & NEW_FOLIO)
240 * mlock_folio - mlock a folio already on (or temporarily off) LRU
265 * mlock_new_folio - mlock a newly allocated folio not yet on LRU
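The matches at 189-201 show the decode side of a low-bit pointer tagging scheme: because struct folio is word-aligned, the two low bits of a folio pointer are free, and mm/mlock.c uses them to record whether a batched folio arrived via mlock_folio() (LRU_FOLIO) or mlock_new_folio() (NEW_FOLIO). A minimal userspace sketch of the same trick, with struct item and TAG_* as purely illustrative stand-ins for struct folio and the kernel's flag bits:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the LRU_FOLIO/NEW_FOLIO bits that
 * mm/mlock.c stores in the low bits of a struct folio pointer. */
#define TAG_LRU  0x1UL
#define TAG_NEW  0x2UL
#define TAG_MASK (TAG_LRU | TAG_NEW)

struct item {			/* stand-in for struct folio */
	long payload;
};

static struct item *tag_ptr(struct item *p, unsigned long tag)
{
	/* The trick is only sound because the object's alignment
	 * guarantees the low bits of its address are zero. */
	assert(((uintptr_t)p & TAG_MASK) == 0);
	return (struct item *)((uintptr_t)p | tag);
}

int main(void)
{
	struct item it = { .payload = 42 };
	struct item *tagged = tag_ptr(&it, TAG_LRU);

	/* Decode as the batch drain at lines 195-201 does: mask out
	 * the tag bits, subtract them to recover the real pointer,
	 * then dispatch on the tag. */
	unsigned long tag = (uintptr_t)tagged & TAG_MASK;
	struct item *p = (struct item *)((uintptr_t)tagged - tag);

	if (tag & TAG_LRU)
		printf("came via the mlock path, payload %ld\n", p->payload);
	else if (tag & TAG_NEW)
		printf("came via the mlock_new path, payload %ld\n", p->payload);
	return 0;
}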
359 * mlock_vma_pages_range() - mlock any pages already in the range,
361 * @vma - vma containing range to be mlock()ed or munlock()ed
366 * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
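For reference, the userspace side of the callers named at line 366 looks like this (mlock2() and MLOCK_ONFAULT require _GNU_SOURCE and a glibc new enough to expose them, hence the guard):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	void *buf;

	if (posix_memalign(&buf, 4096, len))
		return 1;

	if (mlock(buf, len))		/* populate and lock the range now */
		perror("mlock");
	if (munlock(buf, len))
		perror("munlock");
#ifdef MLOCK_ONFAULT
	if (mlock2(buf, len, MLOCK_ONFAULT))	/* lock pages only as they fault */
		perror("mlock2");
	munlock(buf, len);
#endif
	if (mlockall(MCL_CURRENT))	/* lock every current mapping */
		perror("mlockall");
	munlockall();
	free(buf);
	return 0;
}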
404 * mlock_fixup - handle mlock[all]/munlock[all] requests.
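mlock_fixup() first makes VMA boundaries coincide with the request: a VMA straddling the range is split (and later re-merged) so that VM_LOCKED lands on exactly the requested pages. A toy model of that splitting, with purely illustrative types and names, and assuming the lock range lies inside the VMA:

#include <stdio.h>

struct toy_vma { unsigned long start, end; int locked; };

/* Split one straddling VMA into up to three pieces and lock the middle,
 * roughly what mlock_fixup() arranges via split_vma(). */
static int fixup(struct toy_vma *v, unsigned long start, unsigned long end,
		 struct toy_vma out[3])
{
	int n = 0;
	if (v->start < start)			/* unlocked head piece */
		out[n++] = (struct toy_vma){ v->start, start, v->locked };
	out[n++] = (struct toy_vma){ start, end, 1 };	/* locked middle */
	if (end < v->end)			/* unlocked tail piece */
		out[n++] = (struct toy_vma){ end, v->end, v->locked };
	return n;
}

int main(void)
{
	struct toy_vma vma = { 0x1000, 0x9000, 0 }, out[3];
	int n = fixup(&vma, 0x3000, 0x6000, out);

	for (int i = 0; i < n; i++)
		printf("[%#lx, %#lx) locked=%d\n", out[i].start, out[i].end,
		       out[i].locked);
	return 0;
}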
564 * convert get_user_pages() return value to posix mlock() error
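Line 564 marks where a get_user_pages() failure is translated into the errno values POSIX specifies for mlock(): a fault on the range becomes ENOMEM, and an allocation failure becomes EAGAIN. A hedged sketch of that mapping, mirroring the helper's historical shape rather than quoting the current kernel code:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the translation at mm/mlock.c line 564: kernel-internal
 * GUP errors are remapped to the errors POSIX allows mlock() to return. */
static long mlock_posix_error(long retval)
{
	if (retval == -EFAULT)		/* bad address in range -> ENOMEM */
		return -ENOMEM;
	if (retval == -ENOMEM)		/* out of memory -> try again later */
		return -EAGAIN;
	return retval;			/* everything else passes through */
}

int main(void)
{
	long codes[] = { -EFAULT, -ENOMEM, -EINTR };
	for (unsigned i = 0; i < sizeof(codes) / sizeof(codes[0]); i++) {
		long out = mlock_posix_error(codes[i]);
		printf("%s -> %s\n", strerror(-codes[i]), strerror(-out));
	}
	return 0;
}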
601 * should not be counted toward the new mlock charge. So check
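The comment at line 601 concerns RLIMIT_MEMLOCK accounting in do_mlock(): the request length plus mm->locked_vm can overshoot the limit when part of the range is already locked, so the already-locked pages are discounted before the limit is enforced. A small numeric illustration, where the page map and all names are hypothetical:

#include <stdio.h>

/* Page-granular model of do_mlock()'s limit check: pages already locked
 * inside [start, start+npages) must not be charged a second time. */
static long count_already_locked(const int *map, long start, long npages)
{
	long n = 0;
	for (long i = 0; i < npages; i++)
		n += map[start + i];
	return n;
}

int main(void)
{
	int map[16] = { [4] = 1, [5] = 1 };	/* pages 4 and 5 already mlocked */
	long locked_vm = 2;			/* consistent with the map */
	long lock_limit = 7;			/* RLIMIT_MEMLOCK, in pages */
	long start = 3, npages = 6;		/* new request covers pages 3-8 */

	long locked = npages + locked_vm;	/* naive charge: 8 pages */
	if (locked > lock_limit)		/* overshoot? discount overlap */
		locked -= count_already_locked(map, start, npages);

	printf("charged %ld of %ld pages -> %s\n", locked, lock_limit,
	       locked <= lock_limit ? "allowed" : "EAGAIN");
	return 0;
}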
622 SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
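Line 622 is the syscall entry itself; a libc-free invocation goes through the same SYSCALL_DEFINE2 path as the glibc wrapper:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	static char buf[1 << 16];

	/* Both calls enter SYSCALL_DEFINE2(mlock, start, len); the kernel
	 * rounds start down and len up to page boundaries itself. */
	if (syscall(SYS_mlock, (unsigned long)buf, sizeof(buf)))
		perror("raw mlock");
	if (munlock(buf, sizeof(buf)))
		perror("munlock");
	return 0;
}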