Lines matching refs:stack in kernel/fork.c

223 	void *stack;
234 /* Clear the KASAN shadow of the stack. */
237 /* Clear stale pointers from reused stack. */
241 tsk->stack = s->addr;
250 stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
261 if (stack) {
262 tsk->stack_vm_area = find_vm_area(stack);
263 tsk->stack = stack;
265 return stack;
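
The lines above come from the CONFIG_VMAP_STACK branch of alloc_thread_stack_node(): a per-CPU cache of previously freed vmalloc'd stacks is tried first, and only on a miss does the kernel pay for a fresh __vmalloc_node_range() call. A condensed sketch of that shape (version-dependent; older kernels spell the KASAN call kasan_unpoison_shadow(), and NR_CACHED_STACKS/cached_stacks are the fork.c-internal cache):

static void *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	void *stack;
	int i;

	/* Fast path: reuse a stack parked here by free_thread_stack(). */
	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

		s = this_cpu_xchg(cached_stacks[i], NULL);
		if (!s)
			continue;

		/* Clear the KASAN shadow of the stack. */
		kasan_unpoison_range(s->addr, THREAD_SIZE);

		/* Clear stale pointers from reused stack. */
		memset(s->addr, 0, THREAD_SIZE);

		tsk->stack_vm_area = s;
		tsk->stack = s->addr;
		return s->addr;
	}

	/*
	 * Slow path: allocate a new mapping. __GFP_ACCOUNT is masked out
	 * because freed stacks may be cached and reused by another task;
	 * charging happens later, when the stack is actually assigned
	 * (see account_kernel_stack()).
	 */
	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP & ~__GFP_ACCOUNT,
				     PAGE_KERNEL, 0, node,
				     __builtin_return_address(0));
	if (stack) {
		/* find_vm_area() recovers the vm_struct behind the mapping. */
		tsk->stack_vm_area = find_vm_area(stack);
		tsk->stack = stack;
	}
	return stack;
}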
271 tsk->stack = kasan_reset_tag(page_address(page));
272 return tsk->stack;
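
Without CONFIG_VMAP_STACK (and with THREAD_SIZE >= PAGE_SIZE), the matched lines belong to the much simpler page-allocator variant, roughly:

static void *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	if (likely(page)) {
		/* Strip any KASAN tag so the stored pointer is canonical. */
		tsk->stack = kasan_reset_tag(page_address(page));
		return tsk->stack;
	}
	return NULL;
}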
297 vfree_atomic(tsk->stack);
302 __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
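
free_thread_stack() mirrors the allocators. A vmalloc'd stack is first offered back to the per-CPU cache; only if the cache is full is it released with vfree_atomic(), which is safe in contexts where a plain vfree() is not. A page-backed stack simply goes back to the buddy allocator. A simplified sketch (recent kernels also uncharge the memcg pages here):

static void free_thread_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
	int i;

	/* Try to park the stack for reuse by alloc_thread_stack_node(). */
	for (i = 0; i < NR_CACHED_STACKS; i++) {
		if (this_cpu_cmpxchg(cached_stacks[i], NULL,
				     tsk->stack_vm_area) != NULL)
			continue;
		return;
	}

	/* Cache full: drop the mapping (may run in interrupt context). */
	vfree_atomic(tsk->stack);
#else
	__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
#endif
}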
310 unsigned long *stack;
311 stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
312 stack = kasan_reset_tag(stack);
313 tsk->stack = stack;
314 return stack;
319 kmem_cache_free(thread_stack_cache, tsk->stack);
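
On configurations where THREAD_SIZE is smaller than a page, stacks come from a dedicated slab cache instead; the matched lines assemble into this pair of helpers (thread_stack_cache is set up in thread_stack_cache_init()):

static struct kmem_cache *thread_stack_cache;

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
					      int node)
{
	unsigned long *stack;

	stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP,
				      node);
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return stack;
}

static void free_thread_stack(struct task_struct *tsk)
{
	kmem_cache_free(thread_stack_cache, tsk->stack);
}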
387 void *stack = task_stack_page(tsk);
391 /* All stack pages are in the same node. */
396 mod_lruvec_slab_state(stack, NR_KERNEL_STACK_KB,
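
account_kernel_stack() is what makes stacks visible under NR_KERNEL_STACK_KB (e.g. KernelStack in /proc/meminfo): a vmalloc'd stack is charged page by page against each backing page's node and memcg, while a contiguous allocation is charged in one call because all stack pages sit in the same node. Roughly (the exact mod_lruvec_* helper names vary by kernel version):

static void account_kernel_stack(struct task_struct *tsk, int account)
{
	void *stack = task_stack_page(tsk);
	struct vm_struct *vm = task_stack_vm_area(tsk);

	if (vm) {
		int i;

		/* Charge each backing page of the vmalloc'd stack. */
		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
					      account * (PAGE_SIZE / 1024));
	} else {
		/* All stack pages are in the same node. */
		mod_lruvec_slab_state(stack, NR_KERNEL_STACK_KB,
				      account * (THREAD_SIZE / 1024));
	}
}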
432 return; /* Better to leak the stack than to free prematurely */
436 tsk->stack = NULL;
459 * The task is finally done with both the stack and thread_info, so free both.
465 * If the task had a separate stack allocation, it should be gone by now.
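
The freeing side is guarded by the task state and, with CONFIG_THREAD_INFO_IN_TASK, by a stack refcount, so stack walkers holding a reference keep the stack alive after the task dies. The fragments above belong to release_task_stack() and the surrounding free_task() comments; a sketch (newer kernels spell the state check READ_ONCE(tsk->__state)):

static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(tsk->state != TASK_DEAD))
		return;	/* Better to leak the stack than to free prematurely */

	account_kernel_stack(tsk, -1);	/* undo the charge made at fork */
	free_thread_stack(tsk);
	tsk->stack = NULL;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = NULL;
#endif
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	/* Only the last reference actually releases the stack. */
	if (refcount_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif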
875 unsigned long *stack;
885 stack = alloc_thread_stack_node(tsk, node);
886 if (!stack)
901 * arch_dup_task_struct() clobbers the stack-related fields. Make
902 * sure they're properly initialized before using any stack-related functions again.
905 tsk->stack = stack;
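
In dup_task_struct() the ordering matters: arch_dup_task_struct() copies *orig over *tsk wholesale, clobbering the stack fields that alloc_thread_stack_node() just set, so they are saved and restored around the copy. A condensed sketch with the rest of the function elided:

static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	unsigned long *stack;
	struct vm_struct *stack_vm_area;
	int err;

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	stack = alloc_thread_stack_node(tsk, node);
	if (!stack)
		goto free_tsk;

	/* Save before arch_dup_task_struct() copies *orig over *tsk. */
	stack_vm_area = task_stack_vm_area(tsk);

	err = arch_dup_task_struct(tsk, orig);

	/*
	 * arch_dup_task_struct() clobbers the stack-related fields. Make
	 * sure they're properly initialized before using any stack-related
	 * functions again.
	 */
	tsk->stack = stack;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = stack_vm_area;
#endif
	if (err)
		goto free_stack;

	/* ... remainder of dup_task_struct() elided ... */
	return tsk;

free_stack:
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}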
2158 retval = copy_thread(clone_flags, args->stack, args->stack_size, p, args->tls);
2473 .stack = (unsigned long)fn,
2579 .stack = (unsigned long)fn,
2644 .stack = newsp,
2706 .stack = args.stack,
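
These .stack initializers show the two meanings of kernel_clone_args::stack. For clone()/clone3() it is the new user stack supplied by userspace, handed through copy_process() to the arch-specific copy_thread() together with stack_size; for kernel threads it smuggles the function to run, with stack_size carrying its argument. A sketch of the kernel_thread()-style helper (details vary by version; kernel_clone() was named _do_fork() before v5.10):

pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.stack		= (unsigned long)fn,	/* not a stack at all */
		.stack_size	= (unsigned long)arg,
	};

	return kernel_clone(&args);
}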
2724 * clone3_stack_valid - check and prepare stack
2727 * Verify that the stack arguments userspace gave us are sane.
2728 * In addition, set the stack direction for userspace since it's easy for us to determine.
2733 if (kargs->stack == 0) {
2740 if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
2744 kargs->stack += kargs->stack_size;
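
Reassembled from the matched fragments, clone3_stack_valid() enforces that stack and stack_size are either both zero or both valid, and then biases the pointer to the top of the range on stack-grows-down architectures, which is the "set the stack direction for userspace" promise in the comment:

static bool clone3_stack_valid(struct kernel_clone_args *kargs)
{
	if (kargs->stack == 0) {
		if (kargs->stack_size != 0)
			return false;
	} else {
		if (kargs->stack_size == 0)
			return false;

		if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
			return false;

#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_IA64)
		/*
		 * Most architectures grow the stack downward: hand the
		 * arch code the highest address of the buffer.
		 */
		kargs->stack += kargs->stack_size;
#endif
	}

	return true;
}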