/kernel/liteos_a/testsuites/unittest/process/basic/pthread/smoke/
    pthread_test_018.cpp
        70  void *stack = malloc(stackSize);   in GroupProcess() local
        71  ICUNIT_ASSERT_NOT_EQUAL((int)stack, 0, stack);   in GroupProcess()
        73  ret = pthread_attr_setstack(&attr, stack, stackSize);   in GroupProcess()
        78  ICUNIT_ASSERT_EQUAL(getStack, stack, getStack);   in GroupProcess()
        89  free(stack);   in GroupProcess()
        94  stack = malloc(stackSize);   in GroupProcess()
        95  ICUNIT_ASSERT_NOT_EQUAL((int)stack, 0, stack);   in GroupProcess()
        97  ret = pthread_attr_setstack(&attr, stack, stackSiz...   in GroupProcess()
        [more matches not shown]

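The LiteOS test above exercises pthread_attr_setstack()/pthread_attr_getstack() with a heap-allocated buffer. As a rough, standalone sketch of the same POSIX calls (not the test itself; the 1 MiB size is arbitrary, and some implementations additionally require page alignment, e.g. via posix_memalign):

    /* Sketch: run a thread on a caller-supplied stack and verify the
     * attribute reports back what was set. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *worker(void *arg)
    {
        (void)arg;
        return NULL;
    }

    int main(void)
    {
        pthread_attr_t attr;
        pthread_t tid;
        size_t size = 1024 * 1024;      /* must be >= PTHREAD_STACK_MIN */
        void *stack = malloc(size);
        void *got_stack = NULL;
        size_t got_size = 0;

        if (stack == NULL)
            return 1;

        pthread_attr_init(&attr);
        if (pthread_attr_setstack(&attr, stack, size) != 0)
            return 1;

        /* The attribute should report back exactly what was set. */
        pthread_attr_getstack(&attr, &got_stack, &got_size);
        printf("match: %d\n", got_stack == stack && got_size == size);

        if (pthread_create(&tid, &attr, worker, NULL) == 0)
            pthread_join(tid, NULL);

        pthread_attr_destroy(&attr);
        free(stack);                    /* only safe after the thread has exited */
        return 0;
    }
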
/third_party/python/Objects/
    call.c
        17  _PyStack_UnpackDict_Free(PyObject *const *stack, Py_ssize_t nargs,
       383  _PyFunction_Vectorcall(PyObject *func, PyObject* const* stack,   in _PyFunction_Vectorcall() argument
       391  assert(nargs == 0 || stack != NULL);   in _PyFunction_Vectorcall()
       393  return _PyEval_Vector(tstate, f, NULL, stack, nargs, kwnames);   in _PyFunction_Vectorcall()
       396  return _PyEval_Vector(tstate, f, f->func_globals, stack, nargs, kwnames);   in _PyFunction_Vectorcall()
       462  PyObject **stack;   in _PyObject_Call_Prepend() local
       466  stack = small_stack;   in _PyObject_Call_Prepend()
       469  stack = PyMem_Malloc((argcount + 1) * sizeof(PyObject *));   in _PyObject_Call_Prepend()
       470  if (stack == NULL) {   in _PyObject_Call_Prepend()
       477  stack[...   in _PyObject_Call_Prepend()
       500  PyObject **stack;   in _PyObject_CallFunctionVa() local
       775  PyObject **stack;   in object_vacall() local
       978  PyObject **stack = PyMem_Malloc((1 + nargs + nkwargs) * sizeof(args[0]));   in _PyStack_UnpackDict() local
      1031  _PyStack_UnpackDict_Free(PyObject *const *stack, Py_ssize_t nargs, PyObject *kwnames)   in _PyStack_UnpackDict_Free() argument
        [more matches not shown]

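The call.c matches show CPython building temporary argument vectors: a small on-stack array (small_stack) when the argument count is low, PyMem_Malloc otherwise. A generic C sketch of that small-buffer pattern (call_prepended() and the threshold of 5 are invented for illustration, not CPython API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define SMALL_STACK_LEN 5

    /* Hypothetical helper: call something with `self` prepended to `args`,
     * using an on-stack buffer for small argument counts and the heap otherwise. */
    static int call_prepended(void *self, void *const *args, size_t argcount)
    {
        void *small_stack[SMALL_STACK_LEN];
        void **stack;

        if (argcount + 1 <= SMALL_STACK_LEN) {
            stack = small_stack;                  /* common case: no allocation */
        } else {
            stack = malloc((argcount + 1) * sizeof(*stack));
            if (stack == NULL)
                return -1;
        }

        stack[0] = self;
        memcpy(&stack[1], args, argcount * sizeof(*stack));

        /* ... the real code would now hand (stack, argcount + 1) to the callee ... */
        printf("calling with %zu args\n", argcount + 1);

        if (stack != small_stack)
            free(stack);
        return 0;
    }

    int main(void)
    {
        void *args[3] = { 0, 0, 0 };
        return call_prepended((void *)0x1, args, 3);
    }
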
/test/xts/acts/arkui/ace_c_arkui_test/entry/src/main/cpp/commonattrs/
    commonattrs_hittestbehavior_test.cpp
        73  // create stack, commonAttrs and commonAttrsBrother   in CreateSubCommonAttrsNode()
        74  auto stack = node_api->createNode(ARKUI_NODE_STACK);   in CreateSubCommonAttrsNode() local
        78  // set stack id   in CreateSubCommonAttrsNode()
        81  node_api->setAttribute(stack, NODE_ID, &idItem);   in CreateSubCommonAttrsNode()
        83  // set stack width and height   in CreateSubCommonAttrsNode()
        86  node_api->setAttribute(stack, NODE_WIDTH, &stackWidthItem);   in CreateSubCommonAttrsNode()
        90  node_api->setAttribute(stack, NODE_HEIGHT, &stackHeightItem);   in CreateSubCommonAttrsNode()
        92  // set stack backgroundColor   in CreateSubCommonAttrsNode()
        96  node_api->setAttribute(stack, NODE_BACKGROUND_COLOR, &stackBackgroundColorItem);   in CreateSubCommonAttrsNode()
       124  // add commonAttrs to stack   in CreateSubCommonAttrsNode()
       196  auto stack = CreateSubCommonAttrsNode(nodeAPI, hitTestBehavior, onTouchTestStack.c_str(),   in CreateNativeNode() local
        [more matches not shown]

/test/xts/acts/arkui/ace_c_arkui_test/entry/src/main/cpp/customcomponent/
    customcomponent_hittestbehavior_test.cpp
        77  // create stack, custom and customBrother   in CreateSubCustomComponentNode()
        78  auto stack = node_api->createNode(ARKUI_NODE_STACK);   in CreateSubCustomComponentNode() local
        82  // set stack id   in CreateSubCustomComponentNode()
        85  node_api->setAttribute(stack, NODE_ID, &idItem);   in CreateSubCustomComponentNode()
        87  // set stack width and height   in CreateSubCustomComponentNode()
        90  node_api->setAttribute(stack, NODE_WIDTH, &stackWidthItem);   in CreateSubCustomComponentNode()
        94  node_api->setAttribute(stack, NODE_HEIGHT, &stackHeightItem);   in CreateSubCustomComponentNode()
        96  // set stack backgroundColor   in CreateSubCustomComponentNode()
       100  node_api->setAttribute(stack, NODE_BACKGROUND_COLOR, &stackBackgroundColorItem);   in CreateSubCustomComponentNode()
       185  // add customBrother to stack   in CreateSubCustomComponentNode()
       223  auto stack = CreateSubCustomComponentNode(nodeAPI, hitTestBehavior, onTouchTestStack.c_str(),   in CreateNativeNode() local
        [more matches not shown]

/test/xts/acts/arkui/ace_c_arkui_test/entry/src/main/cpp/image/
    image_hittestbehavior_test.cpp
        71  // create stack, image and imageBrother   in CreateSubImageNode()
        72  auto stack = node_api->createNode(ARKUI_NODE_STACK);   in CreateSubImageNode() local
        76  // set stack id   in CreateSubImageNode()
        79  node_api->setAttribute(stack, NODE_ID, &idItem);   in CreateSubImageNode()
        81  // set stack width and height   in CreateSubImageNode()
        84  node_api->setAttribute(stack, NODE_WIDTH, &stackWidthItem);   in CreateSubImageNode()
        88  node_api->setAttribute(stack, NODE_HEIGHT, &stackHeightItem);   in CreateSubImageNode()
        90  // set stack backgroundColor   in CreateSubImageNode()
        94  node_api->setAttribute(stack, NODE_BACKGROUND_COLOR, &stackBackgroundColorItem);   in CreateSubImageNode()
       120  // add image to stack   in CreateSubImageNode()
       190  auto stack = CreateSubImageNode(nodeAPI, hitTestBehavior, onTouchTestStack.c_str(), onTouchTestImage.c_str(),   in CreateNativeNode() local
        [more matches not shown]

/third_party/python/Lib/test/
    test_contextlib_async.py
       573  async with AsyncExitStack() as stack:
       576  f = stack.push_async_callback(_exit, *args, **kwds)
       578  f = stack.push_async_callback(_exit, *args)
       580  f = stack.push_async_callback(_exit, **kwds)
       582  f = stack.push_async_callback(_exit)
       584  for wrapper in stack._exit_callbacks:
       592  async with AsyncExitStack() as stack:
       594  stack.push_async_callback(arg=1)
       598  stack.push_async_callback(callback=_exit, arg=3)
       620  async with self.exit_stack() as stack...
        [more matches not shown]

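These tests drive contextlib.AsyncExitStack, which records exit callbacks and runs them in LIFO order when the async with block closes. A very loose C analog of just that LIFO-callback idea (struct exit_stack, its capacity, and the helpers are made up; none of the async or exception semantics are modeled):

    #include <stdio.h>

    #define MAX_CALLBACKS 16

    typedef void (*cleanup_fn)(void *arg);

    struct exit_stack {
        cleanup_fn fn[MAX_CALLBACKS];
        void *arg[MAX_CALLBACKS];
        int count;
    };

    static int exit_stack_push(struct exit_stack *s, cleanup_fn fn, void *arg)
    {
        if (s->count == MAX_CALLBACKS)
            return -1;
        s->fn[s->count] = fn;
        s->arg[s->count] = arg;
        s->count++;
        return 0;
    }

    /* Run the recorded callbacks in reverse registration order. */
    static void exit_stack_close(struct exit_stack *s)
    {
        while (s->count > 0) {
            s->count--;
            s->fn[s->count](s->arg[s->count]);
        }
    }

    static void say(void *arg)
    {
        printf("cleanup: %s\n", (const char *)arg);
    }

    int main(void)
    {
        struct exit_stack s = { .count = 0 };

        exit_stack_push(&s, say, "first registered");
        exit_stack_push(&s, say, "second registered");
        exit_stack_close(&s);   /* prints "second registered" then "first registered" */
        return 0;
    }
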
/kernel/linux/linux-6.6/arch/x86/kernel/
    dumpstack_64.c
        46  * On 64-bit, we have a generic entry stack that we...   in stack_type_name()
        61  * @offs: Offset from the start of the exception stack area
        62  * @size: Size of the exception stack
        79  * Array of exception stack page descriptors. If the stack is larger than
        80  * PAGE_SIZE, all pages covering a particular stack will have the same
        81  * info. The guard pages including the not mapped DB2 stack are zeroed
        94  static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info)   in in_exception_stack() argument
        96  unsigned long begin, end, stk = (unsigned long)stack;   in in_exception_stack()
       105  * Handle the case where stack trac...   in in_exception_stack()
       135  in_irq_stack(unsigned long *stack, struct stack_info *info)   in in_irq_stack() argument
       170  get_stack_info_noinstr(unsigned long *stack, struct task_struct *task, struct stack_info *info)   in get_stack_info_noinstr() argument
       191  get_stack_info(unsigned long *stack, struct task_struct *task, struct stack_info *info, unsigned long *visit_mask)   in get_stack_info() argument
        [more matches not shown]

/third_party/python/Lib/lib2to3/pgen2/
    parse.py
       107  # Each stack entry is a tuple: (dfa, state, node).
       112  self.stack = [stackentry]
       122  dfa, state, node = self.stack[-1]
       137  if not self.stack:
       140  dfa, state, node = self.stack[-1]
       156  if not self.stack:
       180  dfa, state, node = self.stack[-1]
       185  self.stack[-1] = (dfa, newstate, node)
       189  dfa, state, node = self.stack[-1]
       191  self.stack[...
        [more matches not shown]

/kernel/linux/linux-5.10/arch/nios2/kernel/
    traps.c
        60  void show_stack(struct task_struct *task, unsigned long *stack,   in show_stack() argument
        66  if (!stack) {   in show_stack()
        68  stack = (unsigned long *)task->thread.ksp;   in show_stack()
        70  stack = (unsigned long *)&stack;   in show_stack()
        73  addr = (unsigned long) stack;   in show_stack()
        76  printk("%sStack from %08lx:", loglvl, (unsigned long)stack);   in show_stack()
        78  if (stack + 1 > endstack)   in show_stack()
        82  printk("%s %08lx", loglvl, *stack++);   in show_stack()
        87  while (stack...   in show_stack()
        [more matches not shown]

/kernel/linux/linux-6.6/arch/nios2/kernel/
    traps.c
        60  void show_stack(struct task_struct *task, unsigned long *stack,   in show_stack() argument
        66  if (!stack) {   in show_stack()
        68  stack = (unsigned long *)task->thread.ksp;   in show_stack()
        70  stack = (unsigned long *)&stack;   in show_stack()
        73  addr = (unsigned long) stack;   in show_stack()
        76  printk("%sStack from %08lx:", loglvl, (unsigned long)stack);   in show_stack()
        78  if (stack + 1 > endstack)   in show_stack()
        82  printk("%s %08lx", loglvl, *stack++);   in show_stack()
        87  while (stack...   in show_stack()
        [more matches not shown]

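Both nios2 copies of show_stack() walk raw words from a starting address to the end of the kernel stack and printk them a few per row. A user-space sketch of the same word-dump loop (dump_words() and the 16-word buffer are invented; the kernel version reads the task's real stack instead):

    #include <stdio.h>

    /* Dump `count` machine words starting at `base`, eight per row,
     * roughly mirroring the "Stack from %08lx:" loop in show_stack(). */
    static void dump_words(const unsigned long *base, size_t count)
    {
        printf("Stack from %p:", (const void *)base);
        for (size_t i = 0; i < count; i++) {
            if (i % 8 == 0)
                printf("\n    ");
            printf(" %08lx", base[i]);
        }
        printf("\n");
    }

    int main(void)
    {
        unsigned long frame[16];

        /* Fill a local array so the dump has deterministic content;
         * in the kernel the loop walks the task's actual stack words. */
        for (size_t i = 0; i < 16; i++)
            frame[i] = 0x1000 + i;

        dump_words(frame, 16);
        return 0;
    }
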
/third_party/musl/porting/linux/user/src/linux/
    clone.c
        17  #define GET_SP_REG(stack) __asm__ __volatile__ ("mov %0, sp" : "=r"(stack))
        19  #define GET_SP_REG(stack) do { \
        20      __asm__ __volatile__ ("mov %%rsp, %0;" : "=r"(stack) :); \
        21      stack = (void *)((uintptr_t)stack - 16); \
        24  #define GET_SP_REG(stack)
        52  int clone(int (*func)(void *), void *stack, int flags, void *arg, ...)   in clone() argument
        85  if (!stack) {   in clone()
        86  GET_SP_REG(stack);   in clone()
        [more matches not shown]

/third_party/musl/src/linux/linux/
    clone.c
        32  #define GET_SP_REG(stack) __asm__ __volatile__ ("mov %0, sp" : "=r"(stack))
        34  #define GET_SP_REG(stack) do { \
        35      __asm__ __volatile__ ("mov %%rsp, %0;" : "=r"(stack) :); \
        36      stack = (void *)((uintptr_t)stack - 16); \
        39  #define GET_SP_REG(stack)
        67  int clone(int (*func)(void *), void *stack, int flags, void *arg, ...)   in clone() argument
       100  if (!stack) {   in clone()
       101  GET_SP_REG(stack);   in clone()
        [more matches not shown]

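Both musl trees carry the clone() wrapper shown above; when the caller passes a NULL stack it falls back to the current stack pointer via GET_SP_REG. Calling the public clone() wrapper the ordinary way looks roughly like this (the 256 KiB size and SIGCHLD-only flags are arbitrary; the child stack pointer must be the high end of the buffer on architectures whose stacks grow downwards):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    #define STACK_SIZE (256 * 1024)

    static int child_fn(void *arg)
    {
        printf("child running, arg=%s\n", (const char *)arg);
        return 0;
    }

    int main(void)
    {
        char *stack = malloc(STACK_SIZE);
        if (!stack)
            return 1;

        /* clone() expects a pointer to the *top* of the child stack on
         * architectures where the stack grows downwards. */
        char *stack_top = stack + STACK_SIZE;

        pid_t pid = clone(child_fn, stack_top, SIGCHLD, "hello");
        if (pid == -1) {
            perror("clone");
            return 1;
        }

        waitpid(pid, NULL, 0);
        free(stack);
        return 0;
    }
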
/third_party/node/deps/npm/node_modules/diff/lib/diff/
    json.js
        90  // object that is already on the "stack" of items being processed. Accepts an optional replacer
        93  function canonicalize(obj, stack, replacementStack, replacer, key) {
        94  stack = stack || [];
       103  for (i = 0; i < stack.length; i += 1) {
       104  if (stack[i] === obj) {
       112  stack.push(obj);
       117  canonicalizedObj[i] = canonicalize(obj[i], stack, replacementStack, replacer, key);
       120  stack.pop();
       134  stack...
        [more matches not shown]

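canonicalize() in json.js avoids infinite recursion on circular structures by keeping a stack of the objects currently being processed and bailing out when it meets one of them again. The same ancestor-stack check in C over a minimal, made-up node type (has_cycle(), struct node, and MAX_DEPTH are all illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_DEPTH 32

    struct node {
        const char *name;
        struct node *child;      /* a single child keeps the sketch short */
    };

    /* Return true if `n` (or anything below it) points back to a node that
     * is already on the ancestor stack, i.e. the structure is cyclic. */
    static bool has_cycle(const struct node *n, const struct node **stack, int depth)
    {
        if (n == NULL)
            return false;
        if (depth == MAX_DEPTH)
            return true;                  /* treat runaway depth as a cycle */

        for (int i = 0; i < depth; i++)
            if (stack[i] == n)
                return true;              /* already being processed */

        stack[depth] = n;                 /* push */
        bool cyclic = has_cycle(n->child, stack, depth + 1);
        /* the pop is implicit: callers never look past `depth` */
        return cyclic;
    }

    int main(void)
    {
        struct node a = { "a", NULL }, b = { "b", NULL };
        const struct node *stack[MAX_DEPTH];

        a.child = &b;
        printf("acyclic: %d\n", has_cycle(&a, stack, 0));   /* 0 */

        b.child = &a;                                       /* introduce a cycle */
        printf("cyclic:  %d\n", has_cycle(&a, stack, 0));   /* 1 */
        return 0;
    }
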
/third_party/ltp/testcases/kernel/syscalls/mmap/
    mmap18.c
        13  * a thread as a stack and expect the mapping to grow when we touch the
        15  * growable mapping as a stack.
        17  * The kernel only grows the memory region when the stack pointer is within
        22  * 'stack_guard_gap' pages to an existing mapping. So when we map the stack we
        23  * make sure there is enough of free address space before the lowest stack
        28  * The stack memory map would look like:
        36  * ^ | - - stack size - - |
        39  * stack bottom          stack top
        43  * We allocate stack a...
       130  grow_stack(void *stack, size_t size)   in grow_stack() argument
       155  void *stack;   in grow_stack_success() local
       179  void *stack;   in grow_stack_fail() local
        [more matches not shown]

/kernel/linux/linux-6.6/lib/
    stackdepot.c
         3  * Stack depot - a stack trace storage that avoids duplication.
         5  * Internally, stack depot maintains a hash table of unique stacktraces. The
         6  * stack traces themselves are stored contiguously one after another in a set
        47  /* Compact structure that stores a reference to a stack. */
        78  /* Hash table of pointers to stored stack traces. */
        85  /* Array of memory regions that store stack traces. */
       136  * stack traces being stored in stack depot.   in stack_depot_early_init()
       221  /* Uses preallocated memory to initialize a new stack depot pool. */
       258  /* Allocates a new stack i...
       262  struct stack_record *stack;   in depot_alloc_stack() local
       471  struct stack_record *stack;   in stack_depot_fetch() local
       498  stack_depot_print(depot_stack_handle_t stack)   in stack_depot_print() argument
        [more matches not shown]

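stackdepot.c deduplicates stack traces: each unique trace is stored once behind a hash table and callers hold a compact handle. A heavily reduced sketch of that interning idea (fixed-size table, plain calloc instead of the depot's pool allocator, and a toy hash; this is not the kernel code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define TABLE_SIZE 64
    #define MAX_ENTRIES 16

    struct stack_record {
        struct stack_record *next;        /* hash bucket chain */
        unsigned int size;                /* number of stored frames */
        unsigned long entries[MAX_ENTRIES];
    };

    static struct stack_record *table[TABLE_SIZE];

    static unsigned int hash_trace(const unsigned long *entries, unsigned int size)
    {
        unsigned int h = 2166136261u;                 /* FNV-style mix */
        for (unsigned int i = 0; i < size; i++)
            h = (h ^ (unsigned int)entries[i]) * 16777619u;
        return h % TABLE_SIZE;
    }

    /* Store a trace if it is new, otherwise return the existing record. */
    static struct stack_record *depot_save(const unsigned long *entries, unsigned int size)
    {
        if (size > MAX_ENTRIES)
            size = MAX_ENTRIES;

        unsigned int bucket = hash_trace(entries, size);
        for (struct stack_record *r = table[bucket]; r; r = r->next)
            if (r->size == size && memcmp(r->entries, entries, size * sizeof(*entries)) == 0)
                return r;                             /* duplicate: reuse */

        struct stack_record *r = calloc(1, sizeof(*r));
        if (!r)
            return NULL;
        r->size = size;
        memcpy(r->entries, entries, size * sizeof(*entries));
        r->next = table[bucket];
        table[bucket] = r;
        return r;
    }

    int main(void)
    {
        unsigned long trace[] = { 0x1111, 0x2222, 0x3333 };

        struct stack_record *a = depot_save(trace, 3);
        struct stack_record *b = depot_save(trace, 3);
        printf("deduplicated: %d\n", a == b);         /* 1: stored only once */
        return 0;
    }
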
/test/xts/acts/arkui/ace_c_arkui_test/entry/src/main/cpp/stack/
    stack_backgroundcolor_test.cpp
        22  NAPI_START(stack, ARKUI_NODE_STACK);   in TestStackBackgroundColor001()
        27  auto ret = nodeAPI->setAttribute(stack, NODE_BACKGROUND_COLOR, &value_item);   in TestStackBackgroundColor001()
        29  ASSERT_EQ(nodeAPI->getAttribute(stack, NODE_BACKGROUND_COLOR)->value[PARAM_0].u32, backgroundColor);   in TestStackBackgroundColor001()
        35  NAPI_START(stack, ARKUI_NODE_STACK);   in TestStackBackgroundColor002()
        40  auto ret = nodeAPI->setAttribute(stack, NODE_BACKGROUND_COLOR, &value_item);   in TestStackBackgroundColor002()
        42  ASSERT_EQ(nodeAPI->getAttribute(stack, NODE_BACKGROUND_COLOR)->value[PARAM_0].u32, backgroundColor);   in TestStackBackgroundColor002()
        48  NAPI_START(stack, ARKUI_NODE_STACK);   in TestStackBackgroundColor003()
        53  auto ret = nodeAPI->setAttribute(stack, NODE_BACKGROUND_COLOR, &value_item);   in TestStackBackgroundColor003()
        55  ASSERT_EQ(nodeAPI->getAttribute(stack, NODE_BACKGROUND_COLOR)->value[PARAM_0].u32, backgroundColor);   in TestStackBackgroundColor003()

    stack_backgroundimageposition_test.cpp
        22  NAPI_START(stack, ARKUI_NODE_STACK);   in TestStackBackgroundImagePosition001()
        28  auto ret = nodeAPI->setAttribute(stack, NODE_BACKGROUND_IMAGE_POSITION, &value_item);   in TestStackBackgroundImagePosition001()
        30  ASSERT_EQ(nodeAPI->getAttribute(stack, NODE_BACKGROUND_IMAGE_POSITION)->value[PARAM_0].f32, positionX);   in TestStackBackgroundImagePosition001()
        31  ASSERT_EQ(nodeAPI->getAttribute(stack, NODE_BACKGROUND_IMAGE_POSITION)->value[PARAM_1].f32, positionY);   in TestStackBackgroundImagePosition001()
        37  NAPI_START(stack, ARKUI_NODE_STACK);   in TestStackBackgroundImagePosition002()
        43  auto ret = nodeAPI->setAttribute(stack, NODE_BACKGROUND_IMAGE_POSITION, &value_item);   in TestStackBackgroundImagePosition002()
        45  ASSERT_EQ(nodeAPI->getAttribute(stack, NODE_BACKGROUND_IMAGE_POSITION)->value[PARAM_0].f32, positionX);   in TestStackBackgroundImagePosition002()
        46  ASSERT_EQ(nodeAPI->getAttribute(stack, NODE_BACKGROUND_IMAGE_POSITION)->value[PARAM_1].f32, positionY);   in TestStackBackgroundImagePosition002()

/third_party/libwebsockets/lib/system/async-dns/
    async-dns-parse.c
       145  * Able to recurse using an explicit non-CPU stack to resolve CNAME usages
       157  struct label_stack stack[4];   in lws_adns_iterate() local
       163  lws_strncpy(stack[0].name, expname, sizeof(stack[0].name));   in lws_adns_iterate()
       164  stack[0].enl = (int)strlen(expname);   in lws_adns_iterate()
       202  sp = stack[0].name;   in lws_adns_iterate()
       207  sizeof(stack[0].name) -   in lws_adns_iterate()
       208  lws_ptr_diff_size_t(sp, stack[0].name));   in lws_adns_iterate()
       257  n = lws_ptr_diff(sp, stack[0].name);   in lws_adns_iterate()
       258  if (stack[...   in lws_adns_iterate()
        [more matches not shown]

/kernel/linux/linux-5.10/arch/um/kernel/skas/
    mmu.c
        62  unsigned long stack = 0;   in init_new_context() local
        65  stack = get_zeroed_page(GFP_KERNEL);   in init_new_context()
        66  if (stack == 0)   in init_new_context()
        69  to_mm->id.stack = stack;   in init_new_context()
        75  to_mm->id.u.pid = copy_context_skas0(stack,   in init_new_context()
        77  else to_mm->id.u.pid = start_userspace(stack);   in init_new_context()
        95  if (to_mm->id.stack != 0)   in init_new_context()
        96  free_page(to_mm->id.stack);   in init_new_context()
       110  ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);   in uml_setup_stubs()
        [more matches not shown]

/kernel/linux/linux-6.6/mm/kmsan/
    init.c
       147  static void smallstack_push(struct smallstack *stack, struct page *pages)   in smallstack_push() argument
       149  KMSAN_WARN_ON(stack->index == MAX_BLOCKS);   in smallstack_push()
       150  stack->items[stack->index] = pages;   in smallstack_push()
       151  stack->index++;   in smallstack_push()
       155  static struct page *smallstack_pop(struct smallstack *stack)   in smallstack_pop() argument
       159  KMSAN_WARN_ON(stack->index == 0);   in smallstack_pop()
       160  stack->index--;   in smallstack_pop()
       161  ret = stack->items[stack...   in smallstack_pop()
        [more matches not shown]

/third_party/node/deps/v8/tools/profview/
    profile-utils.js
       124  // We store list of ticks and positions within the ticks stack by
       142  function findNextFrame(file, stack, stackPos, step, filter) {
       145  while (stackPos >= 0 && stackPos < stack.length) {
       146  codeId = stack[stackPos];
       169  let stack = file.ticks[stackIndex].s;
       170  console.assert(stackPos >= 0 && stackPos < stack.length);
       171  let codeId = stack[stackPos];
       198  let stack = file.ticks[stackIndex].s;
       201  let stackPos = findNextFrame(file, stack, depth + step, step, filter);
       228  let stack...
        [more matches not shown]

/third_party/ltp/lib/
    tst_bool_expr.c
       149  static inline void stack_push(struct tst_expr_tok *stack[], unsigned int *op_stack_pos,   in stack_push() argument
       152  stack[(*op_stack_pos)++] = op;   in stack_push()
       160  static inline struct tst_expr_tok *stack_pop(struct tst_expr_tok *stack[],   in stack_pop() argument
       166  return stack[--(*op_stack_pos)];   in stack_pop()
       171  static inline int stack_peek_op(struct tst_expr_tok *stack[],   in stack_peek_op() argument
       177  return stack[op_stack_pos - 1]->op;   in stack_peek_op()
       291  * There can be at most one binary op on the stack   in shunting_yard()
       292  * since we pop the one present on the stack before we   in shunting_yard()
       366  int stack[MAX_STACK];   in tst_bool_expr_eval() local
       372  stack[po...   in tst_bool_expr_eval()
        [more matches not shown]

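tst_bool_expr.c converts boolean expressions to postfix with a shunting-yard operator stack (the stack_push/stack_pop helpers above) and then evaluates the postfix form against an int value stack. A compact sketch of just the evaluation half (eval_rpn() is invented; operands are the literals '0' and '1', operators '!', '&', '|', with minimal error handling):

    #include <stdio.h>

    #define MAX_STACK 64

    /* Evaluate a postfix (RPN) boolean expression such as "1 0 ! &". */
    static int eval_rpn(const char *expr)
    {
        int stack[MAX_STACK];
        int pos = 0;

        for (const char *c = expr; *c; c++) {
            switch (*c) {
            case '0':
            case '1':
                if (pos == MAX_STACK)
                    return -1;                   /* overflow */
                stack[pos++] = *c - '0';         /* push operand */
                break;
            case '!':
                if (pos < 1)
                    return -1;
                stack[pos - 1] = !stack[pos - 1];
                break;
            case '&':
            case '|':
                if (pos < 2)
                    return -1;
                pos--;
                if (*c == '&')
                    stack[pos - 1] = stack[pos - 1] && stack[pos];
                else
                    stack[pos - 1] = stack[pos - 1] || stack[pos];
                break;
            case ' ':
                break;
            default:
                return -1;                       /* unknown token */
            }
        }

        return pos == 1 ? stack[0] : -1;
    }

    int main(void)
    {
        printf("%d\n", eval_rpn("1 0 ! &"));     /* 1 && !0 -> 1 */
        printf("%d\n", eval_rpn("1 0 &"));       /* 1 && 0  -> 0 */
        return 0;
    }
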
/third_party/selinux/libsepol/cil/src/
    cil_parser.c
        52  static void push_hll_info(struct cil_stack *stack, uint32_t hll_offset, uint32_t hll_expand)   in push_hll_info() argument
        59  cil_stack_push(stack, CIL_NONE, new);   in push_hll_info()
        62  static void pop_hll_info(struct cil_stack *stack, uint32_t *hll_offset, uint32_t *hll_expand)   in pop_hll_info() argument
        64  struct cil_stack_item *curr = cil_stack_pop(stack);   in pop_hll_info()
        96  static int add_hll_linemark(struct cil_tree_node **current, uint32_t *hll_offset, uint32_t *hll_expand, struct cil_stack *stack, char *path)   in add_hll_linemark() argument
       114  if (cil_stack_is_empty(stack)) {   in add_hll_linemark()
       120  pop_hll_info(stack, hll_offset, hll_expand);   in add_hll_linemark()
       133  push_hll_info(stack, *hll_offset, *hll_expand);   in add_hll_linemark()
       134  if (cil_stack_number_of_items(stack) > CIL_PARSER_MAX_EXPR_DEPTH) {   in add_hll_linemark()
       223  struct cil_stack *stack;   in cil_parser() local
        [more matches not shown]

/kernel/linux/linux-5.10/arch/um/os-Linux/
    helper.c
        45  unsigned long stack, sp;   in run_helper() local
        48  stack = alloc_stack(0, __uml_cant_sleep());   in run_helper()
        49  if (stack == 0)   in run_helper()
        67  sp = stack + UM_KERN_PAGE_SIZE - sizeof(void *);   in run_helper()
       109  free_stack(stack, 0);   in run_helper()
       116  unsigned long stack, sp;   in run_helper_thread() local
       119  stack = alloc_stack(0, __uml_cant_sleep());   in run_helper_thread()
       120  if (stack == 0)   in run_helper_thread()
       123  sp = stack + UM_KERN_PAGE_SIZE - sizeof(void *);   in run_helper_thread()
       142  free_stack(stack,...   in run_helper_thread()
        [more matches not shown]

/kernel/linux/linux-6.6/arch/um/os-Linux/
    helper.c
        46  unsigned long stack, sp;   in run_helper() local
        49  stack = alloc_stack(0, __uml_cant_sleep());   in run_helper()
        50  if (stack == 0)   in run_helper()
        68  sp = stack + UM_KERN_PAGE_SIZE;   in run_helper()
       114  free_stack(stack, 0);   in run_helper()
       121  unsigned long stack, sp;   in run_helper_thread() local
       124  stack = alloc_stack(0, __uml_cant_sleep());   in run_helper_thread()
       125  if (stack == 0)   in run_helper_thread()
       128  sp = stack + UM_KERN_PAGE_SIZE;   in run_helper_thread()
       147  free_stack(stack,...   in run_helper_thread()
        [more matches not shown]
