/arkcompiler/runtime_core/static_core/runtime/bridge/arch/arm/

compiled_code_to_interpreter_bridge_armhf.S
     55  CFI_REL_OFFSET(r9, -(3 * 4))
    124  // r7 - stack args, r8 - iframe, r9, r10, r12 - temps, lr - method
    146  ldr r9, [lr, #METHOD_ACCESS_FLAGS_OFFSET]
    147  tst r9, #ACCESS_STATIC
    158  NEXT_SHORTY r9
    159  cmp r9, #0
    162  cmp r9, #SHORTY_REFERENCE
    166  cmpne r9, #SHORTY_LAST_INT32
    169  sub r9, r4, r3
    170  cmp r9, # [all...]

deoptimization_arm.S
     42  CFI_REL_OFFSET(r9, -((CFRAME_CALLEE_REGS_START_SLOT + 1) * 4))
     76  CFI_REL_OFFSET(r9, -((BOUNDARY_FRAME_SLOT + 1) * 4))
    165  ldrd r8, r9, [r3, #-8]!
    166  CFI_RESTORE(r9)
    228  CFI_REL_OFFSET(r9, -((CFRAME_CALLEE_REGS_START_SLOT + 1) * 4))
    252  ldrd r8, r9, [r3, #-8]!
    253  CFI_RESTORE(r9)
    308  CFI_REL_OFFSET(r9, -((CFRAME_CALLEE_REGS_START_SLOT + 1) * 4))
    322  ldrd r8, r9, [r0, #-8]!
    323  CFI_RESTORE(r9) [all...]

compiled_code_to_interpreter_bridge_arm.S
     59  CFI_REL_OFFSET(r9, -(3 * 4))
    128  // r3 - args, r4, r5, r9 - temp, r6 - iframe, r7 - method, r8 - method.shorty
    131  mov r9, r5, lsl #3
    170  str r5, [r2, r9]
    182  str r4, [r2, r9]
    240  CFI_RESTORE(r9)

interpreter_to_compiled_code_bridge_armhf.S
    310  // r6 - method, r7 - method.shorty, r8 - insn_ptr, r9 - iframe
    313  mov r9, r1
    325  // r7 - stack arg ptr, r8 - insn ptr, r9 - iframe, r10 - insn,
    339  addeq r2, r9, #FRAME_ACC_OFFSET
    355  // set r9 - frame.vregs
    356  add r9, r9, #FRAME_VREGS_OFFSET
    435  CFI_RESTORE(r9)

interpreter_to_compiled_code_bridge_arm.S
    145  push {r4, r5, r6, r7, r8, r9}
    146  CFI_REL_OFFSET(r9, -((3 + 1) * 4))
    169  // r5 - iframe, r6, r7 and r9 - temps, r8 - pointer to stack, lr - method
    261  pop {r4, r5, r6, r7, r8, r9, THREAD_REG, fp}
    267  CFI_RESTORE(r9)

compiled_code_to_runtime_bridge_arm.S
     22  CFI_REL_OFFSET(r9, (\offset - CALLEE_REG0_OFFSET + 4*5))
     34  CFI_RESTORE(r9)
    310  CFI_REL_OFFSET(r9, (5 * 4))
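Across these ARM bridges, r9 is an AAPCS callee-saved register: each bridge spills it near the top of its frame under a CFI_REL_OFFSET annotation and pairs the reload with CFI_RESTORE so the unwinder can recover it. A minimal stand-alone sketch of that pattern, written with the raw .cfi_* directives the runtime's macros presumably expand to; the label and offsets below are illustrative, not copied from the files above:

    // Hypothetical bridge prologue/epilogue: spill r4-r9 and lr,
    // describe the saved slots to the unwinder, then undo both on exit.
    .global sample_bridge
    .type sample_bridge, %function
    sample_bridge:
        .cfi_startproc
        push {r4, r5, r6, r7, r8, r9, lr}   // 7 words: sp drops by 28 bytes
        .cfi_adjust_cfa_offset 28
        .cfi_rel_offset r9, 20              // r9 now lives at [sp, #20]
        .cfi_rel_offset lr, 24
        // ... bridge body ...
        pop {r4, r5, r6, r7, r8, r9, lr}
        .cfi_adjust_cfa_offset -28
        .cfi_restore r9
        .cfi_restore lr
        bx lr
        .cfi_endproc

The negative offsets seen in the hits (e.g. -(3 * 4)) suggest these bridges describe the slots relative to a frame-pointer-based CFA rather than to sp, but the save/restore pairing is the same.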
/arkcompiler/runtime_core/static_core/runtime/bridge/arch/amd64/

expand_compiled_code_args_dyn_amd64.S
     27  // %r9 - tmp4
     44  movq (%r8), %r9
     45  movq %r9, (%r8, %rdx)
     55  movq %r9, (%r8, %rdx)

interpreter_to_compiled_code_bridge_amd64.S
     24  // load arguments into %rdi, %rsi, %rdx, %rcx, %r8, %r9
     30  movq (5 * 8)(\begin_ptr), %r9
    161  // %edi - float arg counter, %rdx - stack pointer, %r9 - temp
    176  xorq %r9, %r9
    186  movq %rsi, %r9
    187  subq $6, %r9
    189  shlq $3, %r9
    190  subq %r9, %rdx
    261  // %r9 [all...]

proxy_entrypoint_amd64.S
    146  movq (-CALLER_REG0_OFFSET + 40)(%rbp), %r9

compiled_code_to_runtime_bridge_amd64.S
     56  movq %r9, (-CALLER_REG0_OFFSET + 40)(%\fp_reg)
     77  movq (-CALLER_REG0_OFFSET + 40)(%\fp_reg), %r9
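The comment hit at line 24 of interpreter_to_compiled_code_bridge_amd64.S follows the System V AMD64 calling convention, in which %r9 carries the sixth integer or pointer argument; the subtract-6 / shift-by-3 arithmetic at lines 186-190 appears to reserve stack space for whatever arguments overflow the six registers. A minimal, self-contained sketch of the register side of that convention; the callee sum6 is hypothetical:

    // Calling int sum6(int, int, int, int, int, int): the six integer
    // arguments travel in rdi, rsi, rdx, rcx, r8 and r9; a seventh
    // argument would be passed on the stack.
    .global call_sum6
    call_sum6:
        pushq %rbp              // keeps rsp 16-byte aligned at the call below
        movl  $1, %edi
        movl  $2, %esi
        movl  $3, %edx
        movl  $4, %ecx
        movl  $5, %r8d
        movl  $6, %r9d          // sixth and last register argument
        call  sum6              // hypothetical external C function
        popq  %rbp
        ret                     // sum6's result is returned unchanged in %eax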
/arkcompiler/runtime_core/static_core/plugins/ets/runtime/napi/arch/arm32/

ets_napi_entry_point_arm32hf.S
    208  // | a | | | r9 | 17
    304  CFI_REL_OFFSET(r9, -((CFRAME_LOCALS_COUNT + 18 + 2) * 4)) // Shorty return value
    320  // save shorty return value to r9
    321  ldr r9, [r4, #METHOD_SHORTY_OFFSET]
    322  ldr r9, [r9]
    374  and r9, r9, #0xF
    375  cmp r9, #SHORTY_REFERENCE
    385  4: sub r3, r9, #SHORTY_FIRST_FLOA [all...]

ets_napi_entry_point_arm32.S
    209  // | a | | | r9 | 17
    275  CFI_REL_OFFSET(r9, -((CFRAME_LOCALS_COUNT + 18 + 2) * 4)) // Shorty return value
    290  // save shorty return value to r9
    291  ldr r9, [r4, #METHOD_SHORTY_OFFSET]
    292  ldr r9, [r9]
    346  and r9, r9, #0xF
    347  cmp r9, #SHORTY_REFERENCE
    381  CFI_RESTORE(r9) [all...]

ets_async_entry_point_arm32.S
     63  CFI_REL_OFFSET(r9, -((CFRAME_LOCALS_COUNT + 18 + 2) * 4))
     94  CFI_RESTORE(r9)

ets_async_entry_point_arm32hf.S
     59  CFI_REL_OFFSET(r9, -((CFRAME_LOCALS_COUNT + 18 + 2) * 4))
     91  CFI_RESTORE(r9)
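In both NAPI entry points, r9 keeps the method's shorty so the return value can be dispatched on its type: the hits load the shorty pointer from METHOD_SHORTY_OFFSET, read its first word, and mask the low four bits to get the return-type tag. A condensed sketch of that decode step, assuming the packed 4-bit shorty layout those hits imply; the branch target is illustrative:

    // r4 = Method*; leave the return-type tag of the shorty in r9.
    ldr   r9, [r4, #METHOD_SHORTY_OFFSET]   // r9 = pointer to the packed shorty
    ldr   r9, [r9]                          // r9 = first word of the shorty
    and   r9, r9, #0xF                      // low nibble encodes the return type
    cmp   r9, #SHORTY_REFERENCE             // reference returns take a separate path
    beq   .Lref_return                      // hypothetical label for that path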
/arkcompiler/ets_runtime/ecmascript/compiler/trampoline/x64/

optimized_fast_call.cpp
     74  // %r9 - this
     96  Register thisObj = r9;    in OptimizedFastCallAndPushArgv()
    131  __ Movq(JSTaggedValue::VALUE_UNDEFINED, r9);    in OptimizedFastCallAndPushArgv()
    142  __ Movq(JSTaggedValue::VALUE_UNDEFINED, r9);    in OptimizedFastCallAndPushArgv()
    153  __ Movq(JSTaggedValue::VALUE_UNDEFINED, r9);    in OptimizedFastCallAndPushArgv()
    160  __ Movq(Operand(argvReg, 0), r9);    in OptimizedFastCallAndPushArgv() local
    265  __ Movq(Operand(argV, 0), r9); // third arg    in JSFastCallWithArgV() local
    294  // %r9 - expectedNumArgs
    319  __ Movq(r9, r14);    in JSFastCallWithArgVAndPushArgv()
    328  __ Movq(JSTaggedValue::VALUE_UNDEFINED, r9);    in JSFastCallWithArgVAndPushArgv()
    354  __ Movq(Operand(argV, 0), r9); // third arg    in JSFastCallWithArgVAndPushArgv() local [all...]

optimized_call.cpp
     68  __ Movq(rbx, r9);    in JSFunctionEntry()
    124  Register method = r9;    in OptimizedCallAndPushArgv()
    452  Register argV = r9;    in GenJSCall()
    533  __ Movq(argc, r9);    in GenJSCall()
    536  __ Subq(NUM_MANDATORY_JSFUNC_ARGS, r9); // argc    in GenJSCall()
    551  __ Cmpl(0, r9); // 0: callarg0    in GenJSCall()
    553  __ Cmpl(1, r9); // 1: callarg1    in GenJSCall()
    555  __ Cmpl(2, r9); // 2: callarg2    in GenJSCall()
    557  __ Cmpl(3, r9); // 3: callarg3    in GenJSCall()
    663  Register argV = r9;    in AOTCallToAsmInterBridge()
    893  __ Mov(Operand(rdx, FRAME_SLOT_SIZE), r9); // get origin argv    in JSBoundFunctionCallInternal() local
   1108  __ Movq(Operand(glueReg, runtimeIdReg, Scale::Times8, JSThread::GlueData::GetRTStubEntriesOffset(false)), r9);    in CallRuntimeWithArgv() local [all...]

asm_interpreter_call.cpp
     41  // argv - %r9(<callTarget, newTarget, this> are at the beginning of argv)
     91  Register callTargetRegister = r9;    in GeneratorReEnterAsmInterpDispatch()
    135  // argv - %r9(<callTarget, newTarget, this> are at the beginning of argv)
    145  Register argvRegister = r9;    in AsmInterpEntryDispatch()
    808  // %r9 - argV (...)
    834  Register stackArgs = r9;    in CallNativeWithArgv()
    916  Register trampolineIdRegister = r9;    in CallNativeWithArgv()
    935  Register argv = r9;    in CallNativeEntry()
    974  Register argv = r9;    in CallFastBuiltin()
    999  __ Movq(argc, r9); // arg    in CallFastBuiltin()
   1562  __ Movq(Operand(rsp, FRAME_SLOT_SIZE * (PreserveRegisterIndex++)), r9);    in PreserveMostCall() local [all...]
/arkcompiler/ets_runtime/ecmascript/compiler/assembler/x64/

extended_assembler_x64.cpp
     22  r13, rbp, r12, rbx, r14, rsi, rdi, r8, r9
     25  rdi, rbp, rsi, rdx, rcx, r8, r9, rInvalid, rInvalid

macro_assembler_x64.h
     52  x64::rdi, x64::rsi, x64::rdx, x64::rcx, x64::r8, x64::r9 };

assembler_x64.h
     31  r9,
/arkcompiler/runtime_core/static_core/runtime/tests/arch/amd64/

invokation_helper.S
     78  movq 40(%r14), %r9
/arkcompiler/runtime_core/static_core/plugins/ets/runtime/napi/arch/amd64/

ets_async_entry_point_amd64.S
    115  movq (-CALLER_REG0_OFFSET + 40)(%rbp), %r9

ets_napi_entry_point_amd64.S
    151  movq (-CALLER_REG0_OFFSET + 40)(%rbp), %r9
    217  // | | | r9 | 14
    233  // | | | Napi r9 | 28
    443  movq (-CALLER_REG0_OFFSET + 40)(%rbp), %r9
/arkcompiler/runtime_core/static_core/compiler/optimizer/code_generator/

target_info.h
     98  REG(r9) /* 5 */ \
/arkcompiler/runtime_core/static_core/compiler/optimizer/code_generator/target/aarch32/

target.h
     40  vixl::aarch32::r8.GetCode(), vixl::aarch32::r9.GetCode(), vixl::aarch32::r12.GetCode()};