Lines matching refs: code

2 // Use of this source code is governed by a BSD-style license that can be
8 #include "src/codegen/code-factory.h"
37 static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
39 DCHECK(!AreAliased(code, scratch));
40 // Verify that this is baseline code by checking the CodeKind in the flags.
41 __ LoadU32(scratch, FieldMemOperand(code, Code::kFlagsOffset));
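
The assert above loads the Code object's flags word and checks that the encoded CodeKind is baseline. A minimal standalone sketch of that decoding, assuming a hypothetical layout with the code kind in the low four bits (the real layout comes from V8's bitfield classes in code.h):

    #include <cassert>
    #include <cstdint>

    // Hypothetical subset of v8::internal::CodeKind; the real enum is larger.
    enum class CodeKind : uint32_t { INTERPRETED_FUNCTION = 0, BASELINE = 1, TURBOFAN = 2 };

    // Assumed flags layout: kind in bits [0, 3]. V8's actual encoding may differ.
    constexpr uint32_t kCodeKindMask = 0xF;

    CodeKind DecodeKind(uint32_t flags) {
      return static_cast<CodeKind>(flags & kCodeKindMask);
    }

    void AssertCodeIsBaseline(uint32_t flags) {
      // Mirrors the builtin's debug check: abort unless the kind is BASELINE.
      assert(DecodeKind(flags) == CodeKind::BASELINE);
    }
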
95 // bytecode. If there is baseline code on the shared function info, converts an
97 // code. Otherwise execution continues with bytecode.
118 // Check if we have baseline code. For OSR entry it is safe to assume we
119 // always have baseline code.
125 // Start with bytecode as there is no baseline code.
132 // Start with baseline code.
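
Lines 118-132 pick the entry point: baseline code when the shared function info carries it, bytecode otherwise. A hedged sketch of that dispatch using stand-in types rather than V8's real object model:

    #include <variant>

    struct BytecodeArray {};  // stand-in: interpreter bytecode
    struct BaselineCode {};   // stand-in: a baseline Code object

    // Hypothetical: the shared function info's data slot holds one or the other.
    using FunctionData = std::variant<BytecodeArray, BaselineCode>;

    enum class Entry { kInterpreter, kBaseline };

    Entry ChooseEntry(const FunctionData& data) {
      // Start with baseline code when present, otherwise with bytecode.
      return std::holds_alternative<BaselineCode>(data) ? Entry::kBaseline
                                                        : Entry::kInterpreter;
    }
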
179 // If the code deoptimizes during the implicit function entry stack interrupt
248 // Retry from the start after installing baseline code.
384 __ bkpt(0); // Unreachable code.
395 // If the code object is null, just return to the caller.
409 // Load deoptimization data from the code object.
410 // <deopt_data> = <code>[#deoptimization_data_offset]
430 // Compute the target address = code start + osr_offset
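
Lines 409-430 fetch the deoptimization data from the code object and form the OSR entry point as code start plus an offset read from that data. A standalone sketch of the address arithmetic, with a made-up one-field DeoptData (the real DeoptimizationData is a FixedArray with many slots):

    #include <cstdint>

    struct DeoptData {
      int32_t osr_pc_offset;  // hypothetical: offset of the OSR entry in the code body
    };

    // <target> = <code start> + <osr_offset>
    std::uintptr_t OsrEntryAddress(std::uintptr_t code_start, const DeoptData& d) {
      return code_start + static_cast<std::uintptr_t>(d.osr_pc_offset);
    }
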
614 // Unreachable code.
804 // r4: code entry
837 // r4: code entry
919 // returns control to the code after the b(&invoke) above, which
924 // Notice that we cannot store a reference to the trampoline code directly in
1014 // Unreachable code.
1044 // Invoke the code.
1084 // Store code entry in the closure.
1151 // If the optimized code is cleared, go to runtime to update the optimization
1156 // Check if the optimized code is marked for deopt. If it is, call the
1169 // Optimized code is good, get it into the closure and link the closure
1170 // into the optimized functions list, then tail call the optimized code.
1177 // The optimized code slot contains deoptimized code, or the code is cleared and
1178 // the optimized code marker hasn't been updated. Evict the code, update the
1179 // marker, and re-enter the closure's code.
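
Lines 1151-1179 amount to a three-way tiering decision: a cleared weak slot or code marked for deoptimization goes to the runtime, anything else is installed in the closure and tail-called. A sketch of that control flow, with hypothetical predicates in place of the builtin's tagged-pointer loads and branches:

    enum class Action { kCallRuntime, kTailCallOptimized };

    struct OptimizedCodeRef {
      bool cleared;           // weak reference cleared by GC
      bool marked_for_deopt;  // code object carries the deoptimization mark
    };

    Action DecideTiering(const OptimizedCodeRef& code) {
      if (code.cleared) return Action::kCallRuntime;           // heal the slot
      if (code.marked_for_deopt) return Action::kCallRuntime;  // evict and re-enter
      return Action::kTailCallOptimized;  // link into the closure and jump
    }
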
1221 // will restore the original bytecode. In order to simplify the code, we have
1291 // Check if optimized code is available
1311 // is optimized code or a tiering state that needs to be processed.
1392 // We'll use the bytecode for both code age/OSR resetting, and pushing onto
1400 // Baseline code frames store the feedback vector where the interpreter would
1463 // Generate code for entering a JS function with the interpreter.
1509 // Check if the feedback vector is valid. If valid, check for optimized code
1524 // Check whether the optimized code slot is non-empty or holds a tiering state.
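
That check is typically a single masked test of the feedback vector's flags word: any set bit under the combined mask means there is work to do. A sketch with assumed bit positions (V8's real assignments come from FeedbackVector's bitfields and vary across versions):

    #include <cstdint>

    constexpr uint16_t kHasOptimizedCodeBit = 1u << 0;    // assumed bit position
    constexpr uint16_t kTieringStateMask    = 0x7u << 1;  // assumed bit positions

    bool NeedsProcessing(uint16_t flags) {
      // Non-zero under the mask: optimized code present or a tiering state pending.
      return (flags & (kHasOptimizedCodeBit | kTieringStateMask)) != 0;
    }
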
1546 // MANUAL indicates that the scope shouldn't actually generate code to set up
1804 Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
1805 __ Jump(code, RelocInfo::CODE_TARGET);
1942 // If the code deoptimizes during the implicit function entry stack interrupt
1980 int code = config->GetAllocatableGeneralCode(i);
1981 __ Pop(Register::from_code(code));
1982 if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1983 __ SmiUntag(Register::from_code(code));
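
The loop at 1980-1983 pops every allocatable register and, for JavaScript builtins, untags the Smi-encoded argument count. A sketch of the untagging, assuming 31-bit Smis tagged with a low zero bit; 64-bit builds without pointer compression shift by 32 instead:

    #include <cstdint>

    // Assumed Smi scheme: value << 1, tag bit 0 == 0.
    int32_t SmiTag(int32_t value)    { return value << 1; }
    int32_t SmiUntag(int32_t tagged) { return tagged >> 1; }

    // An argument count of 3 travels through the frame as 6:
    // SmiUntag(SmiTag(3)) == 3
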
2040 DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r3.code());
2281 Handle<Code> code) {
2345 __ Jump(code, RelocInfo::CODE_TARGET);
2354 Handle<Code> code) {
2438 // Tail-call to the {code} handler.
2439 __ Jump(code, RelocInfo::CODE_TARGET);
3184 // If we reach this code, 31 <= exponent <= 83.
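
The bound follows from the IEEE-754 double layout: with unbiased exponent e, values with e <= 30 fit an int32 and take the fast path, while for e >= 84 the integer value is significand * 2^(e - 52), so its low 32 bits are already zero. Only 31 <= e <= 83 needs the manual path. A sketch of extracting e:

    #include <cstdint>
    #include <cstring>

    // Unbiased exponent of a finite double: bits [52, 62] minus the bias 1023.
    int UnbiasedExponent(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return static_cast<int>((bits >> 52) & 0x7FF) - 1023;
    }
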
3624 // This code tries to be close to ia32 code so that any changes can be
3643 int code = config->GetAllocatableDoubleCode(i);
3644 const DoubleRegister dreg = DoubleRegister::from_code(code);
3645 int offset = code * kDoubleSize;
3667 // Get the address of the location in the code object (r6) (return
3685 // r5: code address or 0 already loaded.
3711 int code = config->GetAllocatableDoubleCode(i);
3712 int dst_offset = code * kDoubleSize + double_regs_offset;
3714 code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
3800 int code = config->GetAllocatableDoubleCode(i);
3801 const DoubleRegister dreg = DoubleRegister::from_code(code);
3802 int src_offset = code * kDoubleSize + double_regs_offset;
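
All three loops (3643-3645, 3711-3714, 3800-3802) address a double register's save slot as code * kDoubleSize from some base, so a value saved for register number `code` is restored from the same slot. A sketch of that round trip, with a hypothetical flat stand-in for the deoptimizer's FrameDescription:

    #include <cstddef>

    constexpr std::size_t kDoubleSize = 8;
    constexpr int kNumDoubleRegs = 16;  // assumption; target-dependent

    struct FrameDescription {  // hypothetical flat layout
      double double_regs[kNumDoubleRegs];
    };

    // Slot offset for allocatable double register number `code`.
    std::size_t DoubleRegOffset(int code) { return code * kDoubleSize; }

    void Save(FrameDescription* f, int code, double v) { f->double_regs[code] = v; }
    double Restore(const FrameDescription* f, int code) { return f->double_regs[code]; }
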