Lines Matching refs:code
2 // Use of this source code is governed by a BSD-style license that can be
10 #include "src/codegen/code-factory.h"
23 #include "src/objects/code.h"
651 // Invoke the builtin code.
658 // context and the function left on the stack by the code
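The hits at 651 and 658 come from the JS entry trampoline. As a hedged sketch of what sits between them (the local name builtin and the is_construct flag are assumptions based on the surrounding conventions, not quoted source):

    // Invoke the builtin code; leaving the internal frame afterwards also
    // removes the empty context and the function left on the stack by the
    // code invocation.
    Handle<CodeT> builtin = is_construct
                                ? BUILTIN_CODE(masm->isolate(), Construct)
                                : masm->isolate()->builtins()->Call();
    __ Call(builtin, RelocInfo::CODE_TARGET);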
680 Register code, Register scratch) {
681 // Verify that the code kind is baseline code via the CodeKind.
682 __ movl(scratch, FieldOperand(code, CodeT::kFlagsOffset));
688 static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
690 DCHECK(!AreAliased(code, scratch));
691 return AssertCodeTIsBaselineAllowClobber(masm, code, scratch);
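The hits at 680-691 outline a pair of baseline-code assertions. A plausible body for the clobbering variant, assuming CodeT::KindField is the bit field that encodes the CodeKind inside the flags word loaded at 682 (a sketch in the file's own MacroAssembler style, not a verbatim quote):

    static void AssertCodeTIsBaselineAllowClobber(MacroAssembler* masm,
                                                  Register code,
                                                  Register scratch) {
      // Verify that the code kind is baseline code via the CodeKind.
      __ movl(scratch, FieldOperand(code, CodeT::kFlagsOffset));
      __ DecodeField<CodeT::KindField>(scratch);
      __ cmpl(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
      __ Assert(equal, AbortReason::kExpectedBaselineData);
    }

The non-clobbering wrapper at 688 then only adds the DCHECK seen at 690 before delegating.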
884 // Store the optimized code in the closure.
988 // If the optimized code is cleared, go to runtime to update the optimization
992 // Check if the optimized code is marked for deopt. If it is, call the
1009 // Optimized code is good, get it into the closure and link the closure into
1010 // the optimized functions list, then tail call the optimized code.
1017 // Optimized code slot contains deoptimized code or code is cleared and
1018 // optimized code marker isn't updated. Evict the code, update the marker
1019 // and re-enter the closure's code.
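The hits at 884 and 988-1019 describe the tiering fast path for an optimized code slot. The control flow those comments imply, sketched with assumed label, register, and helper names (the helpers are presumed to be the ones declared earlier in this file):

    Label heal_optimized_code_slot;  // assumed label name

    // If the optimized code is cleared (a dead weak reference), go to the
    // runtime to update the optimization marker.
    __ LoadWeakValue(optimized_code_entry, &heal_optimized_code_slot);

    // Check if the optimized code is marked for deopt. If it is, heal the
    // slot rather than entering stale code.
    __ TestCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch1);
    __ j(not_zero, &heal_optimized_code_slot);

    // Optimized code is good: store it in the closure (hit 884) and tail
    // call it.
    ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                        scratch1, slot_address);
    __ JumpCodeTObject(optimized_code_entry);

    // Deoptimized or cleared code: evict it, update the marker, and
    // re-enter the closure's current code via the runtime.
    __ bind(&heal_optimized_code_slot);
    GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);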
1040 // will restore the original bytecode. In order to simplify the code, we have
1060 // The code to load the next bytecode is common to both wide and extra wide.
1109 // is optimized code or a tiering state that needs to be processed.
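The hit at 1060 belongs to the interpreter's bytecode-advance helper, where the Wide and ExtraWide prefix paths converge. A minimal sketch of that shared tail, with register names assumed:

    // The code to load the next bytecode is common to both wide and extra
    // wide: step past the prefix byte, then zero-extend the next opcode.
    __ incl(bytecode_offset);
    __ movzxbq(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));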
1161 // Generate code for entering a JS function with the interpreter.
1206 // Check if feedback vector is valid. If valid, check for optimized code
1226 // MANUAL indicates that the scope shouldn't actually generate code to set up
1385 // Load the baseline code into the closure.
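The hits at 1161-1385 come from the interpreter entry trampoline. The validity test mentioned at 1206 reduces to an instance-type check on the closure's feedback cell value; a hedged sketch (the use of rcx as scratch and the label name are assumptions):

    // Check if feedback vector is valid. If valid, check for optimized code
    // and tiering state; otherwise build the frame without it.
    Label push_stack_frame;  // assumed label name
    __ LoadMap(rcx, feedback_vector);
    __ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
    __ j(not_equal, &push_stack_frame);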
1664 // If the code deoptimizes during the implicit function entry stack interrupt
1738 // We'll use the bytecode for both code age/OSR resetting, and pushing
1745 // Baseline code frames store the feedback vector where interpreter would
1798 // Push the baseline code return address now, as if it had been pushed by
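The hits at 1738-1798 describe how a baseline frame is laid out. Two representative fragments, sketched under assumed register names rather than quoted:

    // Baseline code frames store the feedback vector where interpreter
    // frames would store the bytecode offset.
    __ Push(feedback_vector);

    // Later: push the baseline code return address, as if it had been
    // pushed by a call into the baseline code itself.
    __ PushReturnAddressFrom(return_address);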
1838 int code = config->GetAllocatableGeneralCode(i);
1839 __ popq(Register::from_code(code));
1840 if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1841 __ SmiUntag(Register::from_code(code));
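The hits at 1838-1841 are the body of a register-restore loop. The enclosing loop, reconstructed on the assumption that it walks the allocatable general registers in reverse allocation order:

    for (int i = allocatable_register_count - 1; i >= 0; --i) {
      int code = config->GetAllocatableGeneralCode(i);
      __ popq(Register::from_code(code));
      if (java_script_builtin &&
          code == kJavaScriptCallArgCountRegister.code()) {
        // The argument count was saved as a Smi; untag it back to an int.
        __ SmiUntag(Register::from_code(code));
      }
    }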
1900 DCHECK_EQ(kInterpreterAccumulatorRegister.code(), rax.code());
2168 Handle<CodeT> code) {
2230 __ Jump(code, RelocInfo::CODE_TARGET);
2239 Handle<CodeT> code) {
2318 // Tail-call to the {code} handler.
2319 __ Jump(code, RelocInfo::CODE_TARGET);
2482 // pointer decompression code.
2739 // If the code object is null, just return to the caller.
2756 // Load deoptimization data from the code object.
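The hits at 2739 and 2756 are from the on-stack-replacement helper. A hedged sketch of the sequence they bracket, assuming the runtime call left the code object (or null) in rax and that the field names below match this revision:

    Label jump_to_returned_code;  // assumed label name

    // If the code object is null, just return to the caller.
    __ testq(rax, rax);
    __ j(not_equal, &jump_to_returned_code, Label::kNear);
    __ ret(0);

    __ bind(&jump_to_returned_code);
    // Load deoptimization data from the code object.
    __ LoadTaggedPointerField(
        rbx,
        FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));
    // Read the OSR pc offset (a Smi) out of the deoptimization data.
    __ SmiUntagField(
        rbx, FieldOperand(rbx, FixedArray::OffsetOfElementAt(
                                   DeoptimizationData::kOsrPcOffsetIndex)));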
3749 // Deferred code.
3756 // Restore function_data register (which was clobbered by the code above,
4084 // resumed continuation, we return to the GenericJSToWasmWrapper code, which
4834 int code = config->GetAllocatableDoubleCode(i);
4835 XMMRegister xmm_reg = XMMRegister::from_code(code);
4836 int offset = code * kDoubleSize;
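The hits at 4834-4836 are the save half of the deoptimization entry's double-register handling. The loop they come from, reconstructed as a sketch (the stack space for the doubles is assumed to have been reserved just above):

    const RegisterConfiguration* config = RegisterConfiguration::Default();
    // Save all allocatable XMM registers before they can be clobbered.
    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
      int code = config->GetAllocatableDoubleCode(i);
      XMMRegister xmm_reg = XMMRegister::from_code(code);
      int offset = code * kDoubleSize;
      __ Movsd(Operand(rsp, offset), xmm_reg);
    }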
4857 // Get the address of the location in the code object
4976 int code = config->GetAllocatableDoubleCode(i);
4977 XMMRegister xmm_reg = XMMRegister::from_code(code);
4978 int src_offset = code * kDoubleSize + double_regs_offset;
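The hits at 4976-4978 are the matching restore loop, which reads each double back out of the input FrameDescription. Sketch, assuming rbx points at that frame and double_regs_offset is the offset of its double-register area, as the expression at 4978 suggests:

    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
      int code = config->GetAllocatableDoubleCode(i);
      XMMRegister xmm_reg = XMMRegister::from_code(code);
      int src_offset = code * kDoubleSize + double_regs_offset;
      __ Movsd(xmm_reg, Operand(rbx, src_offset));
    }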
5030 // bytecode. If there is baseline code on the shared function info, converts an
5032 // code. Otherwise execution continues with bytecode.
5051 // Check if we have baseline code. For OSR entry it is safe to assume we
5052 // always have baseline code.
5058 // Start with bytecode as there is no baseline code.
5065 // Start with baseline code.
5113 // If the code deoptimizes during the implicit function entry stack interrupt
5175 // Retry from the start after installing baseline code.
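The hits at 5030-5175 outline the baseline-or-interpreter entry helper. The branch that the comments at 5051-5065 describe, sketched with assumed register, label, and type names (CODET_TYPE in particular is an assumption about this revision):

    Label start, start_with_baseline;  // assumed label names

    __ bind(&start);
    // Check if we have baseline code: the function data holds a CodeT
    // object only once baseline code has been installed.
    __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
    __ j(equal, &start_with_baseline);

    // Start with bytecode as there is no baseline code.
    __ Jump(BUILTIN_CODE(masm->isolate(), InterpreterEnterAtBytecode),
            RelocInfo::CODE_TARGET);

    // Start with baseline code.
    __ bind(&start_with_baseline);

The retry noted at 5175 would then be a plain jump back to the start label after the runtime call that installs the baseline code.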