// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
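// As elsewhere in V8's builtins, '__' expands to ACCESS_MASM(masm), so
// '__ op(...)' emits 'op' on the MacroAssembler passed to each generator.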
namespace {
static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
Register scratch) {
DCHECK(!AreAliased(code, scratch));
// Verify that the code kind is baseline code via the CodeKind.
__ LoadU32(scratch, FieldMemOperand(code, Code::kFlagsOffset));
__ DecodeField<Code::KindField>(scratch);
__ CmpS64(scratch, Operand(static_cast<int>(CodeKind::BASELINE)), r0);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
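// Given the SharedFunctionInfo's function data in sfi_data, jumps to
// is_baseline if the data is baseline Code. If the data is an
// InterpreterData, replaces sfi_data with the BytecodeArray it wraps;
// otherwise sfi_data is left as-is (callers expect a BytecodeArray then).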
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
Label* is_baseline) {
USE(GetSharedFunctionInfoBytecodeOrBaseline);
ASM_CODE_COMMENT(masm);
Label done;
__ LoadMap(scratch1, sfi_data);
#ifndef V8_JITLESS
__ CompareInstanceType(scratch1, scratch1, CODE_TYPE);
if (v8_flags.debug_code) {
Label not_baseline;
__ b(ne, &not_baseline);
AssertCodeIsBaseline(masm, sfi_data, scratch1);
__ beq(is_baseline);
__ bind(&not_baseline);
} else {
__ beq(is_baseline);
}
__ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE), r0);
#else
__ CompareInstanceType(scratch1, scratch1, INTERPRETER_DATA_TYPE);
#endif // !V8_JITLESS
__ bne(&done);
__ LoadTaggedField(
sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset), r0);
__ bind(&done);
}
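// "Returns" into OSR code: loads entry_address + offset into LR and
// executes a blr, so control resumes at the OSR entry point.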
void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
intptr_t offset) {
__ AddS64(ip, entry_address, Operand(offset), r0);
__ mtlr(ip);
// "return" to the OSR entry point of the function.
__ Ret();
}
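// Writes zero to the SharedFunctionInfo's age field, marking the function
// as recently used (the age feeds the bytecode-flushing heuristics).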
void ResetSharedFunctionInfoAge(MacroAssembler* masm, Register sfi,
Register scratch) {
DCHECK(!AreAliased(sfi, scratch));
__ mov(scratch, Operand(0));
__ StoreU16(scratch, FieldMemOperand(sfi, SharedFunctionInfo::kAgeOffset),
no_reg);
}
void ResetJSFunctionAge(MacroAssembler* masm, Register js_function,
Register scratch1, Register scratch2) {
__ LoadTaggedField(
scratch1,
FieldMemOperand(js_function, JSFunction::kSharedFunctionInfoOffset),
scratch2);
ResetSharedFunctionInfoAge(masm, scratch1, scratch2);
}
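// Clears the OSR urgency bits in the feedback vector's osr_state byte,
// leaving the remaining state bits untouched.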
void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
Register feedback_vector, Register scratch1,
Register scratch2) {
DCHECK(!AreAliased(feedback_vector, scratch1));
__ LoadU8(scratch1,
FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset),
scratch2);
__ andi(
scratch1, scratch1,
Operand(static_cast<uint8_t>(~FeedbackVector::OsrUrgencyBits::kMask)));
__ StoreU8(scratch1,
FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset),
scratch2);
}
// Restarts execution either at the current or next (in execution order)
// bytecode. If there is baseline code on the shared function info, converts an
// interpreter frame into a baseline frame and continues execution in baseline
// code. Otherwise execution continues with bytecode.
void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
bool next_bytecode,
bool is_osr = false) {
Label start;
__ bind(&start);
// Get function from the frame.
Register closure = r4;
__ LoadU64(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset),
r0);
// Get the InstructionStream object from the shared function info.
Register code_obj = r9;
__ LoadTaggedField(
code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset),
r0);
if (is_osr) {
ResetSharedFunctionInfoAge(masm, code_obj, r6);
}
__ LoadTaggedField(
code_obj,
FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset), r0);
// Check if we have baseline code. For OSR entry it is safe to assume we
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
__ CompareObjectType(code_obj, r6, r6, CODE_TYPE);
__ b(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
Builtin builtin_id = next_bytecode
? Builtin::kInterpreterEnterAtNextBytecode
: Builtin::kInterpreterEnterAtBytecode;
__ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
RelocInfo::CODE_TARGET);
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (v8_flags.debug_code) {
__ CompareObjectType(code_obj, r6, r6, CODE_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
if (v8_flags.debug_code) {
AssertCodeIsBaseline(masm, code_obj, r6);
}
// Load the feedback cell and vector.
Register feedback_cell = r5;
Register feedback_vector = ip;
__ LoadTaggedField(feedback_cell,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
r0);
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset),
r0);
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
__ CompareObjectType(feedback_vector, r6, r6, FEEDBACK_VECTOR_TYPE);
__ b(ne, &install_baseline_code);
// Save BytecodeOffset from the stack frame.
__ LoadU64(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Replace bytecode offset with feedback cell.
static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp ==
BaselineFrameConstants::kFeedbackCellFromFp);
__ StoreU64(feedback_cell,
MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp));
feedback_cell = no_reg;
// Update feedback vector cache.
static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp ==
BaselineFrameConstants::kFeedbackVectorFromFp);
__ StoreU64(feedback_vector,
MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp));
feedback_vector = no_reg;
// Compute baseline pc for bytecode offset.
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
get_baseline_pc_extref =
ExternalReference::baseline_pc_for_next_executed_bytecode();
} else {
get_baseline_pc_extref =
ExternalReference::baseline_pc_for_bytecode_offset();
}
Register get_baseline_pc = r6;
__ Move(get_baseline_pc, get_baseline_pc_extref);
// If the code deoptimizes during the implicit function entry stack interrupt
// check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
// not a valid bytecode offset.
// TODO(pthier): Investigate if it is feasible to handle this special case
// in TurboFan instead of here.
Label valid_bytecode_offset, function_entry_bytecode;
if (!is_osr) {
__ CmpS64(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
kFunctionEntryBytecodeOffset),
r0);
__ b(eq, &function_entry_bytecode);
}
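// The offset stored in the frame includes the BytecodeArray header, so
// strip kHeaderSize - kHeapObjectTag before handing it to the C++ helper.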
__ SubS64(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ bind(&valid_bytecode_offset);
// Get bytecode array from the stack frame.
__ LoadU64(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
// Save the accumulator register, since it's clobbered by the below call.
__ Push(kInterpreterAccumulatorRegister);
__ Push(code_obj);
{
__ mr(arg_reg_1, code_obj);
__ mr(arg_reg_2, kInterpreterBytecodeOffsetRegister);
__ mr(arg_reg_3, kInterpreterBytecodeArrayRegister);
FrameScope scope(masm, StackFrame::INTERNAL);
__ PrepareCallCFunction(4, 0, ip);
__ CallCFunction(get_baseline_pc, 3, 0);
}
__ Pop(code_obj);
__ LoadCodeInstructionStart(code_obj, code_obj);
__ AddS64(code_obj, code_obj, kReturnRegister0);
__ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
Generate_OSREntry(masm, code_obj, 0);
} else {
__ Jump(code_obj);
}
__ Trap(); // Unreachable.
if (!is_osr) {
__ bind(&function_entry_bytecode);
// If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
// address of the first bytecode.
__ mov(kInterpreterBytecodeOffsetRegister, Operand(0));
if (next_bytecode) {
__ Move(get_baseline_pc,
ExternalReference::baseline_pc_for_bytecode_offset());
}
__ b(&valid_bytecode_offset);
}
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(kInterpreterAccumulatorRegister);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
__ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ b(&start);
}
} // namespace
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
__ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
}
namespace {
enum class ArgumentsElementType {
kRaw, // Push arguments as they are.
kHandle // Dereference arguments before pushing.
};
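// Pushes |argc| - kJSArgcReceiverSlots arguments from |array| onto the
// stack, iterating from the last element to the first with a CTR-driven
// loop. With ArgumentsElementType::kHandle each slot is dereferenced
// before being pushed.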
void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
Register scratch,
ArgumentsElementType element_type) {
DCHECK(!AreAliased(array, argc, scratch));
Label loop, done;
__ subi(scratch, argc, Operand(kJSArgcReceiverSlots));
__ cmpi(scratch, Operand::Zero());
__ beq(&done);
__ mtctr(scratch);
__ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ add(scratch, array, scratch);
__ bind(&loop);
__ LoadU64WithUpdate(ip, MemOperand(scratch, -kSystemPointerSize));
if (element_type == ArgumentsElementType::kHandle) {
__ LoadU64(ip, MemOperand(ip));
}
__ push(ip);
__ bdnz(&loop);
__ bind(&done);
}
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
// -- r6 : new target
// -- cp : context
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Register scratch = r5;
Label stack_overflow;
__ StackOverflowCheck(r3, scratch, &stack_overflow);
// Enter a construct frame.
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
__ SmiTag(r3);
__ Push(cp, r3);
__ SmiUntag(r3, SetRC);
// TODO(victorgomes): When the arguments adaptor is completely removed, we
// should get the formal parameter count and copy the arguments in its
// correct position (including any undefined), instead of delaying this to
// InvokeFunction.
// Set up pointer to first argument (skip receiver).
__ addi(
r7, fp,
Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
// Copy arguments and receiver to the expression stack.
// r7: Pointer to start of arguments.
// r3: Number of arguments.
Generate_PushArguments(masm, r7, r3, r8, ArgumentsElementType::kRaw);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
// Call the function.
// r3: number of arguments (untagged)
// r4: constructor function
// r6: new target
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ InvokeFunctionWithNewTarget(r4, r6, r3, InvokeType::kCall);
}
// Restore context from the frame.
__ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
__ LoadU64(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
__ DropArguments(scratch, MacroAssembler::kCountIsSmi,
MacroAssembler::kCountIncludesReceiver);
__ blr();
__ bind(&stack_overflow);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bkpt(0); // Unreachable code.
}
}
enum class OsrSourceTier {
kInterpreter,
kBaseline,
};
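// Common OSR tail for Ignition and Sparkplug frames: if maybe_target_code
// is the zero Smi, compiles OSR code via the runtime first; then computes
// the OSR entry pc from the code object's deoptimization data and
// "returns" into it.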
void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Register maybe_target_code) {
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
// precondition here is: if maybe_target_code is an InstructionStream object,
// it must NOT be marked_for_deoptimization (callers must ensure this).
__ CmpSmiLiteral(maybe_target_code, Smi::zero(), r0);
__ bne(&jump_to_optimized_code);
}
ASM_CODE_COMMENT(masm);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kCompileOptimizedOSR);
}
// If the code object is null, just return to the caller.
__ CmpSmiLiteral(r3, Smi::zero(), r0);
__ bne(&jump_to_optimized_code);
__ Ret();
__ bind(&jump_to_optimized_code);
DCHECK_EQ(maybe_target_code, r3); // Already in the right spot.
// OSR entry tracing.
{
Label next;
__ Move(r4, ExternalReference::address_of_log_or_trace_osr());
__ LoadU8(r4, MemOperand(r4));
__ andi(r0, r4, Operand(0xFF)); // Mask to the LSB.
__ beq(&next, cr0);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r3); // Preserve the code object.
__ CallRuntime(Runtime::kLogOrTraceOptimizedOSREntry, 0);
__ Pop(r3);
}
__ bind(&next);
}
if (source == OsrSourceTier::kInterpreter) {
// Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
__ LeaveFrame(StackFrame::STUB);
}
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedField(
r4, FieldMemOperand(r3, Code::kDeoptimizationDataOrInterpreterDataOffset),
r0);
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
__ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r3, r0, ip);
}
__ LoadCodeInstructionStart(r3, r3);
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ SmiUntag(r4,
FieldMemOperand(r4, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex)),
LeaveRC, r0);
// Compute the target address = code start + osr_offset
__ add(r0, r3, r4);
// And "return" to the OSR entry point of the function.
__ mtlr(r0);
__ blr();
}
}
} // namespace
// The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3: number of arguments (untagged)
// -- r4: constructor function
// -- r6: new target
// -- cp: context
// -- lr: return address
// -- sp[...]: constructor arguments
// -----------------------------------
FrameScope scope(masm, StackFrame::MANUAL);
// Enter a construct frame.
Label post_instantiation_deopt_entry, not_create_implicit_receiver;
__ EnterFrame(StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
__ SmiTag(r3);
__ Push(cp, r3, r4);
__ PushRoot(RootIndex::kUndefinedValue);
__ Push(r6);
// ----------- S t a t e -------------
// -- sp[0*kSystemPointerSize]: new target
// -- sp[1*kSystemPointerSize]: padding
// -- r4 and sp[2*kSystemPointerSize]: constructor function
// -- sp[3*kSystemPointerSize]: number of arguments (tagged)
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
__ LoadTaggedField(
r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r7);
__ JumpIfIsInRange(
r7, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
&not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
__ b(&post_instantiation_deopt_entry);
// Else: use TheHoleValue as receiver for constructor call
__ bind(&not_create_implicit_receiver);
__ LoadRoot(r3, RootIndex::kTheHoleValue);
// ----------- S t a t e -------------
// -- r3: receiver
// -- Slot 4 / sp[0*kSystemPointerSize]: new target
// -- Slot 3 / sp[1*kSystemPointerSize]: padding
// -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
// -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
// -- Slot 0 / sp[4*kSystemPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
masm->pc_offset());
__ bind(&post_instantiation_deopt_entry);
// Restore new target.
__ Pop(r6);
// Push the allocated receiver to the stack.
__ Push(r3);
// We need two copies because we may have to return the original one
// and the calling conventions dictate that the called function pops the
// receiver. The second copy is pushed after the arguments; we keep it in
// r9, since r3 needs to hold the number of arguments until the function
// is invoked.
__ mr(r9, r3);
// Set up pointer to first argument (skip receiver).
__ addi(
r7, fp,
Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
// ----------- S t a t e -------------
// -- r6: new target
// -- sp[0*kSystemPointerSize]: implicit receiver
// -- sp[1*kSystemPointerSize]: implicit receiver
// -- sp[2*kSystemPointerSize]: padding
// -- sp[3*kSystemPointerSize]: constructor function
// -- sp[4*kSystemPointerSize]: number of arguments (tagged)
// -- sp[5*kSystemPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
__ LoadU64(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ LoadU64(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(r3);
Label stack_overflow;
__ StackOverflowCheck(r3, r8, &stack_overflow);
// Copy arguments to the expression stack.
// r7: Pointer to start of arguments.
// r3: Number of arguments.
Generate_PushArguments(masm, r7, r3, r8, ArgumentsElementType::kRaw);
// Push implicit receiver.
__ Push(r9);
// Call the function.
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ InvokeFunctionWithNewTarget(r4, r6, r3, InvokeType::kCall);
}
// If the result is an object (in the ECMA sense), we should get rid
// of the receiver and use the result; see ECMA-262 section 13.2.2-7
// on page 74.
Label use_receiver, do_throw, leave_and_return, check_receiver;
// If the result is undefined, we jump out to using the implicit receiver.
__ JumpIfNotRoot(r3, RootIndex::kUndefinedValue, &check_receiver);
// Otherwise we do a smi check and fall through to check if the return value
// is a valid receiver.
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
__ LoadU64(r3, MemOperand(sp));
__ JumpIfRoot(r3, RootIndex::kTheHoleValue, &do_throw);
__ bind(&leave_and_return);
// Restore smi-tagged arguments count from the frame.
__ LoadU64(r4, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
__ DropArguments(r4, MacroAssembler::kCountIsSmi,
MacroAssembler::kCountIncludesReceiver);
__ blr();
__ bind(&check_receiver);
// If the result is a smi, it is *not* an object in the ECMA sense.
__ JumpIfSmi(r3, &use_receiver);
// If the type of the result (stored in its map) is less than
// FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CompareObjectType(r3, r7, r7, FIRST_JS_RECEIVER_TYPE);
__ bge(&leave_and_return);
__ b(&use_receiver);
__ bind(&do_throw);
// Restore the context from the frame.
__ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
__ bkpt(0);
__ bind(&stack_overflow);
// Restore the context from the frame.
__ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ bkpt(0);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the value to pass to the generator
// -- r4 : the JSGeneratorObject to resume
// -- lr : return address
// -----------------------------------
// Store input value into generator object.
__ StoreTaggedField(
r3, FieldMemOperand(r4, JSGeneratorObject::kInputOrDebugPosOffset), r0);
__ RecordWriteField(r4, JSGeneratorObject::kInputOrDebugPosOffset, r3, r6,
kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Check that r4 is still valid, RecordWrite might have clobbered it.
__ AssertGeneratorObject(r4);
// Load suspended function and context.
__ LoadTaggedField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
__ LoadTaggedField(cp, FieldMemOperand(r7, JSFunction::kContextOffset), r0);
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
Register scratch = r8;
ExternalReference debug_hook =
ExternalReference::debug_hook_on_function_call_address(masm->isolate());
__ Move(scratch, debug_hook);
__ LoadU8(scratch, MemOperand(scratch), r0);
__ extsb(scratch, scratch);
__ CmpSmiLiteral(scratch, Smi::zero(), r0);
__ bne(&prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
ExternalReference::debug_suspended_generator_address(masm->isolate());
__ Move(scratch, debug_suspended_generator);
__ LoadU64(scratch, MemOperand(scratch));
__ CmpS64(scratch, r4);
__ beq(&prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
__ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
__ CmpU64(sp, scratch);
__ blt(&stack_overflow);
// ----------- S t a t e -------------
// -- r4 : the JSGeneratorObject to resume
// -- r7 : generator function
// -- cp : generator context
// -- lr : return address
// -----------------------------------
// Copy the function arguments from the generator object's register file.
__ LoadTaggedField(
r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
__ LoadU16(
r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
__ subi(r6, r6, Operand(kJSArgcReceiverSlots));
__ LoadTaggedField(
r5, FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset),
r0);
{
Label done_loop, loop;
__ bind(&loop);
__ subi(r6, r6, Operand(1));
__ cmpi(r6, Operand::Zero());
__ blt(&done_loop);
__ ShiftLeftU64(r10, r6, Operand(kTaggedSizeLog2));
__ add(scratch, r5, r10);
__ LoadTaggedField(scratch,
FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
__ Push(scratch);
__ b(&loop);
__ bind(&done_loop);
// Push receiver.
__ LoadTaggedField(
scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset), r0);
__ Push(scratch);
}
// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline;
__ LoadTaggedField(
r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
__ LoadTaggedField(
r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset), r0);
GetSharedFunctionInfoBytecodeOrBaseline(masm, r6, ip, &is_baseline);
__ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
__ bind(&is_baseline);
}
// Resume (Ignition/TurboFan) generator object.
{
__ LoadTaggedField(
r3, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
__ LoadU16(r3, FieldMemOperand(
r3, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
__ mr(r6, r4);
__ mr(r4, r7);
__ JumpJSFunction(r4, r0);
}
__ bind(&prepare_step_in_if_stepping);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r4, r7);
// Push hole as receiver since we do not use it for stepping.
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r4);
__ LoadTaggedField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
}
__ b(&stepping_prepared);
__ bind(&prepare_step_in_suspended_generator);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r4);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(r4);
__ LoadTaggedField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
}
__ b(&stepping_prepared);
__ bind(&stack_overflow);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bkpt(0); // This should be unreachable.
}
}
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(r4);
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
__ Trap(); // Unreachable.
}
namespace {
// Called with the native C calling convention. The corresponding function
// signature is either:
//
// using JSEntryFunction = GeneratedCode<Address(
// Address root_register_value, Address new_target, Address target,
// Address receiver, intptr_t argc, Address** args)>;
// or
// using JSEntryFunction = GeneratedCode<Address(
// Address root_register_value, MicrotaskQueue* microtask_queue)>;
void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
Builtin entry_trampoline) {
// The register state is either:
// r3: root_register_value
// r4: code entry
// r5: function
// r6: receiver
// r7: argc
// r8: argv
// or
// r3: root_register_value
// r4: microtask_queue
Label invoke, handler_entry, exit;
{
NoRootArrayScope no_root_array(masm);
// PPC LINUX ABI:
// preserve LR in pre-reserved slot in caller's frame
__ mflr(r0);
__ StoreU64(r0, MemOperand(sp, kStackFrameLRSlot * kSystemPointerSize));
// Save callee saved registers on the stack.
__ MultiPush(kCalleeSaved);
// Save callee-saved double registers.
__ MultiPushDoubles(kCalleeSavedDoubles);
// Set up the reserved register for 0.0.
__ LoadDoubleLiteral(kDoubleRegZero, base::Double(0.0), r0);
// Initialize the root register.
// C calling convention. The first argument is passed in r3.
__ mr(kRootRegister, r3);
#ifdef V8_COMPRESS_POINTERS
// Initialize the pointer cage base register.
__ LoadRootRelative(kPtrComprCageBaseRegister,
IsolateData::cage_base_offset());
#endif
}
// Push a frame with special values setup to mark it as an entry frame.
// r4: code entry
// r5: function
// r6: receiver
// r7: argc
// r8: argv
__ li(r0, Operand(-1)); // Push a bad frame pointer to fail if it is used.
__ push(r0);
if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
__ li(kConstantPoolRegister, Operand::Zero());
__ push(kConstantPoolRegister);
}
__ mov(r0, Operand(StackFrame::TypeToMarker(type)));
__ push(r0);
__ push(r0);
// Save copies of the top frame descriptor on the stack.
__ Move(r3, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
masm->isolate()));
__ LoadU64(r0, MemOperand(r3));
__ push(r0);
// Clear c_entry_fp, now we've pushed its previous value to the stack.
// If the c_entry_fp is not already zero and we don't clear it, the
// StackFrameIteratorForProfiler will assume we are executing C++ and miss the
// JS frames on top.
__ li(r0, Operand::Zero());
__ StoreU64(r0, MemOperand(r3));
Register scratch = r9;
// Set up frame pointer for the frame to be pushed.
__ addi(fp, sp, Operand(-EntryFrameConstants::kNextExitFrameFPOffset));
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
ExternalReference js_entry_sp =
ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress,
masm->isolate());
__ Move(r3, js_entry_sp);
__ LoadU64(scratch, MemOperand(r3));
__ cmpi(scratch, Operand::Zero());
__ bne(&non_outermost_js);
__ StoreU64(fp, MemOperand(r3));
__ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont);
__ bind(&non_outermost_js);
__ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
__ push(scratch); // frame-type
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
__ b(&invoke);
// Block literal pool emission whilst taking the position of the handler
// entry. This avoids making the assumption that literal pools are always
// emitted after an instruction is emitted, rather than before.
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ bind(&handler_entry);
// Store the current pc as the handler offset. It's used later to create the
// handler table.
masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
// Caught exception: Store result (exception) in the pending exception
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
__ Move(scratch,
ExternalReference::Create(
IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
}
__ StoreU64(r3, MemOperand(scratch));
__ LoadRoot(r3, RootIndex::kException);
__ b(&exit);
// Invoke: Link this frame into the handler chain.
__ bind(&invoke);
// Must preserve r4-r8.
__ PushStackHandler();
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the b(&invoke) above, which
// restores all kCalleeSaved registers (including cp and fp) to their
// saved values before returning a failure to C.
// Invoke the function by calling through JS entry trampoline builtin.
// Notice that we cannot store a reference to the trampoline code directly in
// this stub, because runtime stubs are not traversed when doing GC.
// Invoke the function by calling through JS entry trampoline builtin and
// pop the faked function when we return.
Handle<Code> trampoline_code =
masm->isolate()->builtins()->code_handle(entry_trampoline);
__ Call(trampoline_code, RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
__ bind(&exit); // r3 holds result
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(r8);
__ cmpi(r8, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ bne(&non_outermost_js_2);
__ mov(scratch, Operand::Zero());
__ Move(r8, js_entry_sp);
__ StoreU64(scratch, MemOperand(r8));
__ bind(&non_outermost_js_2);
// Restore the top frame descriptors from the stack.
__ pop(r6);
__ Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
masm->isolate()));
__ StoreU64(r6, MemOperand(scratch));
// Reset the stack to the callee saved registers.
__ addi(sp, sp, Operand(-EntryFrameConstants::kNextExitFrameFPOffset));
// Restore callee-saved double registers.
__ MultiPopDoubles(kCalleeSavedDoubles);
// Restore callee-saved registers.
__ MultiPop(kCalleeSaved);
// Return
__ LoadU64(r0, MemOperand(sp, kStackFrameLRSlot * kSystemPointerSize));
__ mtlr(r0);
__ blr();
}
} // namespace
void Builtins::Generate_JSEntry(MacroAssembler* masm) {
Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
}
void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
Builtin::kJSConstructEntryTrampoline);
}
void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
Generate_JSEntryVariant(masm, StackFrame::ENTRY,
Builtin::kRunMicrotasksTrampoline);
}
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
// r4: new.target
// r5: function
// r6: receiver
// r7: argc
// r8: argv
// r0, r3, r9, cp may be clobbered
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate());
__ Move(cp, context_address);
__ LoadU64(cp, MemOperand(cp));
// Push the function.
__ Push(r5);
// Check if we have enough stack space to push all arguments.
Label enough_stack_space, stack_overflow;
__ mr(r3, r7);
__ StackOverflowCheck(r3, r9, &stack_overflow);
__ b(&enough_stack_space);
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ bkpt(0);
__ bind(&enough_stack_space);
// Copy arguments to the stack.
// r4: function
// r7: argc
// r8: argv, i.e. points to first arg
Generate_PushArguments(masm, r8, r7, r9, ArgumentsElementType::kHandle);
// Push the receiver.
__ Push(r6);
// r3: argc
// r4: function
// r6: new.target
__ mr(r3, r7);
__ mr(r6, r4);
__ mr(r4, r5);
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
__ LoadRoot(r7, RootIndex::kUndefinedValue);
__ mr(r8, r7);
__ mr(r14, r7);
__ mr(r15, r7);
__ mr(r16, r7);
__ mr(r17, r7);
// Invoke the code.
Handle<Code> builtin = is_construct
? BUILTIN_CODE(masm->isolate(), Construct)
: masm->isolate()->builtins()->Call();
__ Call(builtin, RelocInfo::CODE_TARGET);
// Exit the JS frame and remove the parameters (except function), and
// return.
}
__ blr();
// r3: result
}
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
}
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
// This expects two C++ function parameters passed by Invoke() in
// execution.cc.
// r3: root_register_value
// r4: microtask_queue
__ mr(RunMicrotasksDescriptor::MicrotaskQueueRegister(), r4);
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
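// Tears down the interpreter frame and drops the arguments, freeing the
// larger of the formal and actual parameter areas so over-application is
// cleaned up as well.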
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Register scratch2) {
Register params_size = scratch1;
// Get the size of the formal parameters + receiver (in bytes).
__ LoadU64(params_size,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ lwz(params_size,
FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
Register actual_params_size = scratch2;
// Compute the size of the actual parameters + receiver (in bytes).
__ LoadU64(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ ShiftLeftU64(actual_params_size, actual_params_size,
Operand(kSystemPointerSizeLog2));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
__ CmpS64(params_size, actual_params_size);
__ bge(&corrected_args_count);
__ mr(params_size, actual_params_size);
__ bind(&corrected_args_count);
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
__ DropArguments(params_size, MacroAssembler::kCountIsBytes,
MacroAssembler::kCountIncludesReceiver);
}
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
// label if the bytecode (without prefix) is a return bytecode. Will not advance
// the bytecode offset if the current bytecode is a JumpLoop, instead just
// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
Register scratch3 = bytecode;
// The bytecode offset value will be increased by one in wide and extra wide
// cases. In the case of having a wide or extra wide JumpLoop bytecode, we
// will restore the original bytecode. In order to simplify the code, we have
// a backup of it.
Register original_bytecode_offset = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode, original_bytecode_offset));
__ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address());
__ Move(original_bytecode_offset, bytecode_offset);
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide;
static_assert(0 == static_cast<int>(interpreter::Bytecode::kWide));
static_assert(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
static_assert(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
static_assert(3 ==
static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
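// Per the static_asserts above, prefix bytecodes occupy values 0..3: even
// values select the wide table, odd values the extra-wide table.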
__ cmpi(bytecode, Operand(0x3));
__ bgt(&process_bytecode);
__ andi(r0, bytecode, Operand(0x1));
__ bne(&extra_wide, cr0);
// Load the next bytecode and update table to the wide scaled table.
__ addi(bytecode_offset, bytecode_offset, Operand(1));
__ lbzx(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ addi(bytecode_size_table, bytecode_size_table,
Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
__ b(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
__ addi(bytecode_offset, bytecode_offset, Operand(1));
__ lbzx(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ addi(bytecode_size_table, bytecode_size_table,
Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
// Load the size of the current bytecode.
__ bind(&process_bytecode);
// Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME) \
__ cmpi(bytecode, \
Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
__ beq(if_return);
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
// If this is a JumpLoop, re-execute it to perform the jump to the beginning
// of the loop.
Label end, not_jump_loop;
__ cmpi(bytecode,
Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
__ bne(&not_jump_loop);
// We need to restore the original bytecode_offset since we might have
// increased it to skip the wide / extra-wide prefix bytecode.
__ Move(bytecode_offset, original_bytecode_offset);
__ b(&end);
__ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
__ lbzx(scratch3, MemOperand(bytecode_size_table, bytecode));
__ add(bytecode_offset, bytecode_offset, scratch3);
__ bind(&end);
}
#if ENABLE_SPARKPLUG
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
auto descriptor =
Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback cell and vector from the closure.
Register feedback_cell = r7;
Register feedback_vector = ip;
__ LoadTaggedField(feedback_cell,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
r0);
__ LoadTaggedField(feedback_vector,
FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset),
r0);
__ AssertFeedbackVector(feedback_vector, r11);
// Check for a tiering state.
Label flags_need_processing;
Register flags = r10;
{
__ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
}
{ ResetFeedbackVectorOsrUrgency(masm, feedback_vector, r11, r0); }
// Increment invocation count for the function.
{
Register invocation_count = r11;
__ LoadU64(invocation_count,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset),
r0);
__ AddS64(invocation_count, invocation_count, Operand(1));
__ StoreU64(invocation_count,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset),
r0);
}
FrameScope frame_scope(masm, StackFrame::MANUAL);
{
ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
// Normally the first thing we'd do here is Push(lr, fp), but we already
// entered the frame in BaselineCompiler::Prologue, as we had to use the
// value lr before the call to this BaselineOutOfLinePrologue builtin.
Register callee_context = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kCalleeContext);
Register callee_js_function = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
ResetJSFunctionAge(masm, callee_js_function, r11, r0);
__ Push(callee_context, callee_js_function);
DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
DCHECK_EQ(callee_js_function, kJSFunctionRegister);
Register argc = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
// We'll use the bytecode for both code age/OSR resetting, and pushing onto
// the frame, so load it into a register.
Register bytecodeArray = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
__ Push(argc, bytecodeArray);
if (v8_flags.debug_code) {
Register scratch = r11;
__ CompareObjectType(feedback_vector, scratch, scratch,
FEEDBACK_VECTOR_TYPE);
__ Assert(eq, AbortReason::kExpectedFeedbackVector);
}
__ Push(feedback_cell);
__ Push(feedback_vector);
}
Label call_stack_guard;
Register frame_size = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
{
ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
// Stack check. This folds the checks for both the interrupt stack limit
// check and the real stack limit into one by just checking for the
// interrupt limit. The interrupt limit is either equal to the real stack
// limit or tighter. By ensuring we have space until that limit after
// building the frame we can quickly precheck both at once.
Register sp_minus_frame_size = r11;
Register interrupt_limit = r0;
__ SubS64(sp_minus_frame_size, sp, frame_size);
__ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
__ CmpU64(sp_minus_frame_size, interrupt_limit);
__ blt(&call_stack_guard);
}
// Do "fast" return to the caller pc in lr.
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ Ret();
__ bind(&flags_need_processing);
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// Drop the frame created by the baseline call.
if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
__ Pop(r0, fp, kConstantPoolRegister);
} else {
__ Pop(r0, fp);
}
__ mtlr(r0);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
__ Trap();
}
__ bind(&call_stack_guard);
{
ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
FrameScope frame_scope(masm, StackFrame::INTERNAL);
// Save incoming new target or generator
__ Push(kJavaScriptCallNewTargetRegister);
__ SmiTag(frame_size);
__ Push(frame_size);
__ CallRuntime(Runtime::kStackGuardWithGap);
__ Pop(kJavaScriptCallNewTargetRegister);
}
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ Ret();
}
#endif
// static
void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
// We're here because we got deopted during BaselineOutOfLinePrologue's stack
// check. Undo all its frame creation and call into the interpreter instead.
// Drop the feedback vector, the bytecode offset (was the feedback vector but
// got replaced during deopt) and bytecode array.
__ Drop(3);
// Context, closure, argc.
__ Pop(kContextRegister, kJavaScriptCallTargetRegister,
kJavaScriptCallArgCountRegister);
// Drop frame pointer
__ LeaveFrame(StackFrame::BASELINE);
// Enter the interpreter.
__ TailCallBuiltin(Builtin::kInterpreterEntryTrampoline);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
//
// The live registers are:
// o r3: actual argument count
// o r4: the JS function object being called.
// o r6: the incoming new target or generator object
// o cp: our context
// o pp: the caller's constant pool pointer (if enabled)
// o fp: the caller's frame pointer
// o sp: stack pointer
// o lr: return address
//
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frame-constants.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(
MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
Register closure = r4;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ LoadTaggedField(
r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset), r0);
ResetSharedFunctionInfoAge(masm, r7, ip);
// Load original bytecode array or the debug copy.
__ LoadTaggedField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(r7, SharedFunctionInfo::kFunctionDataOffset), r0);
Label is_baseline;
GetSharedFunctionInfoBytecodeOrBaseline(
masm, kInterpreterBytecodeArrayRegister, ip, &is_baseline);
// The bytecode array could have been flushed from the shared function info;
// if so, call into CompileLazy.
Label compile_lazy;
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r7, no_reg,
BYTECODE_ARRAY_TYPE);
__ bne(&compile_lazy);
Label push_stack_frame;
Register feedback_vector = r5;
__ LoadFeedbackVector(feedback_vector, closure, r7, &push_stack_frame);
#ifndef V8_JITLESS
// If feedback vector is valid, check for optimized code and update invocation
// count.
Register flags = r7;
Label flags_need_processing;
__ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
&flags_need_processing);
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, ip, r0);
// Increment invocation count for the function.
__ LoadU32(
r8,
FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset),
r0);
__ addi(r8, r8, Operand(1));
__ StoreU32(
r8,
FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset),
r0);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
#else
// Note: By omitting the above code in jitless mode we also disable:
// - kFlagsLogNextExecution: only used for logging/profiling; and
// - kInvocationCountOffset: only used for tiering heuristics and code
// coverage.
#endif // !V8_JITLESS
__ bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
// Load initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
// Push bytecode array and Smi tagged bytecode array offset.
__ SmiTag(r7, kInterpreterBytecodeOffsetRegister);
__ Push(kInterpreterBytecodeArrayRegister, r7, feedback_vector);
// Allocate the local and temporary register file on the stack.
Label stack_overflow;
{
// Load frame size (word) from the BytecodeArray object.
__ lwz(r5, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
__ sub(r8, sp, r5);
__ LoadStackLimit(r0, StackLimitKind::kRealStackLimit);
__ CmpU64(r8, r0);
__ blt(&stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
Label loop, no_args;
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ ShiftRightU64(r5, r5, Operand(kSystemPointerSizeLog2), SetRC);
__ beq(&no_args, cr0);
__ mtctr(r5);
__ bind(&loop);
__ push(kInterpreterAccumulatorRegister);
__ bdnz(&loop);
__ bind(&no_args);
}
// If the bytecode array has a valid incoming new target or generator object
// register, initialize it with the incoming value which was passed in r6.
Label no_incoming_new_target_or_generator_register;
__ LoadS32(r8,
FieldMemOperand(
kInterpreterBytecodeArrayRegister,
BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset),
r0);
__ cmpi(r8, Operand::Zero());
__ beq(&no_incoming_new_target_or_generator_register);
__ ShiftLeftU64(r8, r8, Operand(kSystemPointerSizeLog2));
__ StoreU64(r6, MemOperand(fp, r8));
__ bind(&no_incoming_new_target_or_generator_register);
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
__ LoadStackLimit(r0, StackLimitKind::kInterruptStackLimit);
__ CmpU64(sp, r0);
__ blt(&stack_check_interrupt);
__ bind(&after_stack_check_interrupt);
// The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
Label do_dispatch;
__ bind(&do_dispatch);
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
__ lbzx(r6, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ShiftLeftU64(r6, r6, Operand(kSystemPointerSizeLog2));
__ LoadU64(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, r6));
__ Call(kJavaScriptCallCodeStartRegister);
__ RecordComment("--- InterpreterEntryReturnPC point ---");
if (mode == InterpreterEntryTrampolineMode::kDefault) {
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(
masm->pc_offset());
} else {
DCHECK_EQ(mode, InterpreterEntryTrampolineMode::kForProfiling);
// Both versions must be the same up to this point, otherwise the builtins
// will not be interchangeable.
CHECK_EQ(
masm->isolate()->heap()->interpreter_entry_return_pc_offset().value(),
masm->pc_offset());
}
// Any returns to the entry trampoline are either due to the return bytecode
// or the interpreter tail calling a builtin and then a dispatch.
// Get bytecode array and bytecode offset from the stack frame.
__ LoadU64(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ LoadU64(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r4, r5, r6,
&do_return);
__ b(&do_dispatch);
__ bind(&do_return);
// The return value is in r3.
LeaveInterpreterFrame(masm, r5, r7);
__ blr();
__ bind(&stack_check_interrupt);
// Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
// for the call to the StackGuard.
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
kFunctionEntryBytecodeOffset)));
__ StoreU64(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ CallRuntime(Runtime::kStackGuard);
// After the call, restore the bytecode array, bytecode offset and accumulator
// registers again. Also, restore the bytecode offset in the stack to its
// previous value.
__ LoadU64(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
__ StoreU64(r0,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ jmp(&after_stack_check_interrupt);
#ifndef V8_JITLESS
__ bind(&flags_need_processing);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
__ LoadTaggedField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
__ LoadTaggedField(
feedback_vector,
FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset), r0);
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
__ LoadTaggedField(
ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
__ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
__ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE), r0);
__ b(ne, &install_baseline_code);
// Check for a tiering state.
__ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
// Load the baseline code into the closure.
__ mr(r5, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ ReplaceClosureCodeWithOptimizedCode(r5, closure, ip, r7);
__ JumpCodeObject(r5);
__ bind(&install_baseline_code);
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
}
#endif // !V8_JITLESS
__ bind(&compile_lazy);
__ GenerateTailCallToReturnedCode(Runtime::kCompileLazy);
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bkpt(0); // Should not return.
}
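// Backs start_address up by (num_args - 1) slots so it points at the last
// argument, then pushes all num_args arguments in reverse order.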
static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
Register start_address,
Register scratch) {
ASM_CODE_COMMENT(masm);
__ subi(scratch, num_args, Operand(1));
__ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ sub(start_address, start_address, scratch);
// Push the arguments.
__ PushArray(start_address, num_args, scratch, r0,
MacroAssembler::PushArrayOrder::kReverse);
}
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- r3 : the number of arguments
// -- r5 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -- r4 : the target to call (can be any Object).
// -----------------------------------
Label stack_overflow;
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ subi(r3, r3, Operand(1));
}
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ subi(r6, r3, Operand(kJSArgcReceiverSlots));
} else {
__ mr(r6, r3);
}
__ StackOverflowCheck(r6, ip, &stack_overflow);
// Push the arguments.
GenerateInterpreterPushArgs(masm, r6, r5, r7);
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ PushRoot(RootIndex::kUndefinedValue);
}
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Pass the spread in the register r5.
// r5 already points to the penultimate argument; the spread
// lies in the next interpreter register.
__ LoadU64(r5, MemOperand(r5, -kSystemPointerSize));
}
// Call the target.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ bkpt(0);
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r3 : argument count
// -- r6 : new target
// -- r4 : constructor to call
// -- r5 : allocation site feedback if available, undefined otherwise.
// -- r7 : address of the first argument
// -----------------------------------
Label stack_overflow;
__ StackOverflowCheck(r3, ip, &stack_overflow);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ subi(r3, r3, Operand(1));
}
Register argc_without_receiver = ip;
__ subi(argc_without_receiver, r3, Operand(kJSArgcReceiverSlots));
// Push the arguments.
GenerateInterpreterPushArgs(masm, argc_without_receiver, r7, r8);
// Push a slot for the receiver to be constructed.
__ li(r0, Operand::Zero());
__ push(r0);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Pass the spread in the register r5.
// r7 already points to the penultimate argument; the spread
// lies in the next interpreter register.
__ subi(r7, r7, Operand(kSystemPointerSize));
__ LoadU64(r5, MemOperand(r7));
} else {
__ AssertUndefinedOrAllocationSite(r5, r8);
}
if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(r4);
// Tail call to the array construct stub (still in the caller
// context at this point).
Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
__ Jump(code, RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r3, r4, and r6 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
RelocInfo::CODE_TARGET);
} else {
DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
// Call the constructor with r3, r4, and r6 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ bkpt(0);
}
}
namespace {
void NewImplicitReceiver(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count
// -- r4 : constructor to call (checked to be a JSFunction)
// -- r6 : new target
//
// Stack:
// -- Implicit Receiver
// -- [arguments without receiver]
// -- Implicit Receiver
// -- Context
// -- FastConstructMarker
// -- FramePointer
// -----------------------------------
Register implicit_receiver = r7;
// Save live registers.
__ SmiTag(r3);
__ Push(r3, r4, r6);
__ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
// Save result.
__ Move(implicit_receiver, r3);
// Restore live registers.
__ Pop(r3, r4, r6);
__ SmiUntag(r3);
// Patch the implicit receiver (in the arguments).
__ StoreU64(implicit_receiver, MemOperand(sp, 0 * kSystemPointerSize), r0);
// Patch the second implicit receiver (in the construct frame).
__ StoreU64(
implicit_receiver,
MemOperand(fp, FastConstructFrameConstants::kImplicitReceiverOffset), r0);
// Restore context.
__ LoadU64(cp, MemOperand(fp, FastConstructFrameConstants::kContextOffset),
r0);
}
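// Note: the two stores in NewImplicitReceiver above patch both copies of the
// implicit receiver kept by the fast-construct path: one among the pushed
// arguments (consumed as the receiver of the constructor call) and one in the
// frame itself (read back on the use_receiver path after the call returns).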
} // namespace
// static
void Builtins::Generate_InterpreterPushArgsThenFastConstructFunction(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count
// -- r4 : constructor to call (checked to be a JSFunction)
// -- r6 : new target
// -- r7 : address of the first argument
// -- cp/r30 : context pointer
// -----------------------------------
__ AssertFunction(r4);
// Check if target has a [[Construct]] internal method.
Label non_constructor;
__ LoadMap(r5, r4);
__ lbz(r5, FieldMemOperand(r5, Map::kBitFieldOffset));
__ TestBit(r5, Map::Bits1::IsConstructorBit::kShift, r0);
__ beq(&non_constructor, cr0);
// Add a stack check before pushing arguments.
Label stack_overflow;
__ StackOverflowCheck(r3, r5, &stack_overflow);
// Enter a construct frame.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::FAST_CONSTRUCT);
// Implicit receiver stored in the construct frame.
__ LoadRoot(r5, RootIndex::kTheHoleValue);
__ Push(cp, r5);
// Push arguments + implicit receiver.
Register argc_without_receiver = r9;
__ SubS64(argc_without_receiver, r3, Operand(kJSArgcReceiverSlots));
// Push the arguments. r7 and r8 will be modified.
GenerateInterpreterPushArgs(masm, argc_without_receiver, r7, r8);
// Implicit receiver as part of the arguments (patched later if needed).
__ push(r5);
// Check if it is a builtin call.
Label builtin_call;
__ LoadTaggedField(
r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
__ lwz(r5, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
__ mov(ip, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
__ and_(r0, r5, ip, SetRC);
__ bne(&builtin_call, cr0);
// Check if we need to create an implicit receiver.
Label not_create_implicit_receiver;
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r5);
__ JumpIfIsInRange(
r5, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
&not_create_implicit_receiver);
NewImplicitReceiver(masm);
__ bind(&not_create_implicit_receiver);
// Call the function.
__ InvokeFunctionWithNewTarget(r4, r6, r3, InvokeType::kCall);
// ----------- S t a t e -------------
// -- r3 constructor result
//
// Stack:
// -- Implicit Receiver
// -- Context
// -- FastConstructMarker
// -- FramePointer
// -----------------------------------
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
masm->pc_offset());
// If the result is an object (in the ECMA sense), we should get rid
// of the receiver and use the result; see ECMA-262 section 13.2.2-7
// on page 74.
Label use_receiver, do_throw, leave_and_return, check_receiver;
// If the result is undefined, we fall through to use the implicit receiver;
// otherwise we jump to check_receiver, which does a smi check and verifies
// that the return value is a valid receiver.
__ JumpIfNotRoot(r3, RootIndex::kUndefinedValue, &check_receiver);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
__ LoadU64(
r3, MemOperand(fp, FastConstructFrameConstants::kImplicitReceiverOffset),
r0);
__ JumpIfRoot(r3, RootIndex::kTheHoleValue, &do_throw);
__ bind(&leave_and_return);
// Leave construct frame.
__ LeaveFrame(StackFrame::FAST_CONSTRUCT);
__ blr();
__ bind(&check_receiver);
// If the result is a smi, it is *not* an object in the ECMA sense.
__ JumpIfSmi(r3, &use_receiver);
// If the type of the result (stored in its map) is less than
// FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CompareObjectType(r3, r7, r8, FIRST_JS_RECEIVER_TYPE);
__ bge(&leave_and_return);
__ b(&use_receiver);
__ bind(&builtin_call);
// TODO(victorgomes): Check the possibility to turn this into a tailcall.
__ InvokeFunctionWithNewTarget(r4, r6, r3, InvokeType::kCall);
__ LeaveFrame(StackFrame::FAST_CONSTRUCT);
__ blr();
__ bind(&do_throw);
// Restore the context from the frame.
__ LoadU64(cp, MemOperand(fp, FastConstructFrameConstants::kContextOffset),
r0);
__ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
__ bkpt(0);
__ bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ bkpt(0);
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
RelocInfo::CODE_TARGET);
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Label builtin_trampoline, trampoline_loaded;
Tagged<Smi> interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
// If the SFI function_data is an InterpreterData, the function will have a
// custom copy of the interpreter entry trampoline for profiling. If so,
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ LoadU64(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedField(
r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset), r0);
__ LoadTaggedField(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset), r0);
__ CompareObjectType(r5, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister,
INTERPRETER_DATA_TYPE);
__ bne(&builtin_trampoline);
__ LoadTaggedField(
r5, FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset),
r0);
__ LoadCodeInstructionStart(r5, r5);
__ b(&trampoline_loaded);
__ bind(&builtin_trampoline);
__ Move(r5, ExternalReference::
address_of_interpreter_entry_trampoline_instruction_start(
masm->isolate()));
__ LoadU64(r5, MemOperand(r5));
__ bind(&trampoline_loaded);
__ addi(r0, r5, Operand(interpreter_entry_return_pc_offset.value()));
__ mtlr(r0);
// Initialize the dispatch table register.
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
// Get the bytecode array pointer from the frame.
__ LoadU64(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (v8_flags.debug_code) {
// Check function data field is actually a BytecodeArray object.
__ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
__ Assert(ne,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
cr0);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
BYTECODE_ARRAY_TYPE);
__ Assert(
eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
__ LoadU64(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
if (v8_flags.debug_code) {
Label okay;
__ cmpi(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
kFunctionEntryBytecodeOffset));
__ bge(&okay);
__ bkpt(0);
__ bind(&okay);
}
// Dispatch to the target bytecode.
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ lbzx(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ LoadU64(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, scratch));
__ Jump(kJavaScriptCallCodeStartRegister);
}
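// In C-like pseudocode, the dispatch at the end of
// Generate_InterpreterEnterBytecode above is:
//   handler = dispatch_table[bytecode_array[bytecode_offset]];
//   goto handler;
// with each dispatch table entry one system pointer wide, hence the shift by
// kSystemPointerSizeLog2.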
void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ LoadU64(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ LoadU64(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
Label enter_bytecode, function_entry_bytecode;
__ cmpi(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
kFunctionEntryBytecodeOffset));
__ beq(&function_entry_bytecode);
// Load the current bytecode.
__ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r4, r5, r6,
&if_return);
__ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
__ StoreU64(r5,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
__ bind(&function_entry_bytecode);
// If the code deoptimizes during the implicit function entry stack interrupt
// check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
// not a valid bytecode offset. Detect this case and advance to the first
// actual bytecode.
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ b(&enter_bytecode);
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
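// Note: kFunctionEntryBytecodeOffset is a synthetic offset that is only
// produced by the function-entry stack check's deopt point; it never matches
// a real bytecode offset, which is why the code above can safely translate it
// back to the first bytecode (BytecodeArray::kHeaderSize - kHeapObjectTag).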
void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin,
bool with_result) {
const RegisterConfiguration* config(RegisterConfiguration::Default());
int allocatable_register_count = config->num_allocatable_general_registers();
Register scratch = ip;
if (with_result) {
if (java_script_builtin) {
__ mr(scratch, r3);
} else {
// Overwrite the hole inserted by the deoptimizer with the return value
// from the LAZY deopt point.
__ StoreU64(
r3, MemOperand(
sp, config->num_allocatable_general_registers() *
kSystemPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize));
}
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
__ Pop(Register::from_code(code));
if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
__ SmiUntag(Register::from_code(code));
}
}
if (java_script_builtin && with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r3 contains the arguments count; the return value
// from LAZY is always the last argument.
constexpr int return_value_offset =
BuiltinContinuationFrameConstants::kFixedSlotCount -
kJSArgcReceiverSlots;
__ addi(r3, r3, Operand(return_value_offset));
__ ShiftLeftU64(r0, r3, Operand(kSystemPointerSizeLog2));
__ StoreU64(scratch, MemOperand(sp, r0));
// Recover arguments count.
__ subi(r3, r3, Operand(return_value_offset));
}
__ LoadU64(
fp,
MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
// Load builtin index (stored as a Smi) and use it to get the builtin start
// address from the builtins table.
UseScratchRegisterScope temps(masm);
Register builtin = temps.Acquire();
__ Pop(builtin);
__ addi(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(r0);
__ mtlr(r0);
__ LoadEntryFromBuiltinIndex(builtin, builtin);
__ Jump(builtin);
}
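// Roughly, the continuation frame unwound above is laid out as (stack top
// first): the saved allocatable GP registers, the builtin index as a Smi,
// the fixed frame slots (from which fp is reloaded), and finally the saved
// return address, which is moved back into lr before jumping to the builtin.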
} // namespace
void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
Generate_ContinueToBuiltinHelper(masm, false, false);
}
void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
MacroAssembler* masm) {
Generate_ContinueToBuiltinHelper(masm, false, true);
}
void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
Generate_ContinueToBuiltinHelper(masm, true, false);
}
void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
MacroAssembler* masm) {
Generate_ContinueToBuiltinHelper(masm, true, true);
}
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kNotifyDeoptimized);
}
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r3.code());
__ LoadU64(r3, MemOperand(sp, 0 * kSystemPointerSize));
__ addi(sp, sp, Operand(1 * kSystemPointerSize));
__ Ret();
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
using D = OnStackReplacementDescriptor;
static_assert(D::kParameterCount == 1);
OnStackReplacement(masm, OsrSourceTier::kInterpreter,
D::MaybeTargetCodeRegister());
}
#if ENABLE_SPARKPLUG
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
using D = OnStackReplacementDescriptor;
static_assert(D::kParameterCount == 1);
__ LoadU64(kContextRegister,
MemOperand(fp, BaselineFrameConstants::kContextOffset), r0);
OnStackReplacement(masm, OsrSourceTier::kBaseline,
D::MaybeTargetCodeRegister());
}
#endif
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argc
// -- sp[0] : receiver
// -- sp[4] : thisArg
// -- sp[8] : argArray
// -----------------------------------
// 1. Load receiver into r4, argArray into r5 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
__ LoadRoot(r8, RootIndex::kUndefinedValue);
__ mr(r5, r8);
Label done;
__ LoadU64(r4, MemOperand(sp)); // receiver
__ CmpS64(r3, Operand(JSParameterCount(1)), r0);
__ blt(&done);
__ LoadU64(r8, MemOperand(sp, kSystemPointerSize)); // thisArg
__ CmpS64(r3, Operand(JSParameterCount(2)), r0);
__ blt(&done);
__ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
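// E.g. for f.apply(obj, [1, 2]): on entry the stack holds f (the receiver of
// the apply call), then obj (thisArg), then [1, 2] (argArray). The block
// above leaves f in r4 and [1, 2] in r5, and replaces the stack arguments
// with obj as the sole (receiver) slot for CallWithArrayLike.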
// ----------- S t a t e -------------
// -- r5 : argArray
// -- r4 : receiver
// -- sp[0] : thisArg
// -----------------------------------
// 2. We don't need to check explicitly for callable receiver here,
// since that's the first thing the Call/CallWithArrayLike builtins
// will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
__ JumpIfRoot(r5, RootIndex::kNullValue, &no_arguments);
__ JumpIfRoot(r5, RootIndex::kUndefinedValue, &no_arguments);
// 4a. Apply the receiver to the given argArray.
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
__ bind(&no_arguments);
{
__ mov(r3, Operand(JSParameterCount(0)));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Get the callable to call (passed as receiver) from the stack.
__ Pop(r4);
// 2. Make sure we have at least one argument.
// r3: actual number of arguments
{
Label done;
__ CmpS64(r3, Operand(JSParameterCount(0)), r0);
__ bne(&done);
__ PushRoot(RootIndex::kUndefinedValue);
__ addi(r3, r3, Operand(1));
__ bind(&done);
}
// 3. Adjust the actual number of arguments.
__ subi(r3, r3, Operand(1));
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argc
// -- sp[0] : receiver
// -- sp[4] : target (if argc >= 1)
// -- sp[8] : thisArgument (if argc >= 2)
// -- sp[12] : argumentsList (if argc == 3)
// -----------------------------------
// 1. Load target into r4 (if present), argumentsList into r5 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
__ LoadRoot(r4, RootIndex::kUndefinedValue);
__ mr(r8, r4);
__ mr(r5, r4);
Label done;
__ CmpS64(r3, Operand(JSParameterCount(1)), r0);
__ blt(&done);
__ LoadU64(r4, MemOperand(sp, kSystemPointerSize)); // target
__ CmpS64(r3, Operand(JSParameterCount(2)), r0);
__ blt(&done);
__ LoadU64(r8, MemOperand(sp, 2 * kSystemPointerSize)); // thisArgument
__ CmpS64(r3, Operand(JSParameterCount(3)), r0);
__ blt(&done);
__ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argumentsList
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
// -- r5 : argumentsList
// -- r4 : target
// -- sp[0] : thisArgument
// -----------------------------------
// 2. We don't need to check explicitly for callable target here,
// since that's the first thing the Call/CallWithArrayLike builtins
// will do.
// 3. Apply the target to the given argumentsList.
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argc
// -- sp[0] : receiver
// -- sp[4] : target
// -- sp[8] : argumentsList
// -- sp[12] : new.target (optional)
// -----------------------------------
// 1. Load target into r4 (if present), argumentsList into r5 (if present),
// new.target into r6 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
{
__ LoadRoot(r4, RootIndex::kUndefinedValue);
__ mr(r5, r4);
Label done;
__ mr(r7, r4);
__ CmpS64(r3, Operand(JSParameterCount(1)), r0);
__ blt(&done);
__ LoadU64(r4, MemOperand(sp, kSystemPointerSize)); // target
__ mr(r6, r4); // new.target defaults to target
__ CmpS64(r3, Operand(JSParameterCount(2)), r0);
__ blt(&done);
__ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argumentsList
__ CmpS64(r3, Operand(JSParameterCount(3)), r0);
__ blt(&done);
__ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // new.target
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(r3, r7, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
// -- r5 : argumentsList
// -- r6 : new.target
// -- r4 : target
// -- sp[0] : receiver (undefined)
// -----------------------------------
// 2. We don't need to check explicitly for constructor target here,
// since that's the first thing the Construct/ConstructWithArrayLike
// builtins will do.
// 3. We don't need to check explicitly for constructor new.target here,
// since that's the second thing the Construct/ConstructWithArrayLike
// builtins will do.
// 4. Construct the target with the given new.target and argumentsList.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
RelocInfo::CODE_TARGET);
}
namespace {
// Allocate new stack space for |count| arguments and shift all existing
// arguments already on the stack. |pointer_to_new_space_out| points to the
// first free slot on the stack to copy additional arguments to and
// |argc_in_out| is updated to include |count|.
void Generate_AllocateSpaceAndShiftExistingArguments(
MacroAssembler* masm, Register count, Register argc_in_out,
Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
scratch2));
Register old_sp = scratch1;
Register new_space = scratch2;
__ addi(old_sp, sp, Operand(-kSystemPointerSize));
__ ShiftLeftU64(new_space, count, Operand(kSystemPointerSizeLog2));
__ AllocateStackSpace(new_space);
Register dest = pointer_to_new_space_out;
__ addi(dest, sp, Operand(-kSystemPointerSize));
Label loop, skip;
__ mr(r0, argc_in_out);
__ cmpi(r0, Operand::Zero());
__ ble(&skip);
__ mtctr(r0);
__ bind(&loop);
__ LoadU64WithUpdate(r0, MemOperand(old_sp, kSystemPointerSize));
__ StoreU64WithUpdate(r0, MemOperand(dest, kSystemPointerSize));
__ bdnz(&loop);
__ bind(&skip);
// Update total number of arguments, restore dest.
__ add(argc_in_out, argc_in_out, count);
__ addi(dest, dest, Operand(kSystemPointerSize));
}
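// Roughly, for count == C and argc_in_out == A the helper above performs:
//   sp -= C * kSystemPointerSize;              // AllocateStackSpace
//   for (i = 0; i < A; i++) dst[i] = src[i];   // shift existing args down
//   argc_in_out = A + C;
// and returns, in pointer_to_new_space_out, the address of the first of the
// C freed slots (i.e. just past the shifted arguments).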
} // namespace
// static
// TODO(v8:11615): Observe Code::kMaxArguments in
// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- r4 : target
// -- r3 : number of parameters on the stack
// -- r5 : arguments list (a FixedArray)
// -- r7 : len (number of elements to push from args)
// -- r6 : new.target (for [[Construct]])
// -----------------------------------
Register scratch = ip;
if (v8_flags.debug_code) {
// Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0.
Label ok, fail;
__ AssertNotSmi(r5);
__ LoadTaggedField(scratch, FieldMemOperand(r5, HeapObject::kMapOffset),
r0);
__ LoadU16(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ cmpi(scratch, Operand(FIXED_ARRAY_TYPE));
__ beq(&ok);
__ cmpi(scratch, Operand(FIXED_DOUBLE_ARRAY_TYPE));
__ bne(&fail);
__ cmpi(r7, Operand::Zero());
__ beq(&ok);
// Fall through.
__ bind(&fail);
__ Abort(AbortReason::kOperandIsNotAFixedArray);
__ bind(&ok);
}
// Check for stack overflow.
Label stack_overflow;
__ StackOverflowCheck(r7, scratch, &stack_overflow);
// Move the arguments already in the stack,
// including the receiver and the return address.
// r7: Number of arguments to make room for.
// r3: Number of arguments already on the stack.
// r8: Points to first free slot on the stack after arguments were shifted.
Generate_AllocateSpaceAndShiftExistingArguments(masm, r7, r3, r8, ip, r9);
// Push arguments onto the stack (thisArgument is already on the stack).
{
Label loop, no_args, skip;
__ cmpi(r7, Operand::Zero());
__ beq(&no_args);
__ addi(r5, r5,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ mtctr(r7);
__ bind(&loop);
__ LoadTaggedField(scratch, MemOperand(r5, kTaggedSize), r0);
__ addi(r5, r5, Operand(kTaggedSize));
__ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ bind(&skip);
__ StoreU64(scratch, MemOperand(r8));
__ addi(r8, r8, Operand(kSystemPointerSize));
__ bdnz(&loop);
__ bind(&no_args);
}
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
__ bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
}
// static
void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments
// -- r6 : the new.target (for [[Construct]] calls)
// -- r4 : the target to call (can be any Object)
// -- r5 : start index (to support rest parameters)
// -----------------------------------
Register scratch = r9;
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(r6, &new_target_not_constructor);
__ LoadTaggedField(scratch, FieldMemOperand(r6, HeapObject::kMapOffset),
r0);
__ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ TestBit(scratch, Map::Bits1::IsConstructorBit::kShift, r0);
__ bne(&new_target_constructor, cr0);
__ bind(&new_target_not_constructor);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
__ Push(r6);
__ CallRuntime(Runtime::kThrowNotConstructor);
__ Trap(); // Unreachable.
}
__ bind(&new_target_constructor);
}
Label stack_done, stack_overflow;
__ LoadU64(r8, MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ subi(r8, r8, Operand(kJSArgcReceiverSlots));
__ sub(r8, r8, r5, LeaveOE, SetRC);
__ ble(&stack_done, cr0);
{
// ----------- S t a t e -------------
// -- r3 : the number of arguments already in the stack
// -- r4 : the target to call (can be any Object)
// -- r5 : start index (to support rest parameters)
// -- r6 : the new.target (for [[Construct]] calls)
// -- fp : points to the caller stack frame
// -- r8 : number of arguments to copy, i.e. arguments count - start index
// -----------------------------------
// Check for stack overflow.
__ StackOverflowCheck(r8, scratch, &stack_overflow);
// Forward the arguments from the caller frame.
// Point to the first argument to copy (skipping the receiver).
__ addi(r7, fp,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
__ ShiftLeftU64(scratch, r5, Operand(kSystemPointerSizeLog2));
__ add(r7, r7, scratch);
// Move the arguments already in the stack,
// including the receiver and the return address.
// r8: Number of arguments to make room for.
// r3: Number of arguments already on the stack.
// r5: Points to first free slot on the stack after arguments were shifted.
Generate_AllocateSpaceAndShiftExistingArguments(masm, r8, r3, r5, scratch,
ip);
// Copy arguments from the caller frame.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
Label loop;
__ bind(&loop);
{
__ subi(r8, r8, Operand(1));
__ ShiftLeftU64(scratch, r8, Operand(kSystemPointerSizeLog2));
__ LoadU64(r0, MemOperand(r7, scratch));
__ StoreU64(r0, MemOperand(r5, scratch));
__ cmpi(r8, Operand::Zero());
__ bne(&loop);
}
}
}
__ b(&stack_done);
__ bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&stack_done);
// Tail-call to the {code} handler.
__ Jump(code, RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments
// -- r4 : the function to call (checked to be a JSFunction)
// -----------------------------------
__ AssertCallableFunction(r4);
__ LoadTaggedField(
r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
__ LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
__ andi(r0, r6,
Operand(SharedFunctionInfo::IsStrictBit::kMask |
SharedFunctionInfo::IsNativeBit::kMask));
__ bne(&done_convert, cr0);
{
// ----------- S t a t e -------------
// -- r3 : the number of arguments
// -- r4 : the function to call (checked to be a JSFunction)
// -- r5 : the shared function info.
// -- cp : the function context.
// -----------------------------------
if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(r6);
} else {
Label convert_to_object, convert_receiver;
__ LoadReceiver(r6);
__ JumpIfSmi(r6, &convert_to_object);
static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CompareObjectType(r6, r7, r7, FIRST_JS_RECEIVER_TYPE);
__ bge(&done_convert);
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
__ JumpIfRoot(r6, RootIndex::kUndefinedValue, &convert_global_proxy);
__ JumpIfNotRoot(r6, RootIndex::kNullValue, &convert_to_object);
__ bind(&convert_global_proxy);
{
// Patch receiver to global proxy.
__ LoadGlobalProxy(r6);
}
__ b(&convert_receiver);
}
__ bind(&convert_to_object);
{
// Convert receiver using ToObject.
// TODO(bmeurer): Inline the allocation here to avoid building the frame
// in the fast case? (fall back to AllocateInNewSpace?)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r3);
__ Push(r3, r4);
__ mr(r3, r6);
__ Push(cp);
__ Call(BUILTIN_CODE(masm->isolate(), ToObject),
RelocInfo::CODE_TARGET);
__ Pop(cp);
__ mr(r6, r3);
__ Pop(r3, r4);
__ SmiUntag(r3);
}
__ LoadTaggedField(
r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
__ bind(&convert_receiver);
}
__ StoreReceiver(r6);
}
__ bind(&done_convert);
// ----------- S t a t e -------------
// -- r3 : the number of arguments
// -- r4 : the function to call (checked to be a JSFunction)
// -- r5 : the shared function info.
// -- cp : the function context.
// -----------------------------------
__ LoadU16(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(r4, no_reg, r5, r3, InvokeType::kJump);
}
namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments
// -- r4 : target (checked to be a JSBoundFunction)
// -- r6 : new.target (only in case of [[Construct]])
// -----------------------------------
// Load [[BoundArguments]] into r5 and length of that into r7.
Label no_bound_arguments;
__ LoadTaggedField(
r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset), r0);
__ SmiUntag(r7, FieldMemOperand(r5, FixedArray::kLengthOffset), SetRC, r0);
__ beq(&no_bound_arguments, cr0);
{
// ----------- S t a t e -------------
// -- r3 : the number of arguments
// -- r4 : target (checked to be a JSBoundFunction)
// -- r5 : the [[BoundArguments]] (implemented as FixedArray)
// -- r6 : new.target (only in case of [[Construct]])
// -- r7 : the number of [[BoundArguments]]
// -----------------------------------
Register scratch = r9;
// Reserve stack space for the [[BoundArguments]].
{
Label done;
__ ShiftLeftU64(r10, r7, Operand(kSystemPointerSizeLog2));
__ sub(r0, sp, r10);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
{
__ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
__ CmpU64(r0, scratch);
}
__ bgt(&done); // Signed comparison.
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
__ CallRuntime(Runtime::kThrowStackOverflow);
}
__ bind(&done);
}
// Pop receiver.
__ Pop(r8);
// Push [[BoundArguments]].
{
Label loop, done;
__ add(r3, r3, r7); // Adjust effective number of arguments.
__ addi(r5, r5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ mtctr(r7);
__ bind(&loop);
__ subi(r7, r7, Operand(1));
__ ShiftLeftU64(scratch, r7, Operand(kTaggedSizeLog2));
__ add(scratch, scratch, r5);
__ LoadTaggedField(scratch, MemOperand(scratch), r0);
__ Push(scratch);
__ bdnz(&loop);
__ bind(&done);
}
// Push receiver.
__ Push(r8);
}
__ bind(&no_bound_arguments);
}
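// E.g. for b = f.bind(t, x, y) and a call b(z): the stack holds the receiver
// on top and z below it. The block above pops the receiver, pushes y then x
// (the loop walks [[BoundArguments]] from its last element down), re-pushes
// the receiver, and adds 2 to the argument count in r3, yielding the argument
// order x, y, z expected by the bound target function.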
} // namespace
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments
// -- r4 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r4);
// Patch the receiver to [[BoundThis]].
__ LoadTaggedField(r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset),
r0);
__ StoreReceiver(r6);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
__ LoadTaggedField(
r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments
// -- r4 : the target to call (can be any Object).
// -----------------------------------
Register target = r4;
Register map = r7;
Register instance_type = r8;
DCHECK(!AreAliased(r3, target, map, instance_type));
Label non_callable, class_constructor;
__ JumpIfSmi(target, &non_callable);
__ LoadMap(map, target);
__ CompareInstanceTypeRange(map, instance_type,
FIRST_CALLABLE_JS_FUNCTION_TYPE,
LAST_CALLABLE_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, le);
__ cmpi(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
{
Register flags = r7;
__ lbz(flags, FieldMemOperand(map, Map::kBitFieldOffset));
map = no_reg;
__ TestBit(flags, Map::Bits1::IsCallableBit::kShift, r0);
__ beq(&non_callable, cr0);
}
// Check if target is a proxy and call CallProxy external builtin
__ cmpi(instance_type, Operand(JS_PROXY_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
// Check if target is a wrapped function and call CallWrappedFunction external
// builtin
__ cmpi(instance_type, Operand(JS_WRAPPED_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
RelocInfo::CODE_TARGET, eq);
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
__ cmpi(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
__ beq(&class_constructor);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
// Overwrite the original receiver with the (original) target.
__ StoreReceiver(target);
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(target);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
__ Trap(); // Unreachable.
}
// 4. The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(target);
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
__ Trap(); // Unreachable.
}
}
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments
// -- r4 : the constructor to call (checked to be a JSFunction)
// -- r6 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertConstructor(r4);
__ AssertFunction(r4);
// The calling convention for function-specific ConstructStubs requires
// r5 to contain either an AllocationSite or undefined.
__ LoadRoot(r5, RootIndex::kUndefinedValue);
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ LoadTaggedField(
r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ mov(ip, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
__ and_(r7, r7, ip, SetRC);
__ beq(&call_generic_stub, cr0);
__ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
RelocInfo::CODE_TARGET);
__ bind(&call_generic_stub);
__ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments
// -- r4 : the function to call (checked to be a JSBoundFunction)
// -- r6 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertConstructor(r4);
__ AssertBoundFunction(r4);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Patch new.target to [[BoundTargetFunction]] if new.target equals target.
Label skip;
__ CompareTagged(r4, r6);
__ bne(&skip);
__ LoadTaggedField(
r6, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
__ bind(&skip);
// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ LoadTaggedField(
r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments
// -- r4 : the constructor to call (can be any Object)
// -- r6 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
Register target = r4;
Register map = r7;
Register instance_type = r8;
DCHECK(!AreAliased(r3, target, map, instance_type));
// Check if target is a Smi.
Label non_constructor, non_proxy;
__ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
__ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
{
Register flags = r5;
DCHECK(!AreAliased(r3, target, map, instance_type, flags));
__ lbz(flags, FieldMemOperand(map, Map::kBitFieldOffset));
__ TestBit(flags, Map::Bits1::IsConstructorBit::kShift, r0);
__ beq(&non_constructor, cr0);
}
// Dispatch based on instance type.
__ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
RelocInfo::CODE_TARGET, le);
// Only dispatch to bound functions after checking whether they are
// constructors.
__ cmpi(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Only dispatch to proxies after checking whether they are constructors.
__ cmpi(instance_type, Operand(JS_PROXY_TYPE));
__ bne(&non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
RelocInfo::CODE_TARGET);
// Called Construct on an exotic Object with a [[Construct]] internal method.
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
__ StoreReceiver(target);
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(target,
Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
RelocInfo::CODE_TARGET);
}
#if V8_ENABLE_WEBASSEMBLY
struct SaveWasmParamsScope {
explicit SaveWasmParamsScope(MacroAssembler* masm) : masm(masm) {
for (Register gp_param_reg : wasm::kGpParamRegisters) {
gp_regs.set(gp_param_reg);
}
for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
fp_regs.set(fp_param_reg);
}
CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
CHECK_EQ(simd_regs.Count(), arraysize(wasm::kFpParamRegisters));
CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedGpParamRegs + 1,
gp_regs.Count());
CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedFpParamRegs,
fp_regs.Count());
CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedFpParamRegs,
simd_regs.Count());
__ MultiPush(gp_regs);
__ MultiPushF64AndV128(fp_regs, simd_regs, ip, r0);
}
~SaveWasmParamsScope() {
__ MultiPopF64AndV128(fp_regs, simd_regs, ip, r0);
__ MultiPop(gp_regs);
}
RegList gp_regs;
DoubleRegList fp_regs;
// List must match register numbers under kFpParamRegisters.
Simd128RegList simd_regs = {v1, v2, v3, v4, v5, v6, v7, v8};
MacroAssembler* masm;
};
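// Usage sketch: constructing a SaveWasmParamsScope pushes all Wasm GP and
// FP/SIMD parameter registers, and its destructor pops them again, so
//   { SaveWasmParamsScope save_params(masm); /* clobbering call */ }
// preserves the parameter registers across the enclosed code.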
void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
Register vector = r11;
Register scratch = ip;
Label allocate_vector, done;
__ LoadTaggedField(
vector,
FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kFeedbackVectorsOffset),
scratch);
__ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
__ AddS64(vector, vector, scratch);
__ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize),
scratch);
__ JumpIfSmi(vector, &allocate_vector);
__ bind(&done);
__ push(kWasmInstanceRegister);
__ push(vector);
__ Ret();
__ bind(&allocate_vector);
// Feedback vector doesn't exist yet. Call the runtime to allocate it.
// We temporarily change the frame type for this, because we need special
// handling by the stack walker in case of GC.
__ mov(scratch,
Operand(StackFrame::TypeToMarker(StackFrame::WASM_LIFTOFF_SETUP)));
__ StoreU64(scratch, MemOperand(sp));
// Save current return address as it will get clobbered during CallRuntime.
__ mflr(scratch);
__ push(scratch);
{
SaveWasmParamsScope save_params(masm); // Will use r0 and ip as scratch.
// Arguments to the runtime function: instance, func_index.
__ push(kWasmInstanceRegister);
__ SmiTag(func_index);
__ push(func_index);
// Allocate a stack slot where the runtime function can spill a pointer
// to the {NativeModule}.
__ push(r11);
__ LoadSmiLiteral(cp, Smi::zero());
__ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
__ mr(vector, kReturnRegister0);
// Saved parameters are restored at the end of this block.
}
__ pop(scratch);
__ mtlr(scratch);
__ mov(scratch, Operand(StackFrame::TypeToMarker(StackFrame::WASM)));
__ StoreU64(scratch, MemOperand(sp));
__ b(&done);
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in a register by the jump table trampoline.
// Convert to Smi for the runtime call.
__ SmiTag(kWasmCompileLazyFuncIndexRegister);
{
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
{
SaveWasmParamsScope save_params(masm); // Will use r0 and ip as scratch.
// Push the Wasm instance as an explicit argument to the runtime function.
__ push(kWasmInstanceRegister);
// Push the function index as second argument.
__ push(kWasmCompileLazyFuncIndexRegister);
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ LoadSmiLiteral(cp, Smi::zero());
__ CallRuntime(Runtime::kWasmCompileLazy, 2);
// The runtime function returns the jump table slot offset as a Smi. Use
// that to compute the jump target in r11.
__ SmiUntag(kReturnRegister0);
__ mr(r11, kReturnRegister0);
// Saved parameters are restored at the end of this block.
}
// After the instance register has been restored, we can add the jump table
// start to the jump table offset already stored in r11.
__ LoadU64(ip,
FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kJumpTableStartOffset),
r0);
__ AddS64(r11, r11, ip);
}
// Finally, jump to the jump table slot for the function.
__ Jump(r11);
}
void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
{
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
// Save all parameter registers. They might hold live values, we restore
// them after the runtime call.
__ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
__ MultiPushF64AndV128(WasmDebugBreakFrameConstants::kPushedFpRegs,
WasmDebugBreakFrameConstants::kPushedSimd128Regs, ip,
r0);
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ LoadSmiLiteral(cp, Smi::zero());
__ CallRuntime(Runtime::kWasmDebugBreak, 0);
// Restore registers.
__ MultiPopF64AndV128(WasmDebugBreakFrameConstants::kPushedFpRegs,
WasmDebugBreakFrameConstants::kPushedSimd128Regs, ip,
r0);
__ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
}
__ Ret();
}
void Builtins::Generate_WasmReturnPromiseOnSuspendAsm(MacroAssembler* masm) {
__ Trap();
}
void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) { __ Trap(); }
void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
// TODO(v8:12191): Implement for this platform.
__ Trap();
}
void Builtins::Generate_WasmResume(MacroAssembler* masm) {
// TODO(v8:12191): Implement for this platform.
__ Trap();
}
void Builtins::Generate_WasmReject(MacroAssembler* masm) {
// TODO(v8:12191): Implement for this platform.
__ Trap();
}
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
// Only needed on x64.
__ Trap();
}
void Builtins::Generate_JSToWasmWrapperAsm(MacroAssembler* masm) { __ Trap(); }
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
ArgvMode argv_mode, bool builtin_exit_frame,
bool switch_to_central_stack) {
// Called from JavaScript; parameters are on stack as if calling JS function.
// r3: number of arguments including receiver
// r4: pointer to builtin function
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
//
// If argv_mode == ArgvMode::kRegister:
// r5: pointer to the first argument
__ mr(r15, r4);
if (argv_mode == ArgvMode::kRegister) {
// Move argv into the correct register.
__ mr(r4, r5);
} else {
// Compute the argv pointer.
__ ShiftLeftU64(r4, r3, Operand(kSystemPointerSizeLog2));
__ add(r4, r4, sp);
__ subi(r4, r4, Operand(kSystemPointerSize));
}
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
// Need at least one extra slot for return address location.
int arg_stack_space = 1;
// Pass buffer for return value on stack if necessary
bool needs_return_buffer =
(result_size == 2 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS);
if (needs_return_buffer) {
arg_stack_space += result_size;
}
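// I.e. when the callee returns a two-word result (result_size == 2) but the
// ABI cannot return such a pair in registers, two extra slots are reserved
// here; their address is passed below as a hidden first argument and the
// result pair is loaded back into r3/r4 after the call.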
__ EnterExitFrame(arg_stack_space, builtin_exit_frame
? StackFrame::BUILTIN_EXIT
: StackFrame::EXIT);
// Store a copy of argc in callee-saved registers for later.
__ mr(r14, r3);
// r3, r14: number of arguments including receiver (C callee-saved)
// r4: pointer to the first argument
// r15: pointer to builtin function (C callee-saved)
// Result returned in registers or stack, depending on result size and ABI.
Register isolate_reg = r5;
if (needs_return_buffer) {
// The return value is a non-scalar value.
// Use frame storage reserved by calling function to pass return
// buffer as implicit first argument.
__ mr(r5, r4);
__ mr(r4, r3);
__ addi(r3, sp,
Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
isolate_reg = r6;
}
// Call C built-in.
__ Move(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
Register target = r15;
__ StoreReturnAddressAndCall(target);
// If return value is on the stack, pop it to registers.
if (needs_return_buffer) {
__ LoadU64(r4, MemOperand(r3, kSystemPointerSize));
__ LoadU64(r3, MemOperand(r3));
}
// Check result for exception sentinel.
Label exception_returned;
__ CompareRoot(r3, RootIndex::kException);
__ beq(&exception_returned);
// Check that there is no pending exception, otherwise we
// should have returned the exception sentinel.
if (v8_flags.debug_code) {
Label okay;
ExternalReference pending_exception_address = ExternalReference::Create(
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
__ Move(r6, pending_exception_address);
__ LoadU64(r6, MemOperand(r6));
__ CompareRoot(r6, RootIndex::kTheHoleValue);
// Cannot use Check here, as it attempts to generate a call into the runtime.
__ beq(&okay);
__ stop();
__ bind(&okay);
}
// Exit C frame and return.
// r3:r4: result
// sp: stack pointer
// fp: frame pointer
Register argc = argv_mode == ArgvMode::kRegister
// We don't want to pop arguments so set argc to no_reg.
? no_reg
// r14: still holds argc (callee-saved).
: r14;
__ LeaveExitFrame(argc, false);
__ blr();
// Handling of exception.
__ bind(&exception_returned);
ExternalReference pending_handler_context_address = ExternalReference::Create(
IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
ExternalReference pending_handler_entrypoint_address =
ExternalReference::Create(
IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
ExternalReference pending_handler_constant_pool_address =
ExternalReference::Create(
IsolateAddressId::kPendingHandlerConstantPoolAddress,
masm->isolate());
ExternalReference pending_handler_fp_address = ExternalReference::Create(
IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
ExternalReference pending_handler_sp_address = ExternalReference::Create(
IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
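// In outline, the handler dispatch below does:
//   UnwindAndFindExceptionHandler();   // fills the pending handler fields
//   cp = *pending_handler_context;     // 0 for non-JS handler frames
//   sp = *pending_handler_sp;
//   fp = *pending_handler_fp;
//   goto *pending_handler_entrypoint;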
// Ask the runtime for help to determine the handler. This will set r3 to
// contain the current pending exception, don't clobber it.
ExternalReference find_handler =
ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(3, 0, r3);
__ li(r3, Operand::Zero());
__ li(r4, Operand::Zero());
__ Move(r5, ExternalReference::isolate_address(masm->isolate()));
__ CallCFunction(find_handler, 3);
}
// Retrieve the handler context, SP and FP.
__ Move(cp, pending_handler_context_address);
__ LoadU64(cp, MemOperand(cp));
__ Move(sp, pending_handler_sp_address);
__ LoadU64(sp, MemOperand(sp));
__ Move(fp, pending_handler_fp_address);
__ LoadU64(fp, MemOperand(fp));
// If the handler is a JS frame, restore the context to the frame. Note that
// the context will be set to (cp == 0) for non-JS frames.
Label skip;
__ cmpi(cp, Operand::Zero());
__ beq(&skip);
__ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
__ Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
masm->isolate()));
__ mov(r0, Operand::Zero());
__ StoreU64(r0, MemOperand(ip));
}
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ Move(ip, pending_handler_entrypoint_address);
__ LoadU64(ip, MemOperand(ip));
if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
__ Move(kConstantPoolRegister, pending_handler_constant_pool_address);
__ LoadU64(kConstantPoolRegister, MemOperand(kConstantPoolRegister));
}
__ Jump(ip);
}
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done, fastpath_done;
Register result_reg = r3;
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
// Immediate values for this stub fit in instructions, so it's safe to use ip.
Register scratch = GetRegisterThatIsNotOneOf(result_reg);
Register scratch_low = GetRegisterThatIsNotOneOf(result_reg, scratch);
Register scratch_high =
GetRegisterThatIsNotOneOf(result_reg, scratch, scratch_low);
DoubleRegister double_scratch = kScratchDoubleReg;
__ Push(result_reg, scratch);
// Account for saved regs.
int argument_offset = 2 * kSystemPointerSize;
// Load double input.
__ lfd(double_scratch, MemOperand(sp, argument_offset));
// Do fast-path convert from double to int.
__ ConvertDoubleToInt64(double_scratch,
#if !V8_TARGET_ARCH_PPC64
scratch,
#endif
result_reg, d0);
// Test for overflow
#if V8_TARGET_ARCH_PPC64
__ TestIfInt32(result_reg, r0);
#else
__ TestIfInt32(scratch, result_reg, r0);
#endif
__ beq(&fastpath_done);
__ Push(scratch_high, scratch_low);
// Account for saved regs.
argument_offset += 2 * kSystemPointerSize;
__ lwz(scratch_high,
MemOperand(sp, argument_offset + Register::kExponentOffset));
__ lwz(scratch_low,
MemOperand(sp, argument_offset + Register::kMantissaOffset));
__ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
// Load scratch with exponent - 1. This is faster than loading
// with exponent because Bias + 1 = 1024 which is a *PPC* immediate value.
static_assert(HeapNumber::kExponentBias + 1 == 1024);
__ subi(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
// If the exponent is greater than or equal to 84, the 32 least significant
// bits of the integer part are zero (84 = 52 mantissa bits + 32 low-order
// zero bits), so the result is 0.
// Compare exponent with 84 (compare exponent - 1 with 83).
__ cmpi(scratch, Operand(83));
__ bge(&out_of_range);
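// E.g. an input of 2^84 has an integer part of 1 followed by 84 zero bits,
// so its 32 least significant bits (and hence the truncated int32 result)
// are 0; the same holds for any input with exponent >= 84.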
// If we reach this code, 31 <= exponent <= 83.
// So, we don't have to handle cases where 0 <= exponent <= 20 for
// which we would need to shift right the high part of the mantissa.
// Scratch contains exponent - 1.
// Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
__ subfic(scratch, scratch, Operand(51));
__ cmpi(scratch, Operand::Zero());
__ ble(&only_low);
// 21 <= exponent <= 51, shift scratch_low and scratch_high
// to generate the result.
__ srw(scratch_low, scratch_low, scratch);
// Scratch contains: 52 - exponent.
// We need: exponent - 20.
// So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
__ subfic(scratch, scratch, Operand(32));
__ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
// Set the implicit 1 before the mantissa part in scratch_high.
static_assert(HeapNumber::kMantissaBitsInTopWord >= 16);
__ oris(result_reg, result_reg,
Operand(1 << ((HeapNumber::kMantissaBitsInTopWord)-16)));
__ ShiftLeftU32(r0, result_reg, scratch);
__ orx(result_reg, scratch_low, r0);
__ b(&negate);
__ bind(&out_of_range);
__ mov(result_reg, Operand::Zero());
__ b(&done);
__ bind(&only_low);
// 52 <= exponent <= 83, shift only scratch_low.
// On entry, scratch contains: 52 - exponent.
__ neg(scratch, scratch);
__ ShiftLeftU32(result_reg, scratch_low, scratch);
__ bind(&negate);
// If input was positive, scratch_high ASR 31 equals 0 and
// scratch_high LSR 31 equals zero.
// New result = (result eor 0) + 0 = result.
// If the input was negative, we have to negate the result.
// scratch_high ASR 31 equals 0xFFFFFFFF and scratch_high LSR 31 equals 1.
// New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
__ srawi(r0, scratch_high, 31);
#if V8_TARGET_ARCH_PPC64
__ srdi(r0, r0, Operand(32));
#endif
__ xor_(result_reg, result_reg, r0);
__ srwi(r0, scratch_high, Operand(31));
__ add(result_reg, result_reg, r0);
__ bind(&done);
__ Pop(scratch_high, scratch_low);
// Account for saved regs.
argument_offset -= 2 * kSystemPointerSize;
__ bind(&fastpath_done);
__ StoreU64(result_reg, MemOperand(sp, argument_offset));
__ Pop(result_reg, scratch);
__ Ret();
}
void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,
CallApiCallbackMode mode) {
// ----------- S t a t e -------------
// CallApiCallbackMode::kGeneric mode:
// -- r5 : arguments count (not including the receiver)
// -- r6 : call handler info
// -- r3 : holder
// CallApiCallbackMode::kOptimizedNoProfiling/kOptimized modes:
// -- r4 : api function address
// -- r5 : arguments count (not including the receiver)
// -- r6 : call data
// -- r3 : holder
// Both modes:
// -- cp : context
// -- sp[0] : receiver
// -- sp[8] : first argument
// -- ...
// -- sp[(argc) * 8] : last argument
// -----------------------------------
Register function_callback_info_arg = arg_reg_1;
Register api_function_address = no_reg;
Register argc = no_reg;
Register call_data = no_reg;
Register callback = no_reg;
Register holder = no_reg;
Register scratch = r7;
Register scratch2 = r8;
switch (mode) {
case CallApiCallbackMode::kGeneric:
api_function_address = r4;
argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister();
callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister();
holder = CallApiCallbackGenericDescriptor::HolderRegister();
break;
case CallApiCallbackMode::kOptimizedNoProfiling:
case CallApiCallbackMode::kOptimized:
api_function_address =
CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister();
argc = CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister();
call_data = CallApiCallbackOptimizedDescriptor::CallDataRegister();
holder = CallApiCallbackOptimizedDescriptor::HolderRegister();
break;
}
DCHECK(!AreAliased(api_function_address, argc, holder, call_data, callback,
scratch, scratch2));
using FCA = FunctionCallbackArguments;
static_assert(FCA::kArgsLength == 6);
static_assert(FCA::kNewTargetIndex == 5);
static_assert(FCA::kDataIndex == 4);
static_assert(FCA::kReturnValueIndex == 3);
static_assert(FCA::kUnusedIndex == 2);
static_assert(FCA::kIsolateIndex == 1);
static_assert(FCA::kHolderIndex == 0);
// Set up FunctionCallbackInfo's implicit_args on the stack as follows:
//
// Target state:
// sp[1 * kSystemPointerSize]: kHolder <= FCA::implicit_args_
// sp[2 * kSystemPointerSize]: kIsolate
// sp[3 * kSystemPointerSize]: Smi::zero (padding, unused)
// sp[4 * kSystemPointerSize]: undefined (kReturnValue)
// sp[5 * kSystemPointerSize]: kData
// sp[6 * kSystemPointerSize]: undefined (kNewTarget)
// Existing state:
// sp[7 * kSystemPointerSize]: <= FCA::values_
// Reserve space on the stack.
__ subi(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize));
// kHolder.
__ StoreU64(holder, MemOperand(sp, FCA::kHolderIndex * kSystemPointerSize));
// kIsolate.
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ StoreU64(scratch, MemOperand(sp, FCA::kIsolateIndex * kSystemPointerSize));
// kUnused
__ Move(scratch, Smi::zero());
__ StoreU64(scratch, MemOperand(sp, FCA::kUnusedIndex * kSystemPointerSize));
// kReturnValue.
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ StoreU64(scratch,
MemOperand(sp, FCA::kReturnValueIndex * kSystemPointerSize));
// kData.
switch (mode) {
case CallApiCallbackMode::kGeneric:
__ LoadTaggedField(
scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset),
r0);
__ StoreU64(scratch2,
MemOperand(sp, FCA::kDataIndex * kSystemPointerSize));
break;
case CallApiCallbackMode::kOptimizedNoProfiling:
case CallApiCallbackMode::kOptimized:
__ StoreU64(call_data,
MemOperand(sp, FCA::kDataIndex * kSystemPointerSize));
break;
}
// kNewTarget.
__ StoreU64(scratch,
MemOperand(sp, FCA::kNewTargetIndex * kSystemPointerSize));
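// Note: scratch still holds undefined from the kReturnValue store
// above, so the same value doubles as kNewTarget here.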
// Keep a pointer to kHolder (= implicit_args) in a {holder} register.
// We use it below to set up the FunctionCallbackInfo object.
__ mr(holder, sp);
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
// PPC LINUX ABI:
//
// Create 4 extra slots on stack:
// [0] space for DirectCEntryStub's LR save
// [1-3] FunctionCallbackInfo
// [4] number of bytes to drop from the stack after returning
static constexpr int kSlotsToDropOnStackSize = 1 * kSystemPointerSize;
static constexpr int kApiStackSpace = 5;
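// kApiStackSpace == 5 thus covers the LR save slot, the three
// FunctionCallbackInfo words, and the drop-count slot listed above;
// kSlotsToDropOnStackSize is the size of that last slot.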
static_assert(FCA::kImplicitArgsOffset == 0);
static_assert(FCA::kValuesOffset == 1 * kSystemPointerSize);
static_assert(FCA::kLengthOffset == 2 * kSystemPointerSize);
const int exit_frame_params_count =
mode == CallApiCallbackMode::kGeneric
? ApiCallbackExitFrameConstants::kAdditionalParametersCount
: 0;
FrameScope frame_scope(masm, StackFrame::MANUAL);
if (mode == CallApiCallbackMode::kGeneric) {
ASM_CODE_COMMENT_STRING(masm, "Push API_CALLBACK_EXIT frame arguments");
__ AllocateStackSpace(exit_frame_params_count * kSystemPointerSize);
// No padding is required.
static_assert(ApiCallbackExitFrameConstants::kOptionalPaddingSize == 0);
// Context parameter.
static_assert(ApiCallbackExitFrameConstants::kContextOffset ==
4 * kSystemPointerSize);
__ StoreU64(kContextRegister, MemOperand(sp, 2 * kSystemPointerSize));
// Argc parameter as a Smi.
static_assert(ApiCallbackExitFrameConstants::kArgcOffset ==
3 * kSystemPointerSize);
__ SmiTag(scratch, argc);
__ StoreU64(scratch, MemOperand(sp, 1 * kSystemPointerSize));
// Target parameter.
static_assert(ApiCallbackExitFrameConstants::kTargetOffset ==
2 * kSystemPointerSize);
__ LoadTaggedField(
scratch,
FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset), r0);
__ StoreU64(scratch, MemOperand(sp, 0 * kSystemPointerSize));
__ LoadU64(api_function_address,
FieldMemOperand(callback,
CallHandlerInfo::kMaybeRedirectedCallbackOffset),
r0);
__ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT);
} else {
__ EnterExitFrame(kApiStackSpace, StackFrame::EXIT);
}
{
ASM_CODE_COMMENT_STRING(masm, "Initialize FunctionCallbackInfo");
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
// Arguments are after the return address (pushed by EnterExitFrame()).
__ StoreU64(holder, ExitFrameStackSlotOperand(FCA::kImplicitArgsOffset));
// FunctionCallbackInfo::values_ (points at the first varargs argument
// passed on the stack).
__ addi(holder, holder,
Operand(FCA::kArgsLengthWithReceiver * kSystemPointerSize));
__ StoreU64(holder, ExitFrameStackSlotOperand(FCA::kValuesOffset));
// FunctionCallbackInfo::length_.
__ stw(argc, ExitFrameStackSlotOperand(FCA::kLengthOffset));
}
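// The three stores above mirror v8::FunctionCallbackInfo's layout:
// implicit_args_ points at the kHolder slot set up earlier, values_ at
// the first JS argument on the caller's stack, and length_ holds argc.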
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
MemOperand stack_space_operand =
ExitFrameStackSlotOperand(FCA::kLengthOffset + kSlotsToDropOnStackSize);
__ mov(scratch,
Operand((FCA::kArgsLengthWithReceiver + exit_frame_params_count) *
kSystemPointerSize));
__ ShiftLeftU64(ip, argc, Operand(kSystemPointerSizeLog2));
__ add(scratch, scratch, ip);
__ StoreU64(scratch, stack_space_operand);
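// Worked example (illustrative values): with argc == 2 in kGeneric
// mode, and assuming exit_frame_params_count == 3 and
// FCA::kArgsLengthWithReceiver == 7, this stores
// (7 + 3 + 2) * kSystemPointerSize == 96 bytes to drop on return.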
__ RecordComment("v8::FunctionCallback's argument");
__ addi(function_callback_info_arg, sp,
Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
DCHECK(!AreAliased(api_function_address, function_callback_info_arg));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(mode);
// Pass api function address to thunk wrapper in case profiler or side-effect
// checking is enabled.
Register thunk_arg = api_function_address;
MemOperand return_value_operand = ExitFrameCallerStackSlotOperand(
FCA::kReturnValueIndex + exit_frame_params_count);
static constexpr int kUseStackSpaceOperand = 0;
const bool with_profiling =
mode != CallApiCallbackMode::kOptimizedNoProfiling;
CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
thunk_ref, thunk_arg, kUseStackSpaceOperand,
&stack_space_operand, return_value_operand);
}
void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- cp : context
// -- r4 : receiver
// -- r6 : accessor info
// -- r3 : holder
// -----------------------------------
int arg0Slot = 0;
int accessorInfoSlot = 0;
int apiStackSpace = 0;
// Build v8::PropertyCallbackInfo::args_ array on the stack and push the
// property name below the exit frame to make the GC aware of them.
using PCA = PropertyCallbackArguments;
static_assert(PCA::kShouldThrowOnErrorIndex == 0);
static_assert(PCA::kHolderIndex == 1);
static_assert(PCA::kIsolateIndex == 2);
static_assert(PCA::kUnusedIndex == 3);
static_assert(PCA::kReturnValueIndex == 4);
static_assert(PCA::kDataIndex == 5);
static_assert(PCA::kThisIndex == 6);
static_assert(PCA::kArgsLength == 7);
// Set up PropertyCallbackInfo's args_ on the stack as follows:
// Target state:
// sp[0 * kSystemPointerSize]: name
// sp[1 * kSystemPointerSize]: kShouldThrowOnErrorIndex <= PCI::args_
// sp[2 * kSystemPointerSize]: kHolderIndex
// sp[3 * kSystemPointerSize]: kIsolateIndex
// sp[4 * kSystemPointerSize]: kUnusedIndex
// sp[5 * kSystemPointerSize]: kReturnValueIndex
// sp[6 * kSystemPointerSize]: kDataIndex
// sp[7 * kSystemPointerSize]: kThisIndex / receiver
Register name_arg = arg_reg_1;
Register property_callback_info_arg = arg_reg_2;
Register api_function_address = r5;
Register receiver = ApiGetterDescriptor::ReceiverRegister();
Register holder = ApiGetterDescriptor::HolderRegister();
Register callback = ApiGetterDescriptor::CallbackRegister();
Register scratch = r7;
Register smi_zero = r8;
DCHECK(!AreAliased(receiver, holder, callback, scratch, smi_zero));
__ LoadTaggedField(scratch,
FieldMemOperand(callback, AccessorInfo::kDataOffset), r0);
__ Push(receiver, scratch); // kThisIndex, kDataIndex
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Move(smi_zero, Smi::zero());
__ Push(scratch, smi_zero); // kReturnValueIndex, kUnusedIndex
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ Push(scratch, holder); // kIsolateIndex, kHolderIndex
__ LoadTaggedField(scratch,
FieldMemOperand(callback, AccessorInfo::kNameOffset), r0);
__ Push(smi_zero, scratch); // should_throw_on_error -> false, name
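// The pushes above build the target layout from kThisIndex down to
// name: each Push(a, b) leaves b at the lower address, so the name
// handle lands at sp[0] and the receiver at sp[7 * kSystemPointerSize].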
__ RecordComment(
"Load address of v8::PropertyAccessorInfo::args_ array and name handle.");
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
// name_arg = Handle<Name>(&name), name value was pushed to GC-ed stack space.
__ mr(name_arg, sp);
// property_callback_info_arg = v8::PCI::args_ (= &ShouldThrow)
__ addi(property_callback_info_arg, name_arg,
Operand(1 * kSystemPointerSize));
// If ABI passes Handles (pointer-sized struct) in a register:
//
// Create 2 extra slots on stack:
// [0] space for DirectCEntryStub's LR save
// [1] AccessorInfo&
//
// Otherwise:
//
// Create 3 extra slots on stack:
// [0] space for DirectCEntryStub's LR save
// [1] copy of Handle (first arg)
// [2] AccessorInfo&
if (ABI_PASSES_HANDLES_IN_REGS) {
accessorInfoSlot = kStackFrameExtraParamSlot + 1;
apiStackSpace = 2;
} else {
arg0Slot = kStackFrameExtraParamSlot + 1;
accessorInfoSlot = arg0Slot + 1;
apiStackSpace = 3;
}
constexpr int kNameOnStackSize = 1;
constexpr int kStackUnwindSpace = PCA::kArgsLength + kNameOnStackSize;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(apiStackSpace, StackFrame::EXIT);
if (!ABI_PASSES_HANDLES_IN_REGS) {
// Pass the first argument by reference.
__ StoreU64(r3, MemOperand(sp, arg0Slot * kSystemPointerSize));
__ addi(r3, sp, Operand(arg0Slot * kSystemPointerSize));
}
__ RecordComment("Create v8::PropertyCallbackInfo object on the stack.");
// Initialize v8::PropertyCallbackInfo::args_ field.
__ StoreU64(property_callback_info_arg,
MemOperand(sp, accessorInfoSlot * kSystemPointerSize));
// property_callback_info_arg = v8::PropertyCallbackInfo&
__ addi(property_callback_info_arg, sp,
Operand(accessorInfoSlot * kSystemPointerSize));
__ RecordComment("Load api_function_address");
__ LoadU64(
api_function_address,
FieldMemOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset),
r0);
DCHECK(
!AreAliased(api_function_address, property_callback_info_arg, name_arg));
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback();
// Pass AccessorInfo to thunk wrapper in case profiler or side-effect
// checking is enabled.
Register thunk_arg = callback;
MemOperand return_value_operand = ExitFrameCallerStackSlotOperand(
PCA::kReturnValueIndex + kNameOnStackSize);
MemOperand* const kUseStackSpaceConstant = nullptr;
const bool with_profiling = true;
CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
thunk_ref, thunk_arg, kStackUnwindSpace,
kUseStackSpaceConstant, return_value_operand);
}
void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
Register temp2 = temps.Acquire();
// Place the return address on the stack, making the call
// GC safe. The RegExp backend also relies on this.
__ mflr(r0);
__ StoreU64(r0,
MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
if (ABI_USES_FUNCTION_DESCRIPTORS) {
// AIX and big-endian PPC64 Linux use a function descriptor.
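// An ELFv1 function descriptor is a triple of pointers: word 0 holds
// the real entry address and word 1 the TOC pointer, so load the TOC
// into its ABI register, then replace temp2 with the entry address.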
__ LoadU64(ToRegister(ABI_TOC_REGISTER),
MemOperand(temp2, kSystemPointerSize));
__ LoadU64(temp2, MemOperand(temp2, 0)); // Instruction address
}
__ Call(temp2); // Call the C++ function.
__ LoadU64(r0,
MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
__ mtlr(r0);
__ blr();
}
namespace {
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Generate_DeoptimizationEntry(MacroAssembler* masm,
DeoptimizeKind deopt_kind) {
Isolate* isolate = masm->isolate();
// Unlike on ARM we don't save all the registers, just the useful ones.
// For the rest, there are gaps on the stack, so the offsets remain the same.
const int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
RegList saved_regs = restored_regs | sp;
const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
// Save all double registers before messing with them.
__ subi(sp, sp, Operand(kDoubleRegsSize));
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int offset = code * kDoubleSize;
__ stfd(dreg, MemOperand(sp, offset));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
// Leave gaps for other registers.
__ subi(sp, sp, Operand(kNumberOfRegisters * kSystemPointerSize));
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
if ((saved_regs.bits() & (1 << i)) != 0) {
__ StoreU64(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
}
}
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ Move(scratch, ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, isolate));
__ StoreU64(fp, MemOperand(scratch));
}
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
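// Illustrative arithmetic, assuming PPC64 (32 GPRs, 32 FPRs, 8-byte
// slots): 32 * 8 + 32 * 8 == 512 bytes of saved-register area.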
// Save the return address in r5 (it is also the address of the deopt
// location in the code object, used for lazy deoptimization) and
// compute the fp-to-sp delta in register r6.
__ mflr(r5);
__ addi(r6, sp, Operand(kSavedRegistersAreaSize));
__ sub(r6, fp, r6);
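// That is, r6 = fp - (sp + kSavedRegistersAreaSize): the fp-to-sp
// distance of the deoptimized frame, measured before the registers
// were saved above.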
// Allocate a new deoptimizer object.
// Pass five arguments in r3 to r7.
__ PrepareCallCFunction(5, r8);
__ li(r3, Operand::Zero());
Label context_check;
__ LoadU64(r4,
MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(r4, &context_check);
__ LoadU64(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ li(r4, Operand(static_cast<int>(deopt_kind)));
// r5: code address or 0 already loaded.
// r6: Fp-to-sp delta already loaded.
__ Move(r7, ExternalReference::isolate_address(isolate));
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
}
// Preserve "deoptimizer" object in register r3 and get the input
// frame descriptor pointer to r4 (deoptimizer->input_);
__ LoadU64(r4, MemOperand(r3, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ LoadU64(r5, MemOperand(sp, i * kSystemPointerSize));
__ StoreU64(r5, MemOperand(r4, offset));
}
int double_regs_offset = FrameDescription::double_registers_offset();
// Copy double registers to
// double_registers_[DoubleRegister::kNumRegisters]
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset =
code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
__ lfd(d0, MemOperand(sp, src_offset));
__ stfd(d0, MemOperand(r4, dst_offset));
}
// Mark the stack as not iterable for the CPU profiler which won't be able to
// walk the stack without the return address.
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.Acquire();
Register zero = r7;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ li(zero, Operand(0));
__ stb(zero, MemOperand(is_iterable));
}
// Remove the saved registers from the stack.
__ addi(sp, sp, Operand(kSavedRegistersAreaSize));
// Compute a pointer to the unwinding limit in register r5; that is
// the first stack slot not part of the input frame.
__ LoadU64(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
__ add(r5, r5, sp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ addi(r6, r4, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
__ b(&pop_loop_header);
__ bind(&pop_loop);
__ pop(r7);
__ StoreU64(r7, MemOperand(r6, 0));
__ addi(r6, r6, Operand(kSystemPointerSize));
__ bind(&pop_loop_header);
__ CmpS64(r5, sp);
__ bne(&pop_loop);
// Compute the output frame in the deoptimizer.
__ push(r3); // Preserve deoptimizer object across call.
// r3: deoptimizer object; r4: scratch.
__ PrepareCallCFunction(1, r4);
// Call Deoptimizer::ComputeOutputFrames().
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(r3); // Restore deoptimizer object (class Deoptimizer).
__ LoadU64(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: r7 = current "FrameDescription** output_",
// r4 = one past the last FrameDescription**.
__ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
__ LoadU64(r7,
MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
__ ShiftLeftU64(r4, r4, Operand(kSystemPointerSizeLog2));
__ add(r4, r7, r4);
__ b(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: r5 = current FrameDescription*, r6 = loop index.
__ LoadU64(r5, MemOperand(r7, 0)); // output_[ix]
__ LoadU64(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
__ b(&inner_loop_header);
__ bind(&inner_push_loop);
__ addi(r6, r6, Operand(-sizeof(intptr_t)));
__ add(r9, r5, r6);
__ LoadU64(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
__ push(r9);
__ bind(&inner_loop_header);
__ cmpi(r6, Operand::Zero());
__ bne(&inner_push_loop);  // Loop until r6 reaches zero.
__ addi(r7, r7, Operand(kSystemPointerSize));
__ bind(&outer_loop_header);
__ CmpS64(r7, r4);
__ blt(&outer_push_loop);
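// A C sketch of the two loops above, using register names as
// variables and hypothetical accessors:
//   for (; r7 < r4; r7 += kSystemPointerSize) {  // each output frame
//     FrameDescription* desc = *reinterpret_cast<FrameDescription**>(r7);
//     for (intptr_t off = desc->frame_size(); off > 0;) {
//       off -= sizeof(intptr_t);
//       push(desc->frame_content()[off / sizeof(intptr_t)]);
//     }
//   }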
__ LoadU64(r4, MemOperand(r3, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ lfd(dreg, MemOperand(r4, src_offset));
}
// Push pc, and continuation from the last output frame.
__ LoadU64(r9, MemOperand(r5, FrameDescription::pc_offset()));
__ push(r9);
__ LoadU64(r9, MemOperand(r5, FrameDescription::continuation_offset()));
__ push(r9);
// Restore the registers from the last output frame.
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
DCHECK(!(restored_regs.has(scratch)));
__ mr(scratch, r5);
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
if ((restored_regs.bits() & (1 << i)) != 0) {
__ LoadU64(ToRegister(i), MemOperand(scratch, offset));
}
}
}
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.Acquire();
Register one = r7;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ li(one, Operand(1));
__ stb(one, MemOperand(is_iterable));
}
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ pop(scratch); // get continuation, leave pc on stack
__ pop(r0);
__ mtlr(r0);
__ Jump(scratch);
}
__ stop();
}
} // namespace
void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
MacroAssembler* masm) {
// Implemented on this platform; see https://crrev.com/c/2695591.
Generate_BaselineOrInterpreterEntry(masm, false);
}
void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
MacroAssembler* masm) {
// Implemented on this platform; see https://crrev.com/c/2695591.
Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
// Implemented on this platform; see https://crrev.com/c/2800112.
Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_RestartFrameTrampoline(MacroAssembler* masm) {
// Frame is being dropped:
// - Look up current function on the frame.
// - Leave the frame.
// - Restart the frame by calling the function.
__ LoadU64(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadU64(r3, MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ LeaveFrame(StackFrame::INTERPRETED);
// The arguments are already on the stack (including any necessary
// padding), so we should not try to massage them again.
__ mov(r5, Operand(kDontAdaptArgumentsSentinel));
__ InvokeFunction(r4, r5, r3, InvokeType::kJump);
}
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64