// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/logging.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/x64/assembler-x64-inl.h"
#include "src/codegen/x64/assembler-x64.h"
#include "src/codegen/x64/register-x64.h"
#include "src/maglev/maglev-assembler-inl.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir-inl.h"
#include "src/maglev/maglev-ir.h"
#include "src/objects/feedback-cell.h"
#include "src/objects/instance-type.h"
#include "src/objects/js-function.h"
namespace v8 {
namespace internal {
namespace maglev {
#define __ masm->
// ---
// Nodes
// ---
void FoldedAllocation::SetValueLocationConstraints() {
UseRegister(raw_allocation());
DefineAsRegister(this);
}
void FoldedAllocation::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
__ leaq(ToRegister(result()),
Operand(ToRegister(raw_allocation()), offset()));
}
void CheckJSTypedArrayBounds::SetValueLocationConstraints() {
UseRegister(receiver_input());
if (ElementsKindSize(elements_kind_) == 1) {
UseRegister(index_input());
} else {
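    // GenerateCode scales the index register in place when the element size
    // is greater than one byte, so it must be clobberable.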
UseAndClobberRegister(index_input());
}
}
void CheckJSTypedArrayBounds::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register object = ToRegister(receiver_input());
Register index = ToRegister(index_input());
Register byte_length = kScratchRegister;
if (v8_flags.debug_code) {
__ AssertNotSmi(object);
__ CmpObjectType(object, JS_TYPED_ARRAY_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kUnexpectedValue);
}
__ LoadBoundedSizeFromObject(byte_length, object,
JSTypedArray::kRawByteLengthOffset);
int element_size = ElementsKindSize(elements_kind_);
if (element_size > 1) {
DCHECK(element_size == 2 || element_size == 4 || element_size == 8);
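    // Scale the element index to a byte offset; for a power-of-two element
    // size, CountTrailingZeros(size) is log2(size), e.g. size 4 -> shift by 2.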
__ shlq(index, Immediate(base::bits::CountTrailingZeros(element_size)));
}
__ cmpq(index, byte_length);
// We use {above_equal} which does an unsigned comparison to handle negative
// indices as well.
__ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
}
void CheckJSDataViewBounds::SetValueLocationConstraints() {
UseRegister(receiver_input());
UseRegister(index_input());
}
void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register object = ToRegister(receiver_input());
Register index = ToRegister(index_input());
Register byte_length = kScratchRegister;
if (v8_flags.debug_code) {
__ AssertNotSmi(object);
__ CmpObjectType(object, JS_DATA_VIEW_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kUnexpectedValue);
}
// Normal DataView (backed by AB / SAB) or non-length tracking backed by GSAB.
__ LoadBoundedSizeFromObject(byte_length, object,
JSDataView::kRawByteLengthOffset);
int element_size = ExternalArrayElementSize(element_type_);
if (element_size > 1) {
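    // An access at {index} touches bytes [index, index + element_size), so
    // shrink the length by element_size - 1 and let the unsigned compare
    // below also reject straddling accesses; e.g. byte_length 10, element
    // size 4 -> last valid index is 6.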
__ subq(byte_length, Immediate(element_size - 1));
__ EmitEagerDeoptIf(negative, DeoptimizeReason::kOutOfBounds, this);
}
__ cmpl(index, byte_length);
__ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
}
int CheckedObjectToIndex::MaxCallStackArgs() const {
return MaglevAssembler::ArgumentStackSlotsForCFunctionCall(1);
}
int BuiltinStringFromCharCode::MaxCallStackArgs() const {
return AllocateDescriptor::GetStackParameterCount();
}
void BuiltinStringFromCharCode::SetValueLocationConstraints() {
if (code_input().node()->Is<Int32Constant>()) {
UseAny(code_input());
} else {
UseAndClobberRegister(code_input());
set_temporaries_needed(1);
}
DefineAsRegister(this);
}
void BuiltinStringFromCharCode::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register result_string = ToRegister(result());
if (Int32Constant* constant = code_input().node()->TryCast<Int32Constant>()) {
int32_t char_code = constant->value();
if (0 <= char_code && char_code < String::kMaxOneByteCharCode) {
__ LoadSingleCharacterString(result_string, char_code);
} else {
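      // Char codes outside the one-byte cache range need a fresh one-element
      // two-byte string; the code unit is written directly into its payload.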
__ AllocateTwoByteString(register_snapshot(), result_string, 1);
__ movw(FieldOperand(result_string, SeqTwoByteString::kHeaderSize),
Immediate(char_code & 0xFFFF));
}
} else {
MaglevAssembler::ScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
Register char_code = ToRegister(code_input());
__ StringFromCharCode(register_snapshot(), nullptr, result_string,
char_code, scratch);
}
}
int BuiltinStringPrototypeCharCodeOrCodePointAt::MaxCallStackArgs() const {
DCHECK_EQ(Runtime::FunctionForId(Runtime::kStringCharCodeAt)->nargs, 2);
return 2;
}
void BuiltinStringPrototypeCharCodeOrCodePointAt::
SetValueLocationConstraints() {
UseAndClobberRegister(string_input());
UseAndClobberRegister(index_input());
DefineAsRegister(this);
set_temporaries_needed(1);
}
void BuiltinStringPrototypeCharCodeOrCodePointAt::GenerateCode(
MaglevAssembler* masm, const ProcessingState& state) {
MaglevAssembler::ScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
Register string = ToRegister(string_input());
Register index = ToRegister(index_input());
ZoneLabelRef done(masm);
RegisterSnapshot save_registers = register_snapshot();
__ StringCharCodeOrCodePointAt(mode_, save_registers, ToRegister(result()),
string, index, scratch, *done);
__ bind(*done);
}
void Int32AddWithOverflow::SetValueLocationConstraints() {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(this);
}
void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ addl(left, right);
// None of the mutated input registers should be a register input into the
// eager deopt info.
DCHECK_REGLIST_EMPTY(RegList{left} &
GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
__ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}
void Int32SubtractWithOverflow::SetValueLocationConstraints() {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(this);
}
void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ subl(left, right);
// None of the mutated input registers should be a register input into the
// eager deopt info.
DCHECK_REGLIST_EMPTY(RegList{left} &
GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
__ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}
void Int32MultiplyWithOverflow::SetValueLocationConstraints() {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(this);
set_temporaries_needed(1);
}
void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register result = ToRegister(this->result());
Register right = ToRegister(right_input());
DCHECK_EQ(result, ToRegister(left_input()));
MaglevAssembler::ScratchRegisterScope temps(masm);
Register saved_left = temps.Acquire();
__ movl(saved_left, result);
// TODO(leszeks): peephole optimise multiplication by a constant.
__ imull(result, right);
// None of the mutated input registers should be a register input into the
// eager deopt info.
DCHECK_REGLIST_EMPTY(RegList{saved_left, result} &
GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
__ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
// If the result is zero, check if either lhs or rhs is negative.
Label end;
__ cmpl(result, Immediate(0));
__ j(not_zero, &end);
{
__ orl(saved_left, right);
__ cmpl(saved_left, Immediate(0));
// If one of them is negative, we must have a -0 result, which is non-int32,
// so deopt.
// TODO(leszeks): Consider splitting these deopts to have distinct deopt
// reasons. Otherwise, the reason has to match the above.
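    // Example: -5 * 0 gives an integer 0 with (-5 | 0) < 0, i.e. the exact
    // result is -0.0, which int32 cannot represent.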
__ EmitEagerDeoptIf(less, DeoptimizeReason::kOverflow, this);
}
__ bind(&end);
}
void Int32ModulusWithOverflow::SetValueLocationConstraints() {
UseRegister(left_input());
UseAndClobberRegister(right_input());
DefineAsFixed(this, rdx);
// rax,rdx are clobbered by div.
RequireSpecificTemporary(rax);
RequireSpecificTemporary(rdx);
}
void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
  // If AreAliased(lhs, rhs):
  //   deopt if lhs < 0 // Minus zero.
  //   0
  //
  // Otherwise, use the same algorithm as in EffectControlLinearizer:
  //   if rhs <= 0 then
  //     rhs = -rhs
  //     deopt if rhs == 0
  //   if lhs < 0 then
  //     let lhs_abs = -lhs in
  //     let res = lhs_abs % rhs in
  //     deopt if res == 0
  //     -res
  //   else
  //     let msk = rhs - 1 in
  //     if rhs & msk == 0 then
  //       lhs & msk
  //     else
  //       lhs % rhs
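  // Worked examples (JS semantics): -13 % 5 == -3 (negative lhs path) and
  // 13 % -8 == 5 (rhs negated to 8, msk = 7, 13 & 7 == 5).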
Register lhs = ToRegister(left_input());
Register rhs = ToRegister(right_input());
static constexpr DeoptimizeReason deopt_reason =
DeoptimizeReason::kDivisionByZero;
if (lhs == rhs) {
// For the modulus algorithm described above, lhs and rhs must not alias
// each other.
__ testl(lhs, lhs);
// TODO(victorgomes): This ideally should be kMinusZero, but Maglev only
// allows one deopt reason per IR.
__ EmitEagerDeoptIf(negative, deopt_reason, this);
__ Move(ToRegister(result()), 0);
return;
}
DCHECK(!AreAliased(lhs, rhs, rax, rdx));
ZoneLabelRef done(masm);
ZoneLabelRef rhs_checked(masm);
__ cmpl(rhs, Immediate(0));
__ JumpToDeferredIf(
less_equal,
[](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs,
Int32ModulusWithOverflow* node) {
__ negl(rhs);
__ j(not_zero, *rhs_checked);
__ EmitEagerDeopt(node, deopt_reason);
},
rhs_checked, rhs, this);
__ bind(*rhs_checked);
__ cmpl(lhs, Immediate(0));
__ JumpToDeferredIf(
less,
[](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs,
Int32ModulusWithOverflow* node) {
// `divl(divisor)` divides rdx:rax by the divisor and stores the
// quotient in rax, the remainder in rdx.
__ movl(rax, lhs);
__ negl(rax);
__ xorl(rdx, rdx);
__ divl(rhs);
__ negl(rdx);
__ j(not_zero, *done);
// TODO(victorgomes): This ideally should be kMinusZero, but Maglev only
// allows one deopt reason per IR.
__ EmitEagerDeopt(node, deopt_reason);
},
done, lhs, rhs, this);
Label rhs_not_power_of_2;
Register mask = rax;
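  // rhs is a power of two iff rhs & (rhs - 1) == 0, e.g. 8 & 7 == 0 but
  // 12 & 11 == 8.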
__ leal(mask, Operand(rhs, -1));
__ testl(rhs, mask);
__ j(not_zero, &rhs_not_power_of_2, Label::kNear);
// {rhs} is power of 2.
__ andl(mask, lhs);
__ movl(ToRegister(result()), mask);
__ jmp(*done, Label::kNear);
__ bind(&rhs_not_power_of_2);
// `divl(divisor)` divides rdx:rax by the divisor and stores the
// quotient in rax, the remainder in rdx.
__ movl(rax, lhs);
__ xorl(rdx, rdx);
__ divl(rhs);
// Result is implicitly written to rdx.
DCHECK_EQ(ToRegister(result()), rdx);
__ bind(*done);
}
void Int32DivideWithOverflow::SetValueLocationConstraints() {
UseRegister(left_input());
UseRegister(right_input());
DefineAsFixed(this, rax);
// rax,rdx are clobbered by idiv.
RequireSpecificTemporary(rax);
RequireSpecificTemporary(rdx);
}
void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ movl(rax, left);
// TODO(leszeks): peephole optimise division by a constant.
// Sign extend eax into edx.
__ cdq();
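  // idivl below divides rdx:rax by its operand, leaving the quotient in rax
  // and the remainder in rdx.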
// Pre-check for overflow, since idiv throws a division exception on overflow
// rather than setting the overflow flag. Logic copied from
// effect-control-linearizer.cc
// Check if {right} is positive (and not zero).
__ cmpl(right, Immediate(0));
ZoneLabelRef done(masm);
__ JumpToDeferredIf(
less_equal,
[](MaglevAssembler* masm, ZoneLabelRef done, Register right,
Int32DivideWithOverflow* node) {
// {right} is negative or zero.
// Check if {right} is zero.
// We've already done the compare and flags won't be cleared yet.
// TODO(leszeks): Using kNotInt32 here, but kDivisionByZero would be
// better. Right now all eager deopts in a node have to be the same --
// we should allow a node to emit multiple eager deopts with different
// reasons.
__ EmitEagerDeoptIf(equal, DeoptimizeReason::kNotInt32, node);
// Check if {left} is zero, as that would produce minus zero. Left is in
// rax already.
__ cmpl(rax, Immediate(0));
// TODO(leszeks): Better DeoptimizeReason = kMinusZero.
__ EmitEagerDeoptIf(equal, DeoptimizeReason::kNotInt32, node);
// Check if {left} is kMinInt and {right} is -1, in which case we'd have
// to return -kMinInt, which is not representable as Int32.
__ cmpl(rax, Immediate(kMinInt));
__ j(not_equal, *done);
__ cmpl(right, Immediate(-1));
__ j(not_equal, *done);
// TODO(leszeks): Better DeoptimizeReason = kOverflow, but
// eager_deopt_info is already configured as kNotInt32.
__ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
},
done, right, this);
__ bind(*done);
// Perform the actual integer division.
__ idivl(right);
// Check that the remainder is zero.
__ cmpl(rdx, Immediate(0));
// None of the mutated input registers should be a register input into the
// eager deopt info.
DCHECK_REGLIST_EMPTY(RegList{rax, rdx} &
GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
__ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, this);
DCHECK_EQ(ToRegister(result()), rax);
}
void Int32BitwiseAnd::SetValueLocationConstraints() {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(this);
}
void Int32BitwiseAnd::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ andl(left, right);
}
void Int32BitwiseOr::SetValueLocationConstraints() {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(this);
}
void Int32BitwiseOr::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ orl(left, right);
}
void Int32BitwiseXor::SetValueLocationConstraints() {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(this);
}
void Int32BitwiseXor::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ xorl(left, right);
}
void Int32ShiftLeft::SetValueLocationConstraints() {
UseRegister(left_input());
// Use the "shift by cl" variant of shl.
// TODO(leszeks): peephole optimise shifts by a constant.
UseFixed(right_input(), rcx);
DefineSameAsFirst(this);
}
void Int32ShiftLeft::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register left = ToRegister(left_input());
DCHECK_EQ(rcx, ToRegister(right_input()));
__ shll_cl(left);
}
void Int32ShiftRight::SetValueLocationConstraints() {
UseRegister(left_input());
// Use the "shift by cl" variant of sar.
// TODO(leszeks): peephole optimise shifts by a constant.
UseFixed(right_input(), rcx);
DefineSameAsFirst(this);
}
void Int32ShiftRight::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register left = ToRegister(left_input());
DCHECK_EQ(rcx, ToRegister(right_input()));
__ sarl_cl(left);
}
void Int32ShiftRightLogical::SetValueLocationConstraints() {
UseRegister(left_input());
// Use the "shift by cl" variant of shr.
// TODO(leszeks): peephole optimise shifts by a constant.
UseFixed(right_input(), rcx);
DefineSameAsFirst(this);
}
void Int32ShiftRightLogical::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register left = ToRegister(left_input());
DCHECK_EQ(rcx, ToRegister(right_input()));
__ shrl_cl(left);
}
void Int32IncrementWithOverflow::SetValueLocationConstraints() {
UseRegister(value_input());
DefineSameAsFirst(this);
}
void Int32IncrementWithOverflow::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register value = ToRegister(value_input());
__ incl(value);
__ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}
void Int32DecrementWithOverflow::SetValueLocationConstraints() {
UseRegister(value_input());
DefineSameAsFirst(this);
}
void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register value = ToRegister(value_input());
__ decl(value);
__ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}
void Int32NegateWithOverflow::SetValueLocationConstraints() {
UseRegister(value_input());
DefineSameAsFirst(this);
}
void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register value = ToRegister(value_input());
// Deopt when the result would be -0.
__ testl(value, value);
__ EmitEagerDeoptIf(zero, DeoptimizeReason::kOverflow, this);
__ negl(value);
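  // negl overflows only for kMinInt: -(-2^31) == 2^31 is not an int32.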
__ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}
void Int32BitwiseNot::SetValueLocationConstraints() {
UseRegister(value_input());
DefineSameAsFirst(this);
}
void Int32BitwiseNot::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register value = ToRegister(value_input());
__ notl(value);
}
void Float64Add::SetValueLocationConstraints() {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(this);
}
void Float64Add::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
DoubleRegister left = ToDoubleRegister(left_input());
DoubleRegister right = ToDoubleRegister(right_input());
__ Addsd(left, right);
}
void Float64Subtract::SetValueLocationConstraints() {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(this);
}
void Float64Subtract::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
DoubleRegister left = ToDoubleRegister(left_input());
DoubleRegister right = ToDoubleRegister(right_input());
__ Subsd(left, right);
}
void Float64Multiply::SetValueLocationConstraints() {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(this);
}
void Float64Multiply::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
DoubleRegister left = ToDoubleRegister(left_input());
DoubleRegister right = ToDoubleRegister(right_input());
__ Mulsd(left, right);
}
void Float64Divide::SetValueLocationConstraints() {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(this);
}
void Float64Divide::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
DoubleRegister left = ToDoubleRegister(left_input());
DoubleRegister right = ToDoubleRegister(right_input());
__ Divsd(left, right);
}
void Float64Modulus::SetValueLocationConstraints() {
UseRegister(left_input());
UseRegister(right_input());
RequireSpecificTemporary(rax);
DefineAsRegister(this);
}
void Float64Modulus::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
// Approach copied from code-generator-x64.cc
// Allocate space to use fld to move the value to the FPU stack.
__ AllocateStackSpace(kDoubleSize);
Operand scratch_stack_space = Operand(rsp, 0);
__ Movsd(scratch_stack_space, ToDoubleRegister(right_input()));
__ fld_d(scratch_stack_space);
__ Movsd(scratch_stack_space, ToDoubleRegister(left_input()));
__ fld_d(scratch_stack_space);
// Loop while fprem isn't done.
Label mod_loop;
__ bind(&mod_loop);
  // This instruction traps on all kinds of inputs, but we are assuming the
  // floating point control word is set to ignore them all.
  __ fprem();
  // The following 2 instructions implicitly use rax.
__ fnstsw_ax();
if (CpuFeatures::IsSupported(SAHF)) {
CpuFeatureScope sahf_scope(masm, SAHF);
__ sahf();
} else {
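    // No SAHF support: emulate it by moving the FPU status byte (AH) into
    // the low byte of EFLAGS via pushq/popfq.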
__ shrl(rax, Immediate(8));
__ andl(rax, Immediate(0xFF));
__ pushq(rax);
__ popfq();
}
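  // The FPU C2 flag (set while fprem is incomplete) now sits in the parity
  // flag, so loop while PF is set.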
__ j(parity_even, &mod_loop);
// Move output to stack and clean up.
__ fstp(1);
__ fstp_d(scratch_stack_space);
__ Movsd(ToDoubleRegister(result()), scratch_stack_space);
__ addq(rsp, Immediate(kDoubleSize));
}
void Float64Negate::SetValueLocationConstraints() {
UseRegister(input());
DefineSameAsFirst(this);
}
void Float64Negate::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
DoubleRegister value = ToDoubleRegister(input());
__ Negpd(value, value, kScratchRegister);
}
void Float64Round::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
DoubleRegister in = ToDoubleRegister(input());
DoubleRegister out = ToDoubleRegister(result());
if (kind_ == Kind::kNearest) {
MaglevAssembler::ScratchRegisterScope temps(masm);
DoubleRegister temp = temps.AcquireDouble();
__ Move(temp, in);
__ Roundsd(out, in, kRoundToNearest);
// RoundToNearest rounds to even on tie, while JS expects it to round
// towards +Infinity. Fix the difference by checking if we rounded down by
// exactly 0.5, and if so, round to the other side.
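    // Example: in = 2.5 rounds to 2.0 under ties-to-even; temp becomes 0.5,
    // so 0.5 is added twice, yielding 3.0 as Math.round requires.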
__ Subsd(temp, out);
__ Move(kScratchDoubleReg, 0.5);
Label done;
__ Ucomisd(temp, kScratchDoubleReg);
__ JumpIf(not_equal, &done, Label::kNear);
// Fix wrong tie-to-even by adding 0.5 twice.
__ Addsd(out, kScratchDoubleReg);
__ Addsd(out, kScratchDoubleReg);
__ bind(&done);
} else if (kind_ == Kind::kFloor) {
__ Roundsd(out, in, kRoundDown);
} else if (kind_ == Kind::kCeil) {
__ Roundsd(out, in, kRoundUp);
}
}
int Float64Exponentiate::MaxCallStackArgs() const {
return MaglevAssembler::ArgumentStackSlotsForCFunctionCall(2);
}
void Float64Exponentiate::SetValueLocationConstraints() {
UseFixed(left_input(), xmm0);
UseFixed(right_input(), xmm1);
DefineSameAsFirst(this);
}
void Float64Exponentiate::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(2);
__ CallCFunction(ExternalReference::ieee754_pow_function(), 2);
}
int Float64Ieee754Unary::MaxCallStackArgs() const {
return MaglevAssembler::ArgumentStackSlotsForCFunctionCall(1);
}
void Float64Ieee754Unary::SetValueLocationConstraints() {
UseFixed(input(), xmm0);
DefineSameAsFirst(this);
}
void Float64Ieee754Unary::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(1);
__ CallCFunction(ieee_function_, 1);
}
void HoleyFloat64ToMaybeNanFloat64::SetValueLocationConstraints() {
UseRegister(input());
DefineSameAsFirst(this);
}
void HoleyFloat64ToMaybeNanFloat64::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
DoubleRegister value = ToDoubleRegister(input());
// The hole value is a signalling NaN, so just silence it to get the float64
// value.
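  // Subtracting +0.0 leaves every ordinary value (including -0.0) unchanged
  // but converts a signalling NaN into a quiet one.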
__ Xorpd(kScratchDoubleReg, kScratchDoubleReg);
__ Subsd(value, kScratchDoubleReg);
}
namespace {
enum class ReduceInterruptBudgetType { kLoop, kReturn };
void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
Node* node, ReduceInterruptBudgetType type,
Register scratch0) {
// For loops, first check for interrupts. Don't do this for returns, as we
// can't lazy deopt to the end of a return.
if (type == ReduceInterruptBudgetType::kLoop) {
Label next;
// Here, we only care about interrupts since we've already guarded against
// real stack overflows on function entry.
__ cmpq(rsp, __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
__ j(above, &next);
// An interrupt has been requested and we must call into runtime to handle
// it; since we already pay the call cost, combine with the TieringManager
// call.
{
SaveRegisterStateForCall save_register_state(masm,
node->register_snapshot());
__ Move(kContextRegister, masm->native_context().object());
__ Push(MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1);
save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info());
}
__ jmp(*done); // All done, continue.
__ bind(&next);
}
// No pending interrupts. Call into the TieringManager if needed.
{
SaveRegisterStateForCall save_register_state(masm,
node->register_snapshot());
__ Move(kContextRegister, masm->native_context().object());
__ Push(MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
// Note: must not cause a lazy deopt!
__ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1);
save_register_state.DefineSafepoint();
}
__ jmp(*done);
}
void GenerateReduceInterruptBudget(MaglevAssembler* masm, Node* node,
ReduceInterruptBudgetType type, int amount) {
MaglevAssembler::ScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedField(scratch,
FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
__ subl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
Immediate(amount));
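  // The subtraction sets the sign flag when the budget drops below zero;
  // `less` below then routes into the deferred interrupt/tiering handler.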
ZoneLabelRef done(masm);
__ JumpToDeferredIf(less, HandleInterruptsAndTiering, done, node, type,
scratch);
__ bind(*done);
}
} // namespace
int ReduceInterruptBudgetForLoop::MaxCallStackArgs() const { return 1; }
void ReduceInterruptBudgetForLoop::SetValueLocationConstraints() {
set_temporaries_needed(1);
}
void ReduceInterruptBudgetForLoop::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
GenerateReduceInterruptBudget(masm, this, ReduceInterruptBudgetType::kLoop,
amount());
}
int ReduceInterruptBudgetForReturn::MaxCallStackArgs() const { return 1; }
void ReduceInterruptBudgetForReturn::SetValueLocationConstraints() {
set_temporaries_needed(1);
}
void ReduceInterruptBudgetForReturn::GenerateCode(
MaglevAssembler* masm, const ProcessingState& state) {
GenerateReduceInterruptBudget(masm, this, ReduceInterruptBudgetType::kReturn,
amount());
}
// ---
// Control nodes
// ---
void Return::SetValueLocationConstraints() {
UseFixed(value_input(), kReturnRegister0);
}
void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
  // Read the formal number of parameters from the top level compilation unit
  // (i.e. the outermost, non-inlined function).
int formal_params_size =
masm->compilation_info()->toplevel_compilation_unit()->parameter_count();
// We're not going to continue execution, so we can use an arbitrary register
// here instead of relying on temporaries from the register allocator.
Register actual_params_size = r8;
// Compute the size of the actual parameters + receiver (in bytes).
// TODO(leszeks): Consider making this an input into Return to re-use the
// incoming argc's register (if it's still valid).
__ movq(actual_params_size,
MemOperand(rbp, StandardFrameConstants::kArgCOffset));
// Leave the frame.
__ LeaveFrame(StackFrame::MAGLEV);
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label drop_dynamic_arg_size;
__ cmpq(actual_params_size, Immediate(formal_params_size));
__ j(greater, &drop_dynamic_arg_size);
// Drop receiver + arguments according to static formal arguments size.
__ Ret(formal_params_size * kSystemPointerSize, kScratchRegister);
__ bind(&drop_dynamic_arg_size);
// Drop receiver + arguments according to dynamic arguments size.
__ DropArguments(actual_params_size, r9, MacroAssembler::kCountIsInteger,
MacroAssembler::kCountIncludesReceiver);
__ Ret();
}
} // namespace maglev
} // namespace internal
} // namespace v8