From e391da45cf2f655c1c027c3182c68dab1b2f7c74 Mon Sep 17 00:00:00 2001
From: Lu Yahan
Date: Mon, 17 Jan 2022 15:50:49 +0800
Subject: [PATCH] deps: V8: cherry-pick d8dc66f92169
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Original commit message:

    [riscv64][sparkplug] Fix sparkplug verify framesize failed

    Change-Id: I7481749ba3d5c41d7405b0d88a51defbc8bec9d6
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3093009
    Auto-Submit: Yahan Lu
    Reviewed-by: Ji Qiu
    Commit-Queue: Yahan Lu
    Cr-Commit-Position: refs/heads/master@{#76277}

Refs: v8/v8@d8dc66f9

PR-URL: https://github.com/nodejs/node/pull/41566
Refs: https://github.com/v8/v8/commit/d8dc66f
Refs: https://github.com/v8/v8/commit/3cab84c
Refs: https://github.com/v8/v8/commit/471f862
Refs: https://github.com/v8/v8/commit/77599ff
Reviewed-By: Jiawen Geng
Reviewed-By: Michaël Zasso
---
 common.gypi                                       |   2 +-
 .../riscv64/baseline-assembler-riscv64-inl.h      | 260 +++++-------------
 .../riscv64/baseline-compiler-riscv64-inl.h       |  64 +----
 .../src/builtins/riscv64/builtins-riscv64.cc      |  68 +++--
 .../codegen/riscv64/macro-assembler-riscv64.h     |   2 +-
 5 files changed, 120 insertions(+), 276 deletions(-)

diff --git a/common.gypi b/common.gypi
index b8d61f0d32709f..89fac788d3d293 100644
--- a/common.gypi
+++ b/common.gypi
@@ -36,7 +36,7 @@

     # Reset this number to 0 on major V8 upgrades.
     # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.14',
+    'v8_embedder_string': '-node.15',

     ##### V8 defaults for Node.js #####

diff --git a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
index 01f5a5802bb698..f39f5786469c3b 100644
--- a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
@@ -109,30 +109,19 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
 }

 void BaselineAssembler::CallBuiltin(Builtin builtin) {
-  if (masm()->options().short_builtin_calls) {
-    __ CallBuiltin(builtin);
-  } else {
-    ASM_CODE_COMMENT_STRING(masm_,
-                            __ CommentForOffHeapTrampoline("call", builtin));
-    Register temp = t6;
-    __ LoadEntryFromBuiltin(builtin, temp);
-    __ Call(temp);
-  }
+  ASM_CODE_COMMENT_STRING(masm_,
+                          __ CommentForOffHeapTrampoline("call", builtin));
+  Register temp = t6;
+  __ LoadEntryFromBuiltin(builtin, temp);
+  __ Call(temp);
 }

 void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
-  if (masm()->options().short_builtin_calls) {
-    // Generate pc-relative jump.
-    __ TailCallBuiltin(builtin);
-  } else {
-    ASM_CODE_COMMENT_STRING(
-        masm_, __ CommentForOffHeapTrampoline("tail call", builtin));
-    // t6 be used for function call in RISCV64
-    // For example 'jalr t6' or 'jal t6'
-    Register temp = t6;
-    __ LoadEntryFromBuiltin(builtin, temp);
-    __ Jump(temp);
-  }
+  ASM_CODE_COMMENT_STRING(masm_,
+                          __ CommentForOffHeapTrampoline("tail call", builtin));
+  Register temp = t6;
+  __ LoadEntryFromBuiltin(builtin, temp);
+  __ Jump(temp);
 }

 void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
@@ -140,7 +129,7 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
   ScratchRegisterScope temps(this);
   Register tmp = temps.AcquireScratch();
   __ And(tmp, value, Operand(mask));
-  __ Branch(target, AsMasmCondition(cc), tmp, Operand(mask));
+  __ Branch(target, AsMasmCondition(cc), tmp, Operand(zero_reg));
 }

 void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
@@ -161,6 +150,11 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
                                            Label* target, Label::Distance) {
   ScratchRegisterScope temps(this);
   Register type = temps.AcquireScratch();
+  if (FLAG_debug_code) {
+    __ AssertNotSmi(map);
+    __ GetObjectType(map, type, type);
+    __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
+  }
   __ Ld(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
   __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
 }
@@ -182,44 +176,28 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
 }
 void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
                                   Label* target, Label::Distance) {
-  ScratchRegisterScope temps(this);
-  Register temp = temps.AcquireScratch();
+  // todo: compress pointer
   __ AssertSmi(lhs);
   __ AssertSmi(rhs);
-  if (COMPRESS_POINTERS_BOOL) {
-    __ Sub32(temp, lhs, rhs);
-  } else {
-    __ Sub64(temp, lhs, rhs);
-  }
-  __ Branch(target, AsMasmCondition(cc), temp, Operand(zero_reg));
+  __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
 }
 void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
                                      MemOperand operand, Label* target,
                                      Label::Distance) {
+  // todo: compress pointer
   ScratchRegisterScope temps(this);
-  Register tmp1 = temps.AcquireScratch();
-  Register tmp2 = temps.AcquireScratch();
-  __ Ld(tmp1, operand);
-  if (COMPRESS_POINTERS_BOOL) {
-    __ Sub32(tmp2, value, tmp1);
-  } else {
-    __ Sub64(tmp2, value, tmp1);
-  }
-  __ Branch(target, AsMasmCondition(cc), tmp2, Operand(zero_reg));
+  Register scratch = temps.AcquireScratch();
+  __ Ld(scratch, operand);
+  __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
 }
 void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
                                      Register value, Label* target,
                                      Label::Distance) {
+  // todo: compress pointer
   ScratchRegisterScope temps(this);
-  Register tmp1 = temps.AcquireScratch();
-  Register tmp2 = temps.AcquireScratch();
-  __ Ld(tmp1, operand);
-  if (COMPRESS_POINTERS_BOOL) {
-    __ Sub32(tmp2, tmp1, value);
-  } else {
-    __ Sub64(tmp2, tmp1, value);
-  }
-  __ Branch(target, AsMasmCondition(cc), tmp2, Operand(zero_reg));
+  Register scratch = temps.AcquireScratch();
+  __ Ld(scratch, operand);
+  __ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
 }
 void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                    Label* target, Label::Distance) {
@@ -267,137 +245,51 @@ inline Register ToRegister(BaselineAssembler* basm,
   return reg;
 }

-template <typename... Args>
-struct CountPushHelper;
-template <>
-struct CountPushHelper<> {
-  static int Count() { return 0; }
-};
-template <typename Arg, typename... Args>
-struct CountPushHelper<Arg, Args...> {
-  static int Count(Arg arg, Args... args) {
-    return 1 + CountPushHelper<Args...>::Count(args...);
-  }
-};
-template <typename... Args>
-struct CountPushHelper<interpreter::RegisterList, Args...> {
-  static int Count(interpreter::RegisterList list, Args... args) {
-    return list.register_count() + CountPushHelper<Args...>::Count(args...);
-  }
-};
-
 template <typename... Args>
 struct PushAllHelper;
-template <typename... Args>
-void PushAll(BaselineAssembler* basm, Args... args) {
-  PushAllHelper<Args...>::Push(basm, args...);
-}
-template <typename... Args>
-void PushAllReverse(BaselineAssembler* basm, Args... args) {
-  PushAllHelper<Args...>::PushReverse(basm, args...);
-}
-
 template <>
 struct PushAllHelper<> {
-  static void Push(BaselineAssembler* basm) {}
-  static void PushReverse(BaselineAssembler* basm) {}
+  static int Push(BaselineAssembler* basm) { return 0; }
+  static int PushReverse(BaselineAssembler* basm) { return 0; }
 };
-
-inline void PushSingle(MacroAssembler* masm, RootIndex source) {
-  masm->PushRoot(source);
-}
-inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
-
-inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
-inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
-  masm->Push(object);
-}
-inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
-  masm->li(kScratchReg, (int64_t)(immediate));
-  PushSingle(masm, kScratchReg);
-}
-
-inline void PushSingle(MacroAssembler* masm, TaggedIndex value) {
-  masm->li(kScratchReg, static_cast<int64_t>(value.ptr()));
-  PushSingle(masm, kScratchReg);
-}
-inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
-  masm->Ld(kScratchReg, operand);
-  PushSingle(masm, kScratchReg);
-}
-inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
-  return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
-}
-
 template <typename Arg>
 struct PushAllHelper<Arg> {
-  static void Push(BaselineAssembler* basm, Arg arg) {
-    PushSingle(basm->masm(), arg);
+  static int Push(BaselineAssembler* basm, Arg arg) {
+    BaselineAssembler::ScratchRegisterScope scope(basm);
+    basm->masm()->Push(ToRegister(basm, &scope, arg));
+    return 1;
   }
-  static void PushReverse(BaselineAssembler* basm, Arg arg) {
-    // Push the padding register to round up the amount of values pushed.
+  static int PushReverse(BaselineAssembler* basm, Arg arg) {
     return Push(basm, arg);
   }
 };
-template <typename Arg1, typename Arg2, typename... Args>
-struct PushAllHelper<Arg1, Arg2, Args...> {
-  static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
-                   Args... args) {
-    {
-      BaselineAssembler::ScratchRegisterScope scope(basm);
-      basm->masm()->Push(ToRegister(basm, &scope, arg1),
-                         ToRegister(basm, &scope, arg2));
-    }
-    PushAll(basm, args...);
-  }
-  static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
-                          Args... args) {
-    PushAllReverse(basm, args...);
-    {
-      BaselineAssembler::ScratchRegisterScope scope(basm);
-      basm->masm()->Push(ToRegister(basm, &scope, arg2),
-                         ToRegister(basm, &scope, arg1));
-    }
-  }
-};
-// Currently RegisterLists are always be the last argument, so we don't
-// specialize for the case where they're not. We do still specialise for the
-// aligned and unaligned cases.
-template <typename Arg>
-struct PushAllHelper<Arg, interpreter::RegisterList> {
-  static void Push(BaselineAssembler* basm, Arg arg,
-                   interpreter::RegisterList list) {
-    DCHECK_EQ(list.register_count() % 2, 1);
-    PushAll(basm, arg, list[0], list.PopLeft());
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+  static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
+    PushAllHelper<Arg>::Push(basm, arg);
+    return 1 + PushAllHelper<Args...>::Push(basm, args...);
   }
-  static void PushReverse(BaselineAssembler* basm, Arg arg,
-                          interpreter::RegisterList list) {
-    if (list.register_count() == 0) {
-      PushAllReverse(basm, arg);
-    } else {
-      PushAllReverse(basm, arg, list[0], list.PopLeft());
-    }
+  static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
+    int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
+    PushAllHelper<Arg>::Push(basm, arg);
+    return nargs + 1;
   }
 };
 template <>
 struct PushAllHelper<interpreter::RegisterList> {
-  static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
-    DCHECK_EQ(list.register_count() % 2, 0);
-    for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) {
-      PushAll(basm, list[reg_index], list[reg_index + 1]);
+  static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+    for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
     }
+    return list.register_count();
   }
-  static void PushReverse(BaselineAssembler* basm,
-                          interpreter::RegisterList list) {
-    int reg_index = list.register_count() - 1;
-    if (reg_index % 2 == 0) {
-      // Push the padding register to round up the amount of values pushed.
-      PushAllReverse(basm, list[reg_index]);
-      reg_index--;
-    }
-    for (; reg_index >= 1; reg_index -= 2) {
-      PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
+  static int PushReverse(BaselineAssembler* basm,
+                         interpreter::RegisterList list) {
+    for (int reg_index = list.register_count() - 1; reg_index >= 0;
+         --reg_index) {
+      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
     }
+    return list.register_count();
   }
 };

@@ -414,10 +306,9 @@ struct PopAllHelper {
   }
 };
 template <typename... T>
-struct PopAllHelper<Register, Register, T...> {
-  static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
-                  T... tail) {
-    basm->masm()->Pop(reg1, reg2);
+struct PopAllHelper<Register, T...> {
+  static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
+    PopAllHelper<Register>::Pop(basm, reg);
     PopAllHelper<T...>::Pop(basm, tail...);
   }
 };
@@ -426,20 +317,12 @@ struct PopAllHelper {

 template <typename... T>
 int BaselineAssembler::Push(T... vals) {
-  // We have to count the pushes first, to decide whether to add padding before
-  // the first push.
-  int push_count = detail::CountPushHelper<T...>::Count(vals...);
-  if (push_count % 2 == 0) {
-    detail::PushAll(this, vals...);
-  } else {
-    detail::PushAll(this, vals...);
-  }
-  return push_count;
+  return detail::PushAllHelper<T...>::Push(this, vals...);
 }

 template <typename... T>
 void BaselineAssembler::PushReverse(T... vals) {
-  detail::PushAllReverse(this, vals...);
+  detail::PushAllHelper<T...>::PushReverse(this, vals...);
 }

 template <typename... T>
@@ -461,7 +344,7 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
 }
 void BaselineAssembler::LoadByteField(Register output, Register source,
                                       int offset) {
-  __ Ld(output, FieldMemOperand(source, offset));
+  __ Lb(output, FieldMemOperand(source, offset));
 }
 void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                                Smi value) {
@@ -495,11 +378,11 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
                             JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
-  __ Ld(interrupt_budget,
+  __ Lw(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
   // Remember to set flags as part of the add!
-  __ Add64(interrupt_budget, interrupt_budget, weight);
-  __ Sd(interrupt_budget,
+  __ Add32(interrupt_budget, interrupt_budget, weight);
+  __ Sw(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
   if (skip_interrupt_label) {
     DCHECK_LT(weight, 0);
@@ -517,11 +400,11 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
                             JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
-  __ Ld(interrupt_budget,
+  __ Lw(interrupt_budget,
        FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
   // Remember to set flags as part of the add!
-  __ Add64(interrupt_budget, interrupt_budget, weight);
-  __ Sd(interrupt_budget,
+  __ Add32(interrupt_budget, interrupt_budget, weight);
+  __ Sw(interrupt_budget,
        FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
   if (skip_interrupt_label)
     __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(weight));
@@ -556,20 +439,19 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
   int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
   int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
   __ auipc(temp, Hi20);  // Read PC + Hi20 into t6
-  __ lui(temp, Lo12);    // jump PC + Hi20 + Lo12
+  __ addi(temp, temp, Lo12);  // jump PC + Hi20 + Lo12

-  int entry_size_log2 = 2;
-  Register temp2 = scope.AcquireScratch();
-  __ CalcScaledAddress(temp2, temp, reg, entry_size_log2);
+  int entry_size_log2 = 3;
+  __ CalcScaledAddress(temp, temp, reg, entry_size_log2);
   __ Jump(temp);
   {
     TurboAssembler::BlockTrampolinePoolScope(masm());
-    __ BlockTrampolinePoolFor(num_labels * kInstrSize);
+    __ BlockTrampolinePoolFor(num_labels * kInstrSize * 2);
     __ bind(&table);
     for (int i = 0; i < num_labels; ++i) {
-      __ Branch(labels[i]);
+      __ BranchLong(labels[i]);
     }
-    DCHECK_EQ(num_labels * kInstrSize, __ InstructionsGeneratedSince(&table));
+    DCHECK_EQ(num_labels * 2, __ InstructionsGeneratedSince(&table));
     __ bind(&fallthrough);
   }
 }
@@ -598,7 +480,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
     __ masm()->Push(kJSFunctionRegister);
     __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);

-    __ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
+    __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
     __ masm()->SmiUntag(params_size);

   __ Bind(&skip_interrupt_label);
diff --git a/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
index fc73105b8e9ad6..1fbdaa0761e1ea 100644
--- a/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
@@ -37,69 +37,35 @@ void BaselineCompiler::PrologueFillFrame() {
   const int kLoopUnrollSize = 8;
   const int new_target_index = new_target_or_generator_register.index();
   const bool has_new_target = new_target_index != kMaxInt;
-  // BaselineOutOfLinePrologue already pushed one undefined.
-  register_count -= 1;
   if (has_new_target) {
-    if (new_target_index == 0) {
-      // Oops, need to fix up that undefined that BaselineOutOfLinePrologue
-      // pushed.
-      __ masm()->Sd(kJavaScriptCallNewTargetRegister, MemOperand(sp));
-    } else {
-      DCHECK_LE(new_target_index, register_count);
-      int index = 1;
-      for (; index + 2 <= new_target_index; index += 2) {
-        __ masm()->Push(kInterpreterAccumulatorRegister,
-                        kInterpreterAccumulatorRegister);
-      }
-      if (index == new_target_index) {
-        __ masm()->Push(kJavaScriptCallNewTargetRegister,
-                        kInterpreterAccumulatorRegister);
-      } else {
-        DCHECK_EQ(index, new_target_index - 1);
-        __ masm()->Push(kInterpreterAccumulatorRegister,
-                        kJavaScriptCallNewTargetRegister);
-      }
-      // We pushed "index" registers, minus the one the prologue pushed, plus
-      // the two registers that included new_target.
-      register_count -= (index - 1 + 2);
+    DCHECK_LE(new_target_index, register_count);
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * new_target_index)));
+    for (int i = 0; i < new_target_index; i++) {
+      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
     }
+    // Push new_target_or_generator.
+    __ Push(kJavaScriptCallNewTargetRegister);
+    register_count -= new_target_index + 1;
   }
   if (register_count < 2 * kLoopUnrollSize) {
     // If the frame is small enough, just unroll the frame fill completely.
-    for (int i = 0; i < register_count; i += 2) {
-      __ masm()->Push(kInterpreterAccumulatorRegister,
-                      kInterpreterAccumulatorRegister);
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+    for (int i = 0; i < register_count; ++i) {
+      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
     }
   } else {
-    BaselineAssembler::ScratchRegisterScope temps(&basm_);
-    Register scratch = temps.AcquireScratch();
-
-    // Extract the first few registers to round to the unroll size.
-    int first_registers = register_count % kLoopUnrollSize;
-    for (int i = 0; i < first_registers; i += 2) {
-      __ masm()->Push(kInterpreterAccumulatorRegister,
-                      kInterpreterAccumulatorRegister);
-    }
-    __ Move(scratch, register_count / kLoopUnrollSize);
-    // We enter the loop unconditionally, so make sure we need to loop at least
-    // once.
-    DCHECK_GT(register_count / kLoopUnrollSize, 0);
-    Label loop;
-    __ Bind(&loop);
-    for (int i = 0; i < kLoopUnrollSize; i += 2) {
-      __ masm()->Push(kInterpreterAccumulatorRegister,
-                      kInterpreterAccumulatorRegister);
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+    for (int i = 0; i < register_count; ++i) {
+      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
     }
-    __ masm()->Branch(&loop, gt, scratch, Operand(1));
   }
 }

 void BaselineCompiler::VerifyFrameSize() {
   ASM_CODE_COMMENT(&masm_);
   __ masm()->Add64(kScratchReg, sp,
-                   RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp +
-                               bytecode_->frame_size(),
-                           2 * kSystemPointerSize));
+                   Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+                           bytecode_->frame_size()));
   __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
                     Operand(fp));
 }
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
index f79e392f4800fc..38136ed53a154a 100644
--- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -1160,9 +1160,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
     // store the bytecode offset.
    if (FLAG_debug_code) {
      UseScratchRegisterScope temps(masm);
-      Register type = temps.Acquire();
-      __ GetObjectType(feedback_vector, type, type);
-      __ Assert(eq, AbortReason::kExpectedFeedbackVector, type,
+      Register invocation_count = temps.Acquire();
+      __ GetObjectType(feedback_vector, invocation_count, invocation_count);
+      __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
                Operand(FEEDBACK_VECTOR_TYPE));
    }
    // Our stack is currently aligned. We have have to push something along with
@@ -1171,8 +1171,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
    // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
    // `undefined` in the accumulator register, to skip the load in the baseline
    // code.
-    __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
-    __ Push(feedback_vector, kInterpreterAccumulatorRegister);
+    __ Push(feedback_vector);
  }

  Label call_stack_guard;
@@ -1203,7 +1202,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  {
    ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
    // Drop the frame created by the baseline call.
-    __ Pop(fp, ra);
+    __ Pop(ra, fp);
    MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
                                                 feedback_vector);
    __ Trap();
@@ -1212,14 +1211,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  __ bind(&call_stack_guard);
  {
    ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
-    Register new_target = descriptor.GetRegisterParameter(
-        BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
-
    FrameScope frame_scope(masm, StackFrame::INTERNAL);
    // Save incoming new target or generator
-    __ Push(zero_reg, new_target);
-    __ CallRuntime(Runtime::kStackGuard);
-    __ Pop(new_target, zero_reg);
+    __ Push(kJavaScriptCallNewTargetRegister);
+    __ SmiTag(frame_size);
+    __ Push(frame_size);
+    __ CallRuntime(Runtime::kStackGuardWithGap);
+    __ Pop(kJavaScriptCallNewTargetRegister);
  }

  __ Ret();
  temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
@@ -1466,31 +1464,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ bind(&is_baseline);
  {
    // Load the feedback vector from the closure.
-    __ Ld(feedback_vector,
-          FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-    __ Ld(feedback_vector,
-          FieldMemOperand(feedback_vector, Cell::kValueOffset));
+    __ LoadTaggedPointerField(
+        feedback_vector,
+        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+    __ LoadTaggedPointerField(
+        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));

    Label install_baseline_code;
    // Check if feedback vector is valid. If not, call prepare for baseline to
    // allocate it.
-    __ Ld(scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
-    __ Lh(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+    __ LoadTaggedPointerField(
+        scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+    __ Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
    __ Branch(&install_baseline_code, ne, scratch,
              Operand(FEEDBACK_VECTOR_TYPE));

-    // Read off the optimization state in the feedback vector.
-    // TODO(v8:11429): Is this worth doing here? Baseline code will check it
-    // anyway...
-    __ Ld(optimization_state,
-          FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-
-    // Check if there is optimized code or a optimization marker that needes to
-    // be processed.
-    __ And(
-        scratch, optimization_state,
-        Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
-    __ Branch(&has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+    // Check for an optimization marker.
+    LoadOptimizationStateAndJumpIfNeedsProcessing(
+        masm, optimization_state, feedback_vector,
+        &has_optimized_code_or_marker);

    // Load the baseline code into the closure.
    __ LoadTaggedPointerField(
@@ -2713,6 +2705,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
          RelocInfo::CODE_TARGET);
 }

+#if V8_ENABLE_WEBASSEMBLY
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // The function index was put in t0 by the jump table trampoline.
   // Convert to Smi for the runtime call
@@ -2786,6 +2779,7 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
   }
   __ Ret();
 }
+#endif  // V8_ENABLE_WEBASSEMBLY

 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
                                SaveFPRegsMode save_doubles, ArgvMode argv_mode,
@@ -3640,7 +3634,6 @@ namespace {
 void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
                                          bool next_bytecode,
                                          bool is_osr = false) {
-  __ Push(zero_reg, kInterpreterAccumulatorRegister);
   Label start;
   __ bind(&start);

@@ -3667,7 +3660,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
    __ Branch(&start_with_baseline, eq, scratch, Operand(BASELINE_DATA_TYPE));

    // Start with bytecode as there is no baseline code.
-    __ Pop(zero_reg, kInterpreterAccumulatorRegister);
    Builtin builtin_id = next_bytecode
                             ? Builtin::kInterpreterEnterAtNextBytecode
                             : Builtin::kInterpreterEnterAtBytecode;
@@ -3701,7 +3693,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
    UseScratchRegisterScope temps(masm);
    Register type = temps.Acquire();
    __ GetObjectType(feedback_vector, type, type);
-    __ Branch(&install_baseline_code, eq, type, Operand(FEEDBACK_VECTOR_TYPE));
+    __ Branch(&install_baseline_code, ne, type, Operand(FEEDBACK_VECTOR_TYPE));

    // Save BytecodeOffset from the stack frame.
    __ SmiUntag(kInterpreterBytecodeOffsetRegister,
                MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
@@ -3711,7 +3703,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  feedback_vector = no_reg;

  // Compute baseline pc for bytecode offset.
-  __ Push(zero_reg, kInterpreterAccumulatorRegister);
  ExternalReference get_baseline_pc_extref;
  if (next_bytecode || is_osr) {
    get_baseline_pc_extref =
@@ -3744,6 +3735,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  // Get bytecode array from the stack frame.
  __ Ld(kInterpreterBytecodeArrayRegister,
        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ Push(kInterpreterAccumulatorRegister);
  {
    Register arg_reg_1 = a0;
    Register arg_reg_2 = a1;
@@ -3755,13 +3747,15 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
    __ CallCFunction(get_baseline_pc, 3, 0);
  }
  __ Add64(code_obj, code_obj, kReturnRegister0);
-  __ Pop(kInterpreterAccumulatorRegister, zero_reg);
+  __ Pop(kInterpreterAccumulatorRegister);

  if (is_osr) {
    // Reset the OSR loop nesting depth to disarm back edges.
    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
    // Sparkplug here.
-    __ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+    __ Ld(kInterpreterBytecodeArrayRegister,
+          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+    __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
                                    BytecodeArray::kOsrLoopNestingLevelOffset));
    Generate_OSREntry(masm, code_obj,
                      Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -3786,8 +3780,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  __ bind(&install_baseline_code);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister);
    __ Push(closure);
    __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+    __ Pop(kInterpreterAccumulatorRegister);
  }
  // Retry from the start after installing baseline code.
  __ Branch(&start);
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index 04285916bca162..75d99a34059b1b 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -151,6 +151,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {

   void Branch(Label* target);
   void Branch(int32_t target);
+  void BranchLong(Label* L);
   void Branch(Label* target, Condition cond, Register r1, const Operand& r2,
               Label::Distance near_jump = Label::kFar);
   void Branch(int32_t target, Condition cond, Register r1, const Operand& r2,
@@ -945,7 +946,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
                         Register rs, const Operand& rt);
   bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
                                Register rs, const Operand& rt);
-  void BranchLong(Label* L);
   void BranchAndLinkLong(Label* L);

   template