Version 12.3.200.1 (cherry-pick)

Merged f6a1b4e7060f2af02e02742404ca582d3776018c

Revert "[compiler] Add fake handles slots to safepoint table"

Change-Id: If054d56ba6a57b5d0dede8a195dca2508e57f644
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/5300294
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Owners-Override: Matthias Liedtke <mliedtke@chromium.org>
Cr-Commit-Position: refs/heads/12.3.200@{#2}
Cr-Branched-From: de06cf0f00c19e02aaceb3178bf33e3569475477-refs/heads/main@{#92318}
diff --git a/include/v8-version.h b/include/v8-version.h
index 8308144..1c295e9 100644
--- a/include/v8-version.h
+++ b/include/v8-version.h
@@ -11,7 +11,7 @@
#define V8_MAJOR_VERSION 12
#define V8_MINOR_VERSION 3
#define V8_BUILD_NUMBER 200
-#define V8_PATCH_LEVEL 0
+#define V8_PATCH_LEVEL 1
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/compiler/backend/arm/code-generator-arm.cc b/src/compiler/backend/arm/code-generator-arm.cc
index b7278f3..f0613fb 100644
--- a/src/compiler/backend/arm/code-generator-arm.cc
+++ b/src/compiler/backend/arm/code-generator-arm.cc
@@ -3842,17 +3842,6 @@
}
}
- if (!frame()->tagged_slots().IsEmpty()) {
- UseScratchRegisterScope temps(masm());
- Register zero = temps.Acquire();
- __ mov(zero, Operand(0));
- for (int spill_slot : frame()->tagged_slots()) {
- FrameOffset offset = frame_access_state()->GetFrameOffset(spill_slot);
- Register base = offset.from_stack_pointer() ? sp : fp;
- __ str(zero, MemOperand(base, offset.offset()));
- }
- }
-
if (!saves_fp.is_empty()) {
// Save callee-saved FP registers.
static_assert(DwVfpRegister::kNumRegisters == 32);
diff --git a/src/compiler/backend/arm/instruction-selector-arm.cc b/src/compiler/backend/arm/instruction-selector-arm.cc
index 1bd87c7..6f03675 100644
--- a/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -736,8 +736,7 @@
template <typename Adapter>
void InstructionSelectorT<Adapter>::VisitStackSlot(node_t node) {
StackSlotRepresentation rep = this->stack_slot_representation_of(node);
- int slot =
- frame_->AllocateSpillSlot(rep.size(), rep.alignment(), rep.is_tagged());
+ int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
diff --git a/src/compiler/backend/arm64/code-generator-arm64.cc b/src/compiler/backend/arm64/code-generator-arm64.cc
index eb5f373..a838774 100644
--- a/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -3391,12 +3391,6 @@
__ Claim(required_slots);
}
- for (int spill_slot : frame()->tagged_slots()) {
- FrameOffset offset = frame_access_state()->GetFrameOffset(spill_slot);
- Register base = offset.from_stack_pointer() ? sp : fp;
- __ str(xzr, MemOperand(base, offset.offset()));
- }
-
// Save FP registers.
DCHECK_IMPLIES(saves_fp.Count() != 0,
saves_fp.bits() == CPURegList::GetCalleeSavedV().bits());
diff --git a/src/compiler/backend/arm64/instruction-selector-arm64.cc b/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 7dce058..051ffe2 100644
--- a/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -1145,8 +1145,7 @@
template <typename Adapter>
void InstructionSelectorT<Adapter>::VisitStackSlot(node_t node) {
StackSlotRepresentation rep = this->stack_slot_representation_of(node);
- int slot =
- frame_->AllocateSpillSlot(rep.size(), rep.alignment(), rep.is_tagged());
+ int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
diff --git a/src/compiler/backend/code-generator.cc b/src/compiler/backend/code-generator.cc
index 4c7129e..bf31064 100644
--- a/src/compiler/backend/code-generator.cc
+++ b/src/compiler/backend/code-generator.cc
@@ -535,11 +535,6 @@
void CodeGenerator::RecordSafepoint(ReferenceMap* references, int pc_offset) {
auto safepoint = safepoints()->DefineSafepoint(masm(), pc_offset);
-
- for (int tagged : frame()->tagged_slots()) {
- safepoint.DefineTaggedStackSlot(tagged);
- }
-
int frame_header_offset = frame()->GetFixedSlotCount();
for (const InstructionOperand& operand : references->reference_operands()) {
if (operand.IsStackSlot()) {
diff --git a/src/compiler/backend/ia32/code-generator-ia32.cc b/src/compiler/backend/ia32/code-generator-ia32.cc
index 6a911d7..eef1025 100644
--- a/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -4103,12 +4103,6 @@
}
}
- for (int spill_slot : frame()->tagged_slots()) {
- FrameOffset offset = frame_access_state()->GetFrameOffset(spill_slot);
- Register base = offset.from_stack_pointer() ? esp : ebp;
- __ mov(Operand(base, offset.offset()), Immediate(0));
- }
-
if (!saves.is_empty()) { // Save callee-saved registers.
DCHECK(!info()->is_osr());
for (Register reg : base::Reversed(saves)) {
diff --git a/src/compiler/backend/ia32/instruction-selector-ia32.cc b/src/compiler/backend/ia32/instruction-selector-ia32.cc
index 116943f..bb8c3ab 100644
--- a/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -877,8 +877,7 @@
template <typename Adapter>
void InstructionSelectorT<Adapter>::VisitStackSlot(node_t node) {
StackSlotRepresentation rep = this->stack_slot_representation_of(node);
- int slot =
- frame_->AllocateSpillSlot(rep.size(), rep.alignment(), rep.is_tagged());
+ int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
diff --git a/src/compiler/backend/instruction-selector-adapter.h b/src/compiler/backend/instruction-selector-adapter.h
index 1ac063b..5e59e17 100644
--- a/src/compiler/backend/instruction-selector-adapter.h
+++ b/src/compiler/backend/instruction-selector-adapter.h
@@ -1305,8 +1305,7 @@
DCHECK(is_stack_slot(node));
const turboshaft::StackSlotOp& stack_slot =
graph_->Get(node).Cast<turboshaft::StackSlotOp>();
- return StackSlotRepresentation(stack_slot.size, stack_slot.alignment,
- stack_slot.is_tagged);
+ return StackSlotRepresentation(stack_slot.size, stack_slot.alignment);
}
bool is_integer_constant(node_t node) const {
if (const auto constant =
diff --git a/src/compiler/backend/x64/code-generator-x64.cc b/src/compiler/backend/x64/code-generator-x64.cc
index 274ca85..d4faf79 100644
--- a/src/compiler/backend/x64/code-generator-x64.cc
+++ b/src/compiler/backend/x64/code-generator-x64.cc
@@ -7128,12 +7128,6 @@
}
}
- for (int spill_slot : frame()->tagged_slots()) {
- FrameOffset offset = frame_access_state()->GetFrameOffset(spill_slot);
- Register base = offset.from_stack_pointer() ? rsp : rbp;
- __ movq(Operand(base, offset.offset()), Immediate(0));
- }
-
if (!saves_fp.is_empty()) { // Save callee-saved XMM registers.
const uint32_t saves_fp_count = saves_fp.Count();
const int stack_size = saves_fp_count * kQuadWordSize;
diff --git a/src/compiler/backend/x64/instruction-selector-x64.cc b/src/compiler/backend/x64/instruction-selector-x64.cc
index 09b57b4..9650326 100644
--- a/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -1036,8 +1036,7 @@
template <typename Adapter>
void InstructionSelectorT<Adapter>::VisitStackSlot(node_t node) {
StackSlotRepresentation rep = this->stack_slot_representation_of(node);
- int slot =
- frame_->AllocateSpillSlot(rep.size(), rep.alignment(), rep.is_tagged());
+ int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
diff --git a/src/compiler/fast-api-calls.h b/src/compiler/fast-api-calls.h
index c05e302..d6916d7 100644
--- a/src/compiler/fast-api-calls.h
+++ b/src/compiler/fast-api-calls.h
@@ -70,7 +70,7 @@
#else
// With indirect locals, the argument has to be stored on the stack and the
// slot address is passed.
- Node* stack_slot = __ StackSlot(sizeof(uintptr_t), alignof(uintptr_t), true);
+ Node* stack_slot = __ StackSlot(sizeof(uintptr_t), alignof(uintptr_t));
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
stack_slot, 0, __ BitcastTaggedToWord(argument));
diff --git a/src/compiler/frame.cc b/src/compiler/frame.cc
index 7dc7a87..6413adf 100644
--- a/src/compiler/frame.cc
+++ b/src/compiler/frame.cc
@@ -8,11 +8,10 @@
namespace internal {
namespace compiler {
-Frame::Frame(int fixed_frame_size_in_slots, Zone* zone)
+Frame::Frame(int fixed_frame_size_in_slots)
: fixed_slot_count_(fixed_frame_size_in_slots),
allocated_registers_(nullptr),
- allocated_double_registers_(nullptr),
- zone_(zone) {
+ allocated_double_registers_(nullptr) {
slot_allocator_.AllocateUnaligned(fixed_frame_size_in_slots);
}
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
index 6786698..96eb901 100644
--- a/src/compiler/frame.h
+++ b/src/compiler/frame.h
@@ -90,7 +90,7 @@
//
class V8_EXPORT_PRIVATE Frame : public ZoneObject {
public:
- explicit Frame(int fixed_frame_size_in_slots, Zone* zone);
+ explicit Frame(int fixed_frame_size_in_slots);
Frame(const Frame&) = delete;
Frame& operator=(const Frame&) = delete;
@@ -135,11 +135,9 @@
slot_allocator_.AllocateUnaligned(count);
}
- int AllocateSpillSlot(int width, int alignment = 0, bool is_tagged = false) {
+ int AllocateSpillSlot(int width, int alignment = 0) {
DCHECK_EQ(GetTotalFrameSlotCount(),
fixed_slot_count_ + spill_slot_count_ + return_slot_count_);
- DCHECK_IMPLIES(is_tagged, width == sizeof(uintptr_t));
- DCHECK_IMPLIES(is_tagged, alignment == sizeof(uintptr_t));
// Never allocate spill slots after the callee-saved slots are defined.
DCHECK(!spill_slots_finished_);
DCHECK(!frame_aligned_);
@@ -165,9 +163,7 @@
int end = slot_allocator_.Size();
spill_slot_count_ += end - old_end;
- int result_slot = slot + slots - 1;
- if (is_tagged) tagged_slots_bits_.Add(result_slot, zone_);
- return result_slot;
+ return slot + slots - 1;
}
void EnsureReturnSlots(int count) {
@@ -185,8 +181,6 @@
return slot_allocator_.Size() - 1;
}
- const GrowableBitVector& tagged_slots() const { return tagged_slots_bits_; }
-
private:
int fixed_slot_count_;
int spill_slot_count_ = 0;
@@ -196,8 +190,6 @@
AlignedSlotAllocator slot_allocator_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
- Zone* zone_;
- GrowableBitVector tagged_slots_bits_;
#if DEBUG
bool spill_slots_finished_ = false;
bool frame_aligned_ = false;
diff --git a/src/compiler/graph-assembler.cc b/src/compiler/graph-assembler.cc
index 7a12075..40f3bd6 100644
--- a/src/compiler/graph-assembler.cc
+++ b/src/compiler/graph-assembler.cc
@@ -1011,10 +1011,9 @@
graph()->NewNode(common()->Unreachable(), effect(), control()));
}
-TNode<RawPtrT> GraphAssembler::StackSlot(int size, int alignment,
- bool is_tagged) {
+TNode<RawPtrT> GraphAssembler::StackSlot(int size, int alignment) {
return AddNode<RawPtrT>(
- graph()->NewNode(machine()->StackSlot(size, alignment, is_tagged)));
+ graph()->NewNode(machine()->StackSlot(size, alignment)));
}
Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset,
diff --git a/src/compiler/graph-assembler.h b/src/compiler/graph-assembler.h
index f7c8b77..6e67842 100644
--- a/src/compiler/graph-assembler.h
+++ b/src/compiler/graph-assembler.h
@@ -401,7 +401,7 @@
}
Node* Checkpoint(FrameState frame_state);
- TNode<RawPtrT> StackSlot(int size, int alignment, bool is_tagged = false);
+ TNode<RawPtrT> StackSlot(int size, int alignment);
Node* Store(StoreRepresentation rep, Node* object, Node* offset, Node* value);
Node* Store(StoreRepresentation rep, Node* object, int offset, Node* value);
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 4af8c68..e34232f 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -1094,15 +1094,14 @@
SIMD_I16x8_LANES(V) V(8) V(9) V(10) V(11) V(12) V(13) V(14) V(15)
#define STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(V) \
- V(4, 0, false) \
- V(8, 0, false) V(16, 0, false) V(4, 4, false) V(8, 8, false) V(16, 16, false)
+ V(4, 0) V(8, 0) V(16, 0) V(4, 4) V(8, 8) V(16, 16)
struct StackSlotOperator : public Operator1<StackSlotRepresentation> {
- explicit StackSlotOperator(int size, int alignment, bool is_tagged)
+ explicit StackSlotOperator(int size, int alignment)
: Operator1<StackSlotRepresentation>(
IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow,
"StackSlot", 0, 0, 0, 1, 0, 0,
- StackSlotRepresentation(size, alignment, is_tagged)) {}
+ StackSlotRepresentation(size, alignment)) {}
};
struct MachineOperatorGlobalCache {
@@ -1292,14 +1291,14 @@
#undef LOAD_TRANSFORM_KIND
#endif // V8_ENABLE_WEBASSEMBLY
-#define STACKSLOT(Size, Alignment, IsTagged) \
- struct StackSlotOfSize##Size##OfAlignment##Alignment##IsTagged##Operator \
- final : public StackSlotOperator { \
- StackSlotOfSize##Size##OfAlignment##Alignment##IsTagged##Operator() \
- : StackSlotOperator(Size, Alignment, IsTagged) {} \
- }; \
- StackSlotOfSize##Size##OfAlignment##Alignment##IsTagged##Operator \
- kStackSlotOfSize##Size##OfAlignment##Alignment##IsTagged;
+#define STACKSLOT(Size, Alignment) \
+ struct StackSlotOfSize##Size##OfAlignment##Alignment##Operator final \
+ : public StackSlotOperator { \
+ StackSlotOfSize##Size##OfAlignment##Alignment##Operator() \
+ : StackSlotOperator(Size, Alignment) {} \
+ }; \
+ StackSlotOfSize##Size##OfAlignment##Alignment##Operator \
+ kStackSlotOfSize##Size##OfAlignment##Alignment;
STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(STACKSLOT)
#undef STACKSLOT
@@ -2018,19 +2017,18 @@
}
#endif // V8_ENABLE_WEBASSEMBLY
-const Operator* MachineOperatorBuilder::StackSlot(int size, int alignment,
- bool is_tagged) {
+const Operator* MachineOperatorBuilder::StackSlot(int size, int alignment) {
DCHECK_LE(0, size);
DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || alignment == 16);
-#define CASE_CACHED_SIZE(Size, Alignment, IsTagged) \
- if (size == Size && alignment == Alignment && is_tagged == IsTagged) { \
- return &cache_.kStackSlotOfSize##Size##OfAlignment##Alignment##IsTagged; \
+#define CASE_CACHED_SIZE(Size, Alignment) \
+ if (size == Size && alignment == Alignment) { \
+ return &cache_.kStackSlotOfSize##Size##OfAlignment##Alignment; \
}
STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(CASE_CACHED_SIZE)
#undef CASE_CACHED_SIZE
- return zone_->New<StackSlotOperator>(size, alignment, is_tagged);
+ return zone_->New<StackSlotOperator>(size, alignment);
}
const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep,
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 5d26785..b274bb4 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -266,17 +266,15 @@
class StackSlotRepresentation final {
public:
- StackSlotRepresentation(int size, int alignment, bool is_tagged)
- : size_(size), alignment_(alignment), is_tagged_(is_tagged) {}
+ StackSlotRepresentation(int size, int alignment)
+ : size_(size), alignment_(alignment) {}
int size() const { return size_; }
int alignment() const { return alignment_; }
- bool is_tagged() const { return is_tagged_; }
private:
int size_;
int alignment_;
- bool is_tagged_;
};
V8_EXPORT_PRIVATE bool operator==(StackSlotRepresentation,
@@ -1201,8 +1199,7 @@
// unaligned store [base + index], value
const Operator* UnalignedStore(UnalignedStoreRepresentation rep);
- const Operator* StackSlot(int size, int alignment = 0,
- bool is_tagged = false);
+ const Operator* StackSlot(int size, int alignment = 0);
const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
// Note: Only use this operator to:
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 4329312..c96f04b 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -569,7 +569,7 @@
fixed_frame_size =
call_descriptor->CalculateFixedFrameSize(info()->code_kind());
}
- frame_ = codegen_zone()->New<Frame>(fixed_frame_size, codegen_zone());
+ frame_ = codegen_zone()->New<Frame>(fixed_frame_size);
if (osr_helper_.has_value()) osr_helper()->SetupFrame(frame());
}
diff --git a/src/compiler/turboshaft/assembler.h b/src/compiler/turboshaft/assembler.h
index 734e725..8a7d63e 100644
--- a/src/compiler/turboshaft/assembler.h
+++ b/src/compiler/turboshaft/assembler.h
@@ -2384,8 +2384,8 @@
FrameConstantOp::Kind::kParentFramePointer);
}
- V<WordPtr> StackSlot(int size, int alignment, bool is_tagged = false) {
- return ReduceIfReachableStackSlot(size, alignment, is_tagged);
+ V<WordPtr> StackSlot(int size, int alignment) {
+ return ReduceIfReachableStackSlot(size, alignment);
}
OpIndex LoadRootRegister() { return ReduceIfReachableLoadRootRegister(); }
diff --git a/src/compiler/turboshaft/fast-api-call-reducer.h b/src/compiler/turboshaft/fast-api-call-reducer.h
index d9da244..9323531 100644
--- a/src/compiler/turboshaft/fast-api-call-reducer.h
+++ b/src/compiler/turboshaft/fast-api-call-reducer.h
@@ -136,8 +136,7 @@
#else
// With indirect locals, the argument has to be stored on the stack and the
// slot address is passed.
- OpIndex stack_slot =
- __ StackSlot(sizeof(uintptr_t), alignof(uintptr_t), true);
+ OpIndex stack_slot = __ StackSlot(sizeof(uintptr_t), alignof(uintptr_t));
__ StoreOffHeap(stack_slot, __ BitcastTaggedToWordPtr(argument),
MemoryRepresentation::UintPtr());
return stack_slot;
diff --git a/src/compiler/turboshaft/graph-builder.cc b/src/compiler/turboshaft/graph-builder.cc
index e98254b..4b0c86d 100644
--- a/src/compiler/turboshaft/graph-builder.cc
+++ b/src/compiler/turboshaft/graph-builder.cc
@@ -1165,10 +1165,10 @@
case IrOpcode::kLoadParentFramePointer:
return __ ParentFramePointer();
- case IrOpcode::kStackSlot: {
- StackSlotRepresentation rep = StackSlotRepresentationOf(op);
- return __ StackSlot(rep.size(), rep.alignment(), rep.is_tagged());
- }
+ case IrOpcode::kStackSlot:
+ return __ StackSlot(StackSlotRepresentationOf(op).size(),
+ StackSlotRepresentationOf(op).alignment());
+
case IrOpcode::kBranch:
DCHECK_EQ(block->SuccessorCount(), 2);
__ Branch(Map(node->InputAt(0)), Map(block->SuccessorAt(0)),
diff --git a/src/compiler/turboshaft/operations.h b/src/compiler/turboshaft/operations.h
index 73a36fb..d04e17d 100644
--- a/src/compiler/turboshaft/operations.h
+++ b/src/compiler/turboshaft/operations.h
@@ -3192,7 +3192,6 @@
struct StackSlotOp : FixedArityOperationT<0, StackSlotOp> {
int size;
int alignment;
- bool is_tagged;
// We can freely reorder stack slot operations, but must not de-duplicate
// them.
@@ -3206,10 +3205,9 @@
return {};
}
- StackSlotOp(int size, int alignment, bool is_tagged = false)
- : size(size), alignment(alignment), is_tagged(is_tagged) {}
+ StackSlotOp(int size, int alignment) : size(size), alignment(alignment) {}
void Validate(const Graph& graph) const {}
- auto options() const { return std::tuple{size, alignment, is_tagged}; }
+ auto options() const { return std::tuple{size, alignment}; }
};
// Values that are constant for the current stack frame/invocation.
diff --git a/src/compiler/turboshaft/recreate-schedule.cc b/src/compiler/turboshaft/recreate-schedule.cc
index a42c98e..d907c6a 100644
--- a/src/compiler/turboshaft/recreate-schedule.cc
+++ b/src/compiler/turboshaft/recreate-schedule.cc
@@ -1233,7 +1233,7 @@
{GetNode(op.stack_limit())});
}
Node* ScheduleBuilder::ProcessOperation(const StackSlotOp& op) {
- return AddNode(machine.StackSlot(op.size, op.alignment, op.is_tagged), {});
+ return AddNode(machine.StackSlot(op.size, op.alignment), {});
}
Node* ScheduleBuilder::ProcessOperation(const FrameConstantOp& op) {
switch (op.kind) {
diff --git a/src/utils/bit-vector.h b/src/utils/bit-vector.h
index 9cd4d11..f07c8d4 100644
--- a/src/utils/bit-vector.h
+++ b/src/utils/bit-vector.h
@@ -304,8 +304,6 @@
bits_.Add(value);
}
- bool IsEmpty() const { return bits_.IsEmpty(); }
-
void Clear() { bits_.Clear(); }
int length() const { return bits_.length(); }
diff --git a/test/cctest/compiler/test-code-generator.cc b/test/cctest/compiler/test-code-generator.cc
index d18ffbe..a490131 100644
--- a/test/cctest/compiler/test-code-generator.cc
+++ b/test/cctest/compiler/test-code-generator.cc
@@ -1116,8 +1116,7 @@
CodeKind::FOR_TESTING),
linkage_(environment->test_descriptor()),
frame_(environment->test_descriptor()->CalculateFixedFrameSize(
- CodeKind::FOR_TESTING),
- environment->main_zone()) {
+ CodeKind::FOR_TESTING)) {
// Pick half of the stack parameters at random and move them into spill
// slots, separated by `extra_stack_space` bytes.
// When testing a move with stack slots using CheckAssembleMove or
diff --git a/test/unittests/compiler/frame-unittest.cc b/test/unittests/compiler/frame-unittest.cc
index 6ca9223..f25a6aa 100644
--- a/test/unittests/compiler/frame-unittest.cc
+++ b/test/unittests/compiler/frame-unittest.cc
@@ -25,7 +25,7 @@
};
TEST_F(FrameTest, Constructor) {
- Frame frame(kFixed3, nullptr);
+ Frame frame(kFixed3);
EXPECT_EQ(kFixed3, frame.GetTotalFrameSlotCount());
EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
EXPECT_EQ(0, frame.GetSpillSlotCount());
@@ -33,7 +33,7 @@
}
TEST_F(FrameTest, ReserveSpillSlots) {
- Frame frame(kFixed3, nullptr);
+ Frame frame(kFixed3);
constexpr int kReserve2 = 2;
frame.ReserveSpillSlots(kReserve2);
@@ -44,7 +44,7 @@
}
TEST_F(FrameTest, EnsureReturnSlots) {
- Frame frame(kFixed3, nullptr);
+ Frame frame(kFixed3);
constexpr int kReturn3 = 3;
constexpr int kReturn5 = 5;
constexpr int kReturn2 = 2;
@@ -71,7 +71,7 @@
}
TEST_F(FrameTest, AllocateSavedCalleeRegisterSlots) {
- Frame frame(kFixed3, nullptr);
+ Frame frame(kFixed3);
constexpr int kFirstSlots = 2;
constexpr int kSecondSlots = 3;
@@ -90,7 +90,7 @@
}
TEST_F(FrameTest, AlignSavedCalleeRegisterSlots) {
- Frame frame(kFixed3, nullptr);
+ Frame frame(kFixed3);
constexpr int kSlots = 2; // An even number leaves the slots misaligned.
frame.AllocateSavedCalleeRegisterSlots(kSlots);
@@ -111,7 +111,7 @@
}
TEST_F(FrameTest, AllocateSpillSlotAligned) {
- Frame frame(kFixed1, nullptr);
+ Frame frame(kFixed1);
// Allocate a quad slot, which must add 3 padding slots. Frame returns the
// last index of the 4 slot allocation.
@@ -142,7 +142,7 @@
}
TEST_F(FrameTest, AllocateSpillSlotAlignedWithReturns) {
- Frame frame(kFixed3, nullptr);
+ Frame frame(kFixed3);
constexpr int kReturn3 = 3;
constexpr int kReturn5 = 5;
@@ -167,7 +167,7 @@
}
TEST_F(FrameTest, AllocateSpillSlotAndEndSpillArea) {
- Frame frame(kFixed3, nullptr);
+ Frame frame(kFixed3);
// Allocate a double slot, which must add 1 padding slot.
EXPECT_EQ(kFixed3 + 2, frame.AllocateSpillSlot(2 * kSlotSize, 2 * kSlotSize));
@@ -189,7 +189,7 @@
}
TEST_F(FrameTest, AllocateSpillSlotOverAligned) {
- Frame frame(kFixed1, nullptr);
+ Frame frame(kFixed1);
// Allocate a 4-aligned double slot, which must add 3 padding slots. This
// also terminates the slot area. Returns the starting slot in this case.
@@ -208,7 +208,7 @@
}
TEST_F(FrameTest, AllocateSpillSlotUnderAligned) {
- Frame frame(kFixed1, nullptr);
+ Frame frame(kFixed1);
// Allocate a 1-aligned double slot. This also terminates the slot area.
EXPECT_EQ(kFixed1 + 1, frame.AllocateSpillSlot(2 * kSlotSize, kSlotSize));
@@ -219,7 +219,7 @@
}
TEST_F(FrameTest, AlignFrame) {
- Frame frame(kFixed3, nullptr);
+ Frame frame(kFixed3);
constexpr int kReturn3 = 3;
frame.EnsureReturnSlots(kReturn3);