Version 11.3.175.1 (cherry-pick)
Merged dd47b25a6c31543de75adfdbda551a28a6a8aac2
Revert "[wasm-gc] Make GC and inlining configurable by Origin Trial"
Change-Id: If4a5d5cdd851cdfdd03a54f15969d0164d1fdc65
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4334573
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Owners-Override: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/11.3.175@{#2}
Cr-Branched-From: 692d8a972e1d58212f89de55df1d530aecdaeef8-refs/heads/main@{#86428}
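[Note] The net effect of this revert is that Wasm GC, stringref, typed-funcref, and inlining checks throughout the compiler go back to consulting the process-wide v8_flags instead of the per-NativeModule WasmFeatures that the Origin Trial plumbing had introduced. A minimal standalone sketch of the two gating styles (all names here are hypothetical stand-ins, not V8 API):

    #include <cstdio>

    // Process-wide switches, standing in for v8_flags.
    struct Flags {
      bool experimental_wasm_gc = false;
    };
    Flags g_flags;

    // Per-module feature set, standing in for wasm::WasmFeatures as
    // populated from an Origin Trial callback.
    struct Features {
      bool gc = false;
    };

    // Origin-Trial style: each module carries its own answer.
    bool UseGcNodes(const Features& enabled) { return enabled.gc; }

    // Post-revert style: one global switch decides for every module.
    bool UseGcNodesGlobal() { return g_flags.experimental_wasm_gc; }

    int main() {
      Features per_module{/*gc=*/true};
      std::printf("per-module: %d, global: %d\n", UseGcNodes(per_module),
                  UseGcNodesGlobal());
    }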
diff --git a/include/v8-version.h b/include/v8-version.h
index e1ad4f4..fc65c29 100644
--- a/include/v8-version.h
+++ b/include/v8-version.h
@@ -11,7 +11,7 @@
#define V8_MAJOR_VERSION 11
#define V8_MINOR_VERSION 3
#define V8_BUILD_NUMBER 175
-#define V8_PATCH_LEVEL 0
+#define V8_PATCH_LEVEL 1
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index f0377c0..79169bf 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -399,14 +399,14 @@
// TODO(7748): It would be useful to also support inlining of wasm functions
// if they are surrounded by a try block which requires further work, so that
// the wasm trap gets forwarded to the corresponding catch block.
- if (native_module->enabled_features().has_gc() &&
- v8_flags.experimental_wasm_js_inlining && fct_index != -1 &&
- native_module && native_module->module() == wasm_module_ &&
+ if (v8_flags.experimental_wasm_gc && v8_flags.experimental_wasm_js_inlining &&
+ fct_index != -1 && native_module &&
+ native_module->module() == wasm_module_ &&
!NodeProperties::IsExceptionalCall(node)) {
Graph::SubgraphScope graph_scope(graph());
WasmGraphBuilder builder(nullptr, zone(), jsgraph(), sig, source_positions_,
WasmGraphBuilder::kNoSpecialParameterMode,
- isolate(), native_module->enabled_features());
+ isolate());
can_inline_body = builder.TryWasmInlining(fct_index, native_module);
inlinee_body_start = graph()->start();
inlinee_body_end = graph()->end();
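[Note] As a side effect, the restored condition is also better ordered: the pre-revert code called native_module->enabled_features() before the native_module null test on the following line, while the restored code tests native_module before dereferencing it. A standalone illustration of why operand order matters under && short-circuiting (toy types, not V8 code):

    #include <cassert>

    struct Module {
      bool flag = true;
    };

    // && evaluates left to right and stops early, so the null test must
    // come before any dereference.
    bool SafeCheck(const Module* m) { return m != nullptr && m->flag; }

    // Reversed order would dereference a null pointer:
    //   return m->flag && m != nullptr;  // UB when m == nullptr

    int main() {
      assert(!SafeCheck(nullptr));
      Module mod;
      assert(SafeCheck(&mod));
    }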
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 9a39db0..5db2da1 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -2196,13 +2196,12 @@
void Run(PipelineData* data, Zone* temp_zone,
MachineOperatorReducer::SignallingNanPropagation
- signalling_nan_propagation,
- wasm::WasmFeatures features) {
+ signalling_nan_propagation) {
// Run optimizations in two rounds: First one around load elimination and
// then one around branch elimination. This is because those two
// optimizations sometimes display quadratic complexity when run together.
// We only need load elimination for managed objects.
- if (features.has_gc()) {
+ if (v8_flags.experimental_wasm_gc) {
GraphReducer graph_reducer(temp_zone, data->graph(),
&data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(),
@@ -3438,7 +3437,6 @@
ZoneVector<WasmInliningPosition>* inlining_positions) {
auto* wasm_engine = wasm::GetWasmEngine();
const wasm::WasmModule* module = env->module;
- wasm::WasmFeatures features = env->enabled_features;
base::TimeTicks start_time;
if (V8_UNLIKELY(v8_flags.trace_wasm_compilation_times)) {
start_time = base::TimeTicks::Now();
@@ -3472,7 +3470,7 @@
#endif // V8_ENABLE_WASM_SIMD256_REVEC
data.BeginPhaseKind("V8.WasmOptimization");
- if (features.has_inlining()) {
+ if (v8_flags.wasm_inlining) {
pipeline.Run<WasmInliningPhase>(env, compilation_data, inlining_positions);
pipeline.RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
}
@@ -3489,7 +3487,7 @@
is_asm_js ? MachineOperatorReducer::kPropagateSignallingNan
: MachineOperatorReducer::kSilenceSignallingNan;
- if (features.has_gc() || features.has_stringref()) {
+ if (v8_flags.experimental_wasm_gc || v8_flags.experimental_wasm_stringref) {
pipeline.Run<WasmTypingPhase>(compilation_data.func_index);
pipeline.RunPrintAndVerify(WasmTypingPhase::phase_name(), true);
if (v8_flags.wasm_opt) {
@@ -3499,8 +3497,9 @@
}
// These proposals use gc nodes.
- if (features.has_gc() || features.has_typed_funcref() ||
- features.has_stringref()) {
+ if (v8_flags.experimental_wasm_gc ||
+ v8_flags.experimental_wasm_typed_funcref ||
+ v8_flags.experimental_wasm_stringref) {
pipeline.Run<WasmGCLoweringPhase>(module);
pipeline.RunPrintAndVerify(WasmGCLoweringPhase::phase_name(), true);
}
@@ -3513,7 +3512,7 @@
pipeline);
if (v8_flags.wasm_opt || is_asm_js) {
- pipeline.Run<WasmOptimizationPhase>(signalling_nan_propagation, features);
+ pipeline.Run<WasmOptimizationPhase>(signalling_nan_propagation);
pipeline.RunPrintAndVerify(WasmOptimizationPhase::phase_name(), true);
} else {
pipeline.Run<WasmBaseOptimizationPhase>();
@@ -3523,7 +3522,7 @@
pipeline.Run<MemoryOptimizationPhase>();
pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
- if (features.has_gc() && v8_flags.wasm_opt) {
+ if (v8_flags.experimental_wasm_gc && v8_flags.wasm_opt) {
// Run value numbering and machine operator reducer to optimize load/store
// address computation (in particular, reuse the address computation
// whenever possible).
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 0ff6428..8e1e700 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -132,13 +132,11 @@
wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
const wasm::FunctionSig* sig,
compiler::SourcePositionTable* source_position_table,
- Parameter0Mode parameter_mode, Isolate* isolate,
- wasm::WasmFeatures enabled_features)
+ Parameter0Mode parameter_mode, Isolate* isolate)
: gasm_(std::make_unique<WasmGraphAssembler>(mcgraph, zone)),
zone_(zone),
mcgraph_(mcgraph),
env_(env),
- enabled_features_(enabled_features),
has_simd_(ContainsSimd(sig)),
sig_(sig),
source_position_table_(source_position_table),
@@ -161,7 +159,7 @@
bool WasmGraphBuilder::TryWasmInlining(int fct_index,
wasm::NativeModule* native_module) {
DCHECK(v8_flags.experimental_wasm_js_inlining);
- DCHECK(native_module->enabled_features().has_gc());
+ DCHECK(v8_flags.experimental_wasm_gc);
DCHECK(native_module->HasWireBytes());
const wasm::WasmModule* module = native_module->module();
const wasm::WasmFunction& inlinee = module->functions[fct_index];
@@ -181,7 +179,7 @@
// If the inlinee was not validated before, do that now.
if (V8_UNLIKELY(!module->function_was_validated(fct_index))) {
wasm::WasmFeatures unused_detected_features;
- if (ValidateFunctionBody(enabled_features_, module,
+ if (ValidateFunctionBody(env_->enabled_features, module,
&unused_detected_features, inlinee_body)
.failed()) {
// At this point we cannot easily raise a compilation error any more.
@@ -331,7 +329,7 @@
}
Node* WasmGraphBuilder::RefNull(wasm::ValueType type) {
- return (enabled_features_.has_gc() && parameter_mode_ == kInstanceMode)
+ return (v8_flags.experimental_wasm_gc && parameter_mode_ == kInstanceMode)
? gasm_->Null(type)
: (type == wasm::kWasmExternRef || type == wasm::kWasmNullExternRef)
? LOAD_ROOT(NullValue, null_value)
@@ -2651,7 +2649,7 @@
}
Node* WasmGraphBuilder::IsNull(Node* object, wasm::ValueType type) {
- return (enabled_features_.has_gc() && parameter_mode_ == kInstanceMode)
+ return (v8_flags.experimental_wasm_gc && parameter_mode_ == kInstanceMode)
? gasm_->IsNull(object, type)
: gasm_->TaggedEqual(object, RefNull(type));
}
@@ -2920,7 +2918,7 @@
int32_scaled_key);
Node* sig_match = gasm_->Word32Equal(loaded_sig, expected_sig_id);
- if (enabled_features_.has_gc() &&
+ if (v8_flags.experimental_wasm_gc &&
!env_->module->types[sig_index].is_final) {
// Do a full subtyping check.
auto end_label = gasm_->MakeLabel();
@@ -6533,9 +6531,10 @@
compiler::SourcePositionTable* spt,
StubCallMode stub_mode, wasm::WasmFeatures features)
: WasmGraphBuilder(nullptr, zone, mcgraph, sig, spt, parameter_mode,
- isolate, features),
+ isolate),
module_(module),
- stub_mode_(stub_mode) {}
+ stub_mode_(stub_mode),
+ enabled_features_(features) {}
CallDescriptor* GetBigIntToI64CallDescriptor(bool needs_frame_state) {
return wasm::GetWasmEngine()->call_descriptors()->GetBigIntToI64Descriptor(
@@ -7959,6 +7958,7 @@
SetOncePointer<const Operator> float32_to_number_operator_;
SetOncePointer<const Operator> float64_to_number_operator_;
SetOncePointer<const Operator> tagged_to_float64_operator_;
+ wasm::WasmFeatures enabled_features_;
};
} // namespace
@@ -8764,7 +8764,7 @@
info.set_wasm_runtime_exception_support();
}
- if (env->enabled_features.has_gc()) info.set_allocation_folding();
+ if (v8_flags.experimental_wasm_gc) info.set_allocation_folding();
if (info.trace_turbo_json()) {
TurboCfgFile tcf;
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index fb69002..d4a7ed5 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -19,7 +19,6 @@
#include "src/runtime/runtime.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
-#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
@@ -54,6 +53,7 @@
class AssemblerBufferCache;
struct DecodeStruct;
class WasmCode;
+class WasmFeatures;
class WireBytesStorage;
enum class LoadTransformationKind : uint8_t;
enum Suspend : bool;
@@ -241,16 +241,15 @@
wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
const wasm::FunctionSig* sig,
compiler::SourcePositionTable* spt = nullptr)
- : WasmGraphBuilder(env, zone, mcgraph, sig, spt, kInstanceMode, nullptr,
- env->enabled_features) {}
+ : WasmGraphBuilder(env, zone, mcgraph, sig, spt, kInstanceMode, nullptr) {
+ }
V8_EXPORT_PRIVATE WasmGraphBuilder(wasm::CompilationEnv* env, Zone* zone,
MachineGraph* mcgraph,
const wasm::FunctionSig* sig,
compiler::SourcePositionTable* spt,
Parameter0Mode parameter_mode,
- Isolate* isolate,
- wasm::WasmFeatures enabled_features);
+ Isolate* isolate);
V8_EXPORT_PRIVATE ~WasmGraphBuilder();
@@ -877,10 +876,6 @@
Zone* const zone_;
MachineGraph* const mcgraph_;
wasm::CompilationEnv* const env_;
- // For the main WasmGraphBuilder class, this is identical to the features
- // field in {env_}, but the WasmWrapperGraphBuilder subclass doesn't have
- // that, so common code should use this field instead.
- wasm::WasmFeatures enabled_features_;
Node** parameters_;
diff --git a/src/compiler/wasm-inlining.cc b/src/compiler/wasm-inlining.cc
index ee8e152..c487543 100644
--- a/src/compiler/wasm-inlining.cc
+++ b/src/compiler/wasm-inlining.cc
@@ -37,7 +37,7 @@
}
int WasmInliner::GetCallCount(Node* call) {
- if (!env_->enabled_features.has_inlining()) return 0;
+ if (!v8_flags.wasm_speculative_inlining) return 0;
return mcgraph()->GetCallCount(call->id());
}
@@ -98,7 +98,7 @@
// If liftoff ran and collected call counts, only inline calls that have been
// invoked often, except for truly tiny functions.
- if (v8_flags.liftoff && env_->enabled_features.has_inlining() &&
+ if (v8_flags.liftoff && v8_flags.wasm_speculative_inlining &&
wire_byte_size >= 12 && call_count < min_count_for_inlining) {
Trace(call, inlinee_index, "not called often enough");
return NoChange();
diff --git a/src/execution/frames.cc b/src/execution/frames.cc
index 22a648f..1c30844 100644
--- a/src/execution/frames.cc
+++ b/src/execution/frames.cc
@@ -1294,7 +1294,7 @@
"WasmExitFrame has one slot more than WasmFrame");
int frame_header_size = WasmFrameConstants::kFixedFrameSizeFromFp;
- if (wasm_code->is_liftoff() && wasm_code->for_inlining()) {
+ if (wasm_code->is_liftoff() && v8_flags.wasm_speculative_inlining) {
// Frame has Wasm feedback slot.
frame_header_size += kSystemPointerSize;
}
diff --git a/src/execution/isolate.cc b/src/execution/isolate.cc
index 02ea865..51129f21 100644
--- a/src/execution/isolate.cc
+++ b/src/execution/isolate.cc
@@ -2956,19 +2956,6 @@
#endif
}
-bool Isolate::IsWasmInliningEnabled(Handle<Context> context) {
- // If Wasm GC is explicitly enabled via a callback, also enable inlining.
-#ifdef V8_ENABLE_WEBASSEMBLY
- if (wasm_gc_enabled_callback()) {
- v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
- return wasm_gc_enabled_callback()(api_context);
- }
- return v8_flags.experimental_wasm_inlining;
-#else
- return false;
-#endif
-}
-
Handle<Context> Isolate::GetIncumbentContext() {
JavaScriptStackFrameIterator it(this);
diff --git a/src/execution/isolate.h b/src/execution/isolate.h
index 36a1eb0..b6746d7 100644
--- a/src/execution/isolate.h
+++ b/src/execution/isolate.h
@@ -770,7 +770,6 @@
bool IsWasmGCEnabled(Handle<Context> context);
bool IsWasmStringRefEnabled(Handle<Context> context);
- bool IsWasmInliningEnabled(Handle<Context> context);
THREAD_LOCAL_TOP_ADDRESS(Context, pending_handler_context)
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h
index 0820ced..1e4fe5c 100644
--- a/src/flags/flag-definitions.h
+++ b/src/flags/flag-definitions.h
@@ -1224,10 +1224,6 @@
"enable final types as default for wasm-gc")
DEFINE_IMPLICATION(wasm_speculative_inlining, wasm_inlining)
DEFINE_WEAK_IMPLICATION(experimental_wasm_gc, wasm_speculative_inlining)
-// For historical reasons, both --wasm-inlining and --wasm-speculative-inlining
-// are aliases for --experimental-wasm-inlining.
-DEFINE_IMPLICATION(wasm_inlining, experimental_wasm_inlining)
-DEFINE_IMPLICATION(wasm_speculative_inlining, experimental_wasm_inlining)
DEFINE_BOOL(wasm_loop_unrolling, true,
"enable loop unrolling for wasm functions")
diff --git a/src/runtime/runtime-wasm.cc b/src/runtime/runtime-wasm.cc
index e87c677..6b70e86 100644
--- a/src/runtime/runtime-wasm.cc
+++ b/src/runtime/runtime-wasm.cc
@@ -265,9 +265,9 @@
RUNTIME_FUNCTION(Runtime_WasmAllocateFeedbackVector) {
ClearThreadInWasmScope wasm_flag(isolate);
+ DCHECK(v8_flags.wasm_speculative_inlining);
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- DCHECK(wasm::WasmFeatures::FromIsolate(isolate).has_inlining());
Handle<WasmInstanceObject> instance(WasmInstanceObject::cast(args[0]),
isolate);
int declared_func_index = args.smi_value_at(1);
@@ -283,11 +283,8 @@
const wasm::WasmModule* module = native_module->module();
int func_index = declared_func_index + module->num_imported_functions;
- int num_slots = native_module->enabled_features().has_inlining()
- ? NumFeedbackSlots(module, func_index)
- : 0;
- Handle<FixedArray> vector =
- isolate->factory()->NewFixedArrayWithZeroes(num_slots);
+ Handle<FixedArray> vector = isolate->factory()->NewFixedArrayWithZeroes(
+ NumFeedbackSlots(module, func_index));
DCHECK_EQ(instance->feedback_vectors().get(declared_func_index), Smi::zero());
instance->feedback_vectors().set(declared_func_index, *vector);
return *vector;
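[Note] The caller can drop its has_inlining() branch because NumFeedbackSlots itself now returns 0 when --wasm-speculative-inlining is off (see the wasm-module.cc hunk further below), so callers allocate an empty vector in that case anyway. A toy model of this guard-moves-into-the-callee pattern (names hypothetical, not V8 code):

    // The guard lives in the callee now; callers call unconditionally.
    bool g_inlining_enabled = false;  // stands in for the v8 flag

    int NumSlots(int raw_slots) {
      if (!g_inlining_enabled) return 0;
      return raw_slots;
    }

    int VectorSizeForFunction(int raw_slots) {
      return NumSlots(raw_slots);  // no flag check needed at the call site
    }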
diff --git a/src/wasm/baseline/arm/liftoff-assembler-arm.h b/src/wasm/baseline/arm/liftoff-assembler-arm.h
index c0feba5..68cdeb4 100644
--- a/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -483,14 +483,13 @@
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder,
- bool feedback_vector_slot) {
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (feedback_vector_slot) {
+ if (v8_flags.wasm_speculative_inlining) {
frame_size -= kSystemPointerSize;
}
diff --git a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index b53083e..b620a30 100644
--- a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -289,15 +289,14 @@
}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder,
- bool feedback_vector_slot) {
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector, and an unused
// slot for alignment.
- if (feedback_vector_slot) {
+ if (v8_flags.wasm_speculative_inlining) {
frame_size = std::max(frame_size - 2 * kSystemPointerSize, 0);
}
diff --git a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index c3ecfca..31432d6 100644
--- a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -216,14 +216,13 @@
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder,
- bool feedback_vector_slot) {
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (feedback_vector_slot) {
+ if (v8_flags.wasm_speculative_inlining) {
frame_size -= kSystemPointerSize;
}
DCHECK_EQ(0, frame_size % kSystemPointerSize);
diff --git a/src/wasm/baseline/liftoff-assembler.h b/src/wasm/baseline/liftoff-assembler.h
index d6dec0a..2e17ee5 100644
--- a/src/wasm/baseline/liftoff-assembler.h
+++ b/src/wasm/baseline/liftoff-assembler.h
@@ -769,8 +769,7 @@
inline void PrepareTailCall(int num_callee_stack_params,
int stack_param_delta);
inline void AlignFrameSize();
- inline void PatchPrepareStackFrame(int offset, SafepointTableBuilder*,
- bool feedback_vector_slot);
+ inline void PatchPrepareStackFrame(int offset, SafepointTableBuilder*);
inline void FinishCode();
inline void AbortCompilation();
inline static constexpr int StaticStackFrameSize();
diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc
index 9aefb40..bf61ebc 100644
--- a/src/wasm/baseline/liftoff-compiler.cc
+++ b/src/wasm/baseline/liftoff-compiler.cc
@@ -873,7 +873,7 @@
__ CodeEntry();
- if (decoder->enabled_.has_inlining()) {
+ if (v8_flags.wasm_speculative_inlining) {
CODE_COMMENT("frame setup");
int declared_func_index =
func_index_ - env_->module->num_imported_functions;
@@ -1079,8 +1079,7 @@
}
DCHECK_EQ(frame_size, __ GetTotalFrameSize());
__ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
- &safepoint_table_builder_,
- decoder->enabled_.has_inlining());
+ &safepoint_table_builder_);
__ FinishCode();
safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCountForGC());
// Emit the handler table.
@@ -1096,7 +1095,7 @@
DidAssemblerBailout(decoder);
DCHECK_EQ(num_exceptions_, 0);
- if (decoder->enabled_.has_inlining() &&
+ if (v8_flags.wasm_speculative_inlining &&
!encountered_call_instructions_.empty()) {
// Update the call targets stored in the WasmModule.
TypeFeedbackStorage& type_feedback = env_->module->type_feedback;
@@ -7433,7 +7432,7 @@
// One slot would be enough for call_direct, but would make index
// computations much more complicated.
size_t vector_slot = encountered_call_instructions_.size() * 2;
- if (decoder->enabled_.has_inlining()) {
+ if (v8_flags.wasm_speculative_inlining) {
encountered_call_instructions_.push_back(imm.index);
}
@@ -7476,7 +7475,7 @@
} else {
// Inlining direct calls isn't speculative, but existence of the
// feedback vector currently depends on this flag.
- if (decoder->enabled_.has_inlining()) {
+ if (v8_flags.wasm_speculative_inlining) {
LiftoffRegister vector = __ GetUnusedRegister(kGpReg, {});
__ Fill(vector, liftoff::kFeedbackVectorOffset, kIntPtrKind);
__ IncrementSmi(vector,
@@ -7589,7 +7588,7 @@
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapFuncSigMismatch);
__ DropValues(1);
- if (decoder->enabled_.has_gc() &&
+ if (v8_flags.experimental_wasm_gc &&
!decoder->module_->types[imm.sig_imm.index].is_final) {
Label success_label;
FREEZE_STATE(frozen);
@@ -7747,7 +7746,7 @@
Register target_reg = no_reg, instance_reg = no_reg;
- if (decoder->enabled_.has_inlining()) {
+ if (v8_flags.wasm_speculative_inlining) {
LiftoffRegList pinned;
LiftoffRegister func_ref = pinned.set(__ PopToRegister(pinned));
LiftoffRegister vector = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
@@ -7774,7 +7773,7 @@
target_reg = LiftoffRegister(kReturnRegister0).gp();
instance_reg = LiftoffRegister(kReturnRegister1).gp();
- } else { // decoder->enabled_.has_inlining()
+ } else { // v8_flags.wasm_speculative_inlining
// Non-feedback-collecting version.
// Executing a write barrier needs temp registers; doing this on a
// conditional branch confuses the LiftoffAssembler's register management.
@@ -7829,7 +7828,7 @@
// is in {instance}.
target_reg = target.gp();
instance_reg = instance.gp();
- } // decoder->enabled_.has_inlining()
+ } // v8_flags.wasm_speculative_inlining
__ PrepareCall(&sig, call_descriptor, &target_reg, &instance_reg);
if (tail_call) {
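[Note] These Liftoff changes all key off the same mechanism: when speculative inlining is on, Liftoff reserves a frame slot for a feedback vector and increments a per-call-site counter on each call; Turbofan later reads those counts (see the wasm-inlining.cc hunk above, min_count_for_inlining) to pick inlining candidates. A standalone toy model of that feedback loop (not V8 code):

    #include <cstdio>
    #include <vector>

    // Baseline tier: bump a counter per call site.
    struct Feedback {
      std::vector<int> counts;
    };

    void OnCall(Feedback& fb, size_t site) { ++fb.counts.at(site); }

    // Optimizing tier: inline only call sites that ran often enough.
    bool ShouldInline(const Feedback& fb, size_t site, int min_count) {
      return fb.counts.at(site) >= min_count;
    }

    int main() {
      Feedback fb{std::vector<int>(4, 0)};
      for (int i = 0; i < 5; ++i) OnCall(fb, 2);
      std::printf("inline site 2: %d\n", ShouldInline(fb, 2, 3));  // 1
    }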
diff --git a/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
index 3a1ccd6..67cbbf0 100644
--- a/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
+++ b/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
@@ -209,14 +209,13 @@
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder,
- bool feedback_vector_slot) {
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (feedback_vector_slot) {
+ if (v8_flags.wasm_speculative_inlining) {
frame_size -= kSystemPointerSize;
}
diff --git a/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 96cba24..bcf3ece 100644
--- a/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -327,14 +327,13 @@
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder,
- bool feedback_vector_slot) {
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (feedback_vector_slot) {
+ if (v8_flags.wasm_speculative_inlining) {
frame_size -= kSystemPointerSize;
}
diff --git a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 57120c4..47a8eaa 100644
--- a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -111,13 +111,12 @@
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder,
- bool feedback_vector_slot) {
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
int frame_size =
GetTotalFrameSize() -
(V8_EMBEDDED_CONSTANT_POOL_BOOL ? 3 : 2) * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (feedback_vector_slot) {
+ if (v8_flags.wasm_speculative_inlining) {
frame_size -= kSystemPointerSize;
}
diff --git a/src/wasm/baseline/riscv/liftoff-assembler-riscv.h b/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
index 52a024c..dda83e5 100644
--- a/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
+++ b/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
@@ -61,14 +61,13 @@
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder,
- bool feedback_vector_slot) {
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (feedback_vector_slot) {
+ if (v8_flags.wasm_speculative_inlining) {
frame_size -= kSystemPointerSize;
}
// We can't run out of space, just pass anything big enough to not cause the
diff --git a/src/wasm/baseline/s390/liftoff-assembler-s390.h b/src/wasm/baseline/s390/liftoff-assembler-s390.h
index c24f548..740262d 100644
--- a/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -101,11 +101,10 @@
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder,
- bool feedback_vector_slot) {
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (feedback_vector_slot) {
+ if (v8_flags.wasm_speculative_inlining) {
frame_size -= kSystemPointerSize;
}
diff --git a/src/wasm/baseline/x64/liftoff-assembler-x64.h b/src/wasm/baseline/x64/liftoff-assembler-x64.h
index d5106c0..b843e4a 100644
--- a/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -202,14 +202,13 @@
}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder,
- bool feedback_vector_slot) {
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (feedback_vector_slot) {
+ if (v8_flags.wasm_speculative_inlining) {
frame_size -= kSystemPointerSize;
}
DCHECK_EQ(0, frame_size % kSystemPointerSize);
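[Note] The same change is applied to every Liftoff backend (arm, arm64, ia32, loong64, mips64, ppc, riscv, s390, x64): the plumbed-through feedback_vector_slot parameter disappears and each backend reads the global flag directly. The shared arithmetic, as a standalone sketch assuming a 64-bit target (per-architecture details like arm64's extra alignment slot and ppc's constant-pool slot are omitted):

    #include <cstdio>

    constexpr int kSystemPointerSize = 8;     // assumption: 64-bit pointers
    bool g_wasm_speculative_inlining = true;  // stands in for the v8 flag

    // Frame marker and instance slot are pushed during frame construction;
    // the frame setup builtin also pushes the feedback vector when
    // speculative inlining is enabled, so neither needs extra space here.
    int PatchedFrameSize(int total_frame_size) {
      int frame_size = total_frame_size - 2 * kSystemPointerSize;
      if (g_wasm_speculative_inlining) frame_size -= kSystemPointerSize;
      return frame_size;
    }

    int main() { std::printf("%d\n", PatchedFrameSize(64)); }  // prints 40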
diff --git a/src/wasm/function-compiler.cc b/src/wasm/function-compiler.cc
index c8d2088..67c69f9 100644
--- a/src/wasm/function-compiler.cc
+++ b/src/wasm/function-compiler.cc
@@ -136,10 +136,7 @@
.set_detected_features(detected)
.set_assembler_buffer_cache(buffer_cache)
.set_debug_sidetable(debug_sidetable_ptr));
- if (result.succeeded()) {
- result.for_inlining = env->enabled_features.has_inlining();
- break;
- }
+ if (result.succeeded()) break;
}
// If --liftoff-only, do not fall back to turbofan, even if compilation
diff --git a/src/wasm/function-compiler.h b/src/wasm/function-compiler.h
index a3d82b85..e6f38e5 100644
--- a/src/wasm/function-compiler.h
+++ b/src/wasm/function-compiler.h
@@ -58,7 +58,6 @@
ExecutionTier result_tier;
Kind kind = kFunction;
ForDebugging for_debugging = kNotForDebugging;
- bool for_inlining = false;
};
class V8_EXPORT_PRIVATE WasmCompilationUnit final {
diff --git a/src/wasm/graph-builder-interface.cc b/src/wasm/graph-builder-interface.cc
index 48b6e18..f4eaf43 100644
--- a/src/wasm/graph-builder-interface.cc
+++ b/src/wasm/graph-builder-interface.cc
@@ -264,8 +264,8 @@
void StartFunctionBody(FullDecoder* decoder, Control* block) {}
- void FinishFunction(FullDecoder* decoder) {
- if (decoder->enabled_.has_inlining()) {
+ void FinishFunction(FullDecoder*) {
+ if (v8_flags.wasm_speculative_inlining) {
DCHECK_EQ(feedback_instruction_index_, type_feedback_.size());
}
if (inlined_status_ == kRegularFunction) {
@@ -726,7 +726,7 @@
void CallDirect(FullDecoder* decoder, const CallFunctionImmediate& imm,
const Value args[], Value returns[]) {
int maybe_call_count = -1;
- if (decoder->enabled_.has_inlining() && type_feedback_.size() > 0) {
+ if (v8_flags.wasm_speculative_inlining && type_feedback_.size() > 0) {
const CallSiteFeedback& feedback = next_call_feedback();
DCHECK_EQ(feedback.num_cases(), 1);
maybe_call_count = feedback.call_count(0);
@@ -738,7 +738,7 @@
void ReturnCall(FullDecoder* decoder, const CallFunctionImmediate& imm,
const Value args[]) {
int maybe_call_count = -1;
- if (decoder->enabled_.has_inlining() && type_feedback_.size() > 0) {
+ if (v8_flags.wasm_speculative_inlining && type_feedback_.size() > 0) {
const CallSiteFeedback& feedback = next_call_feedback();
DCHECK_EQ(feedback.num_cases(), 1);
maybe_call_count = feedback.call_count(0);
@@ -769,7 +769,7 @@
const FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
const CallSiteFeedback* feedback = nullptr;
- if (decoder->enabled_.has_inlining() && type_feedback_.size() > 0) {
+ if (v8_flags.wasm_speculative_inlining && type_feedback_.size() > 0) {
feedback = &next_call_feedback();
}
if (feedback == nullptr || feedback->num_cases() == 0) {
@@ -866,7 +866,7 @@
const FunctionSig* sig, uint32_t sig_index,
const Value args[]) {
const CallSiteFeedback* feedback = nullptr;
- if (decoder->enabled_.has_inlining() && type_feedback_.size() > 0) {
+ if (v8_flags.wasm_speculative_inlining && type_feedback_.size() > 0) {
feedback = &next_call_feedback();
}
if (feedback == nullptr || feedback->num_cases() == 0) {
diff --git a/src/wasm/module-compiler.cc b/src/wasm/module-compiler.cc
index cc924de..aa8dd56 100644
--- a/src/wasm/module-compiler.cc
+++ b/src/wasm/module-compiler.cc
@@ -1426,7 +1426,7 @@
// Before adding the tier-up unit or increasing priority, do process type
// feedback for best code generation.
- if (native_module->enabled_features().has_inlining()) {
+ if (v8_flags.wasm_speculative_inlining) {
// TODO(jkummerow): we could have collisions here if different instances
// of the same module have collected different feedback. If that ever
// becomes a problem, figure out a solution.
@@ -1438,10 +1438,10 @@
void TierUpNowForTesting(Isolate* isolate, WasmInstanceObject instance,
int func_index) {
- NativeModule* native_module = instance.module_object().native_module();
- if (native_module->enabled_features().has_inlining()) {
+ if (v8_flags.wasm_speculative_inlining) {
TransitiveTypeFeedbackProcessor::Process(instance, func_index);
}
+ auto* native_module = instance.module_object().native_module();
wasm::GetWasmEngine()->CompileFunction(isolate->counters(), native_module,
func_index,
wasm::ExecutionTier::kTurbofan);
diff --git a/src/wasm/module-instantiate.cc b/src/wasm/module-instantiate.cc
index 06552ce..3a51774 100644
--- a/src/wasm/module-instantiate.cc
+++ b/src/wasm/module-instantiate.cc
@@ -737,7 +737,7 @@
//--------------------------------------------------------------------------
// Allocate the array that will hold type feedback vectors.
//--------------------------------------------------------------------------
- if (enabled_.has_inlining()) {
+ if (v8_flags.wasm_speculative_inlining) {
int num_functions = static_cast<int>(module_->num_declared_functions);
// Zero-fill the array so we can do a quick Smi-check to test if a given
// slot was initialized.
@@ -775,7 +775,7 @@
//--------------------------------------------------------------------------
// Initialize non-defaultable tables.
//--------------------------------------------------------------------------
- if (enabled_.has_typed_funcref()) {
+ if (v8_flags.experimental_wasm_typed_funcref) {
SetTableInitialValues(instance);
}
diff --git a/src/wasm/wasm-code-manager.cc b/src/wasm/wasm-code-manager.cc
index 1bdb98f..5a1b03d 100644
--- a/src/wasm/wasm-code-manager.cc
+++ b/src/wasm/wasm-code-manager.cc
@@ -1064,11 +1064,10 @@
jump_table_ref =
FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
}
- bool for_inlining = false; // This path isn't used for Liftoff code.
- return AddCodeWithCodeSpace(
- index, desc, stack_slots, tagged_parameter_slots,
- protected_instructions_data, source_position_table, inlining_positions,
- kind, tier, for_debugging, for_inlining, code_space, jump_table_ref);
+ return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
+ protected_instructions_data,
+ source_position_table, inlining_positions, kind,
+ tier, for_debugging, code_space, jump_table_ref);
}
std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
@@ -1077,7 +1076,7 @@
base::Vector<const byte> protected_instructions_data,
base::Vector<const byte> source_position_table,
base::Vector<const byte> inlining_positions, WasmCode::Kind kind,
- ExecutionTier tier, ForDebugging for_debugging, bool for_inlining,
+ ExecutionTier tier, ForDebugging for_debugging,
base::Vector<uint8_t> dst_code_bytes, const JumpTablesRef& jump_tables) {
base::Vector<byte> reloc_info{
desc.buffer + desc.buffer_size - desc.reloc_size,
@@ -1134,8 +1133,7 @@
this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
code_comments_offset, instr_size, protected_instructions_data, reloc_info,
- source_position_table, inlining_positions, kind, tier, for_debugging,
- for_inlining}};
+ source_position_table, inlining_positions, kind, tier, for_debugging}};
code->MaybePrint();
code->Validate();
@@ -2250,8 +2248,8 @@
result.protected_instructions_data.as_vector(),
result.source_positions.as_vector(),
result.inlining_positions.as_vector(), GetCodeKind(result),
- result.result_tier, result.for_debugging, result.for_inlining,
- this_code_space, jump_tables));
+ result.result_tier, result.for_debugging, this_code_space,
+ jump_tables));
}
DCHECK_EQ(0, code_space.size());
diff --git a/src/wasm/wasm-code-manager.h b/src/wasm/wasm-code-manager.h
index 7705dfd..cdf27d5 100644
--- a/src/wasm/wasm-code-manager.h
+++ b/src/wasm/wasm-code-manager.h
@@ -405,11 +405,6 @@
return ForDebuggingField::decode(flags_);
}
- // Returns {true} for Liftoff code that includes call count tracking for
- // later (in Turbofan) inlining purposes.
- // TODO(jkummerow): This can be dropped when we ship Wasm inlining.
- bool for_inlining() const { return ForInliningField::decode(flags_); }
-
enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
private:
@@ -424,8 +419,7 @@
base::Vector<const byte> reloc_info,
base::Vector<const byte> source_position_table,
base::Vector<const byte> inlining_positions, Kind kind,
- ExecutionTier tier, ForDebugging for_debugging,
- bool for_inlining = false)
+ ExecutionTier tier, ForDebugging for_debugging)
: native_module_(native_module),
instructions_(instructions.begin()),
meta_data_(
@@ -445,8 +439,7 @@
code_comments_offset_(code_comments_offset),
unpadded_binary_size_(unpadded_binary_size),
flags_(KindField::encode(kind) | ExecutionTierField::encode(tier) |
- ForDebuggingField::encode(for_debugging) |
- ForInliningField::encode(for_inlining)) {
+ ForDebuggingField::encode(for_debugging)) {
DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
DCHECK_LE(handler_table_offset, unpadded_binary_size);
DCHECK_LE(code_comments_offset, unpadded_binary_size);
@@ -514,7 +507,6 @@
using KindField = base::BitField8<Kind, 0, 2>;
using ExecutionTierField = KindField::Next<ExecutionTier, 2>;
using ForDebuggingField = ExecutionTierField::Next<ForDebugging, 2>;
- using ForInliningField = ForDebuggingField::Next<bool, 1>;
// WasmCode is ref counted. Counters are held by:
// 1) The jump table / code table.
@@ -874,7 +866,7 @@
base::Vector<const byte> protected_instructions_data,
base::Vector<const byte> source_position_table,
base::Vector<const byte> inlining_positions, WasmCode::Kind kind,
- ExecutionTier tier, ForDebugging for_debugging, bool for_inlining,
+ ExecutionTier tier, ForDebugging for_debugging,
base::Vector<uint8_t> code_space, const JumpTablesRef& jump_tables_ref);
WasmCode* CreateEmptyJumpTableLocked(int jump_table_size);
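[Note] Dropping ForInliningField frees the single bit it occupied in the flags_ byte; the remaining fields keep their 2-bit widths. A toy model of the resulting layout (the real code uses base::BitField8; the widths below mirror the fields left in place):

    #include <cstdint>

    // bits 0-1: Kind, bits 2-3: ExecutionTier, bits 4-5: ForDebugging.
    constexpr uint8_t EncodeFlags(uint8_t kind, uint8_t tier, uint8_t dbg) {
      return static_cast<uint8_t>((kind & 3) | ((tier & 3) << 2) |
                                  ((dbg & 3) << 4));
    }

    static_assert(EncodeFlags(1, 2, 0) == 0b0000'1001, "kind=1, tier=2");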
diff --git a/src/wasm/wasm-feature-flags.h b/src/wasm/wasm-feature-flags.h
index b1c0074..b016522 100644
--- a/src/wasm/wasm-feature-flags.h
+++ b/src/wasm/wasm-feature-flags.h
@@ -45,9 +45,6 @@
false) \
V(skip_bounds_checks, "skip array bounds checks (unsafe)", false) \
\
- /* Not user-visible, defined here so an OT can control it. */ \
- V(inlining, "enable wasm-into-wasm inlining", false) \
- \
/* Typed function references proposal. */ \
/* Official proposal: https://github.com/WebAssembly/function-references */ \
/* V8 side owner: manoskouk */ \
diff --git a/src/wasm/wasm-features.cc b/src/wasm/wasm-features.cc
index b11b0ae..793416d 100644
--- a/src/wasm/wasm-features.cc
+++ b/src/wasm/wasm-features.cc
@@ -33,18 +33,15 @@
WasmFeatures WasmFeatures::FromContext(Isolate* isolate,
Handle<Context> context) {
WasmFeatures features = WasmFeatures::FromFlags();
- if (isolate->IsWasmGCEnabled(context)) {
+ if (isolate->IsWasmGCEnabled(handle(isolate->context(), isolate))) {
features.Add(kFeature_gc);
// Also enable typed function references, since the commandline flag
// implication won't do that for us in this case.
features.Add(kFeature_typed_funcref);
}
- if (isolate->IsWasmStringRefEnabled(context)) {
+ if (isolate->IsWasmStringRefEnabled(handle(isolate->context(), isolate))) {
features.Add(kFeature_stringref);
}
- if (isolate->IsWasmInliningEnabled(context)) {
- features.Add(kFeature_inlining);
- }
// This space intentionally left blank for future Wasm origin trials.
return features;
}
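[Note] After this change, FromContext no longer consults the passed-in context parameter for the GC and stringref checks (it re-derives the isolate's current context) and no longer adds kFeature_inlining at all; inlining is controlled purely by flags. A toy model of the composition order, flags first and context-dependent additions second (names hypothetical, not V8 API):

    #include <bitset>

    enum Feature { kGc, kTypedFuncref, kStringref, kNumFeatures };
    using FeatureSet = std::bitset<kNumFeatures>;

    FeatureSet FromFlags(bool gc_flag, bool stringref_flag) {
      FeatureSet f;
      if (gc_flag) {
        f.set(kGc);
        f.set(kTypedFuncref);  // models the commandline flag implication
      }
      if (stringref_flag) f.set(kStringref);
      return f;
    }

    FeatureSet FromContext(bool ctx_gc, bool ctx_stringref, FeatureSet f) {
      if (ctx_gc) {
        f.set(kGc);
        f.set(kTypedFuncref);  // flag implication doesn't fire here
      }
      if (ctx_stringref) f.set(kStringref);
      return f;
    }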
diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc
index ba12cf1..d206334 100644
--- a/src/wasm/wasm-js.cc
+++ b/src/wasm/wasm-js.cc
@@ -2949,8 +2949,6 @@
// TODO(7748): These built-ins should not be shipped with wasm GC.
// Either a new flag will be needed or the built-ins have to be deleted prior
// to shipping.
- // TODO(13810): We should install these later, when we can query the
- // isolate's wasm_gc_enabled_callback, to take the Origin Trial into account.
if (v8_flags.experimental_wasm_gc) {
SimpleInstallFunction(
isolate, webassembly, "experimentalConvertArrayToString",
diff --git a/src/wasm/wasm-module.cc b/src/wasm/wasm-module.cc
index 068764e..fc27ec2 100644
--- a/src/wasm/wasm-module.cc
+++ b/src/wasm/wasm-module.cc
@@ -666,6 +666,7 @@
}
int NumFeedbackSlots(const WasmModule* module, int func_index) {
+ if (!v8_flags.wasm_speculative_inlining) return 0;
// TODO(clemensb): Avoid the mutex once this ships, or at least switch to a
// shared mutex.
base::MutexGuard type_feedback_guard{&module->type_feedback.mutex};
diff --git a/src/wasm/wasm-subtyping.cc b/src/wasm/wasm-subtyping.cc
index 5a2b8f6..4e69f52 100644
--- a/src/wasm/wasm-subtyping.cc
+++ b/src/wasm/wasm-subtyping.cc
@@ -218,8 +218,9 @@
return super_heap == sub_heap || super_heap == HeapType::kEq ||
super_heap == HeapType::kAny;
case HeapType::kString:
- // stringref is a subtype of anyref.
- return sub_heap == super_heap || super_heap == HeapType::kAny;
+ // stringref is a subtype of anyref under wasm-gc.
+ return sub_heap == super_heap ||
+ (v8_flags.experimental_wasm_gc && super_heap == HeapType::kAny);
case HeapType::kStringViewWtf8:
case HeapType::kStringViewWtf16:
case HeapType::kStringViewIter:
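[Note] The restored rule makes stringref a subtype of anyref only while the wasm-gc flag is on; without the flag, only reflexive subtyping holds for the string heap type. As a standalone sketch (not V8 code):

    // Toy model of the restored kString case.
    bool g_experimental_wasm_gc = false;  // stands in for the v8 flag

    enum Heap { kString, kEq, kAny };

    // sub_heap is known to be kString when this case is reached.
    bool IsStringHeapSubtype(Heap sub_heap, Heap super_heap) {
      return sub_heap == super_heap ||
             (g_experimental_wasm_gc && super_heap == kAny);
    }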
diff --git a/test/cctest/wasm/test-streaming-compilation.cc b/test/cctest/wasm/test-streaming-compilation.cc
index 2825d96..b81ccf2 100644
--- a/test/cctest/wasm/test-streaming-compilation.cc
+++ b/test/cctest/wasm/test-streaming-compilation.cc
@@ -195,9 +195,8 @@
Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
v8::Local<v8::Context> context = isolate->GetCurrentContext();
- WasmFeatures features = WasmFeatures::FromIsolate(i_isolate);
stream_ = GetWasmEngine()->StartStreamingCompilation(
- i_isolate, features, v8::Utils::OpenHandle(*context),
+ i_isolate, WasmFeatures::All(), v8::Utils::OpenHandle(*context),
"WebAssembly.compileStreaming()",
std::make_shared<TestResolver>(i_isolate, &state_, &error_message_,
&module_object_));