deps: patch V8 to 7.4.288.21
Refs: https://github.com/v8/v8/compare/7.4.288.18...7.4.288.21
PR-URL: https://github.com/nodejs/node/pull/27265
Reviewed-By: Ruben Bridgewater <ruben@bridgewater.de>
Reviewed-By: Michaël Zasso <targos@protonmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
deps/v8/include/v8-version.h (vendored, 2 changes)
@@ -11,7 +11,7 @@
 #define V8_MAJOR_VERSION 7
 #define V8_MINOR_VERSION 4
 #define V8_BUILD_NUMBER 288
-#define V8_PATCH_LEVEL 18
+#define V8_PATCH_LEVEL 21
 
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
deps/v8/src/arm/macro-assembler-arm.cc (vendored, 23 changes)
@@ -332,7 +332,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
 
   if (options().isolate_independent_code) {
     DCHECK(root_array_available());
-    Label if_code_is_builtin, out;
+    Label if_code_is_off_heap, out;
 
     UseScratchRegisterScope temps(this);
     Register scratch = temps.Acquire();
@@ -340,23 +340,22 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
     DCHECK(!AreAliased(destination, scratch));
     DCHECK(!AreAliased(code_object, scratch));
 
-    // Check whether the Code object is a builtin. If so, call its (off-heap)
-    // entry point directly without going through the (on-heap) trampoline.
-    // Otherwise, just call the Code object as always.
-
-    ldr(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
-    cmp(scratch, Operand(Builtins::kNoBuiltinId));
-    b(ne, &if_code_is_builtin);
+    // Check whether the Code object is an off-heap trampoline. If so, call its
+    // (off-heap) entry point directly without going through the (on-heap)
+    // trampoline. Otherwise, just call the Code object as always.
+    ldr(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+    tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+    b(ne, &if_code_is_off_heap);
 
-    // A non-builtin Code object, the entry point is at
+    // Not an off-heap trampoline, the entry point is at
     // Code::raw_instruction_start().
     add(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
     jmp(&out);
 
-    // A builtin Code object, the entry point is loaded from the builtin entry
+    // An off-heap trampoline, the entry point is loaded from the builtin entry
     // table.
-    // The builtin index is loaded in scratch.
-    bind(&if_code_is_builtin);
+    bind(&if_code_is_off_heap);
+    ldr(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
     lsl(destination, scratch, Operand(kSystemPointerSizeLog2));
     add(destination, destination, kRootRegister);
     ldr(destination,
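The same rewrite lands in the arm64, ia32, and x64 ports below; only the instruction selection differs. In all four, the dispatch condition changes from "does this Code object have a builtin index" to "is this Code object an off-heap trampoline", tested via a flag bit, and the builtin index is read only once the trampoline branch is taken. An architecture-neutral C++ sketch of what the new sequence computes; the struct layout, mask value, and entry table here are illustrative stand-ins, not V8's real definitions:

    #include <cstdint>

    // Hypothetical stand-ins for V8's Code object layout and builtin entry
    // table; the field names echo the offsets used in the assembly above.
    struct Code {
      uint32_t flags;               // cf. Code::kFlagsOffset
      int32_t builtin_index;        // cf. Code::kBuiltinIndexOffset
      const uint8_t* instructions;  // body starts after the Code header
    };

    constexpr uint32_t kIsOffHeapTrampolineMask = 1u << 0;  // assumed bit
    const uint8_t* builtin_entry_table[256];                // assumed table

    const uint8_t* LoadCodeObjectEntry(const Code* code) {
      // New check: test the IsOffHeapTrampoline flag bit (tst/testl in the
      // diffs) instead of comparing builtin_index against kNoBuiltinId.
      if (code->flags & kIsOffHeapTrampolineMask) {
        // An off-heap trampoline: fetch the real entry point from the
        // builtin entry table, indexed by the trampoline's builtin index.
        return builtin_entry_table[code->builtin_index];
      }
      // Not an off-heap trampoline: the entry point is the instruction
      // start right after the Code header.
      return code->instructions;
    }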
deps/v8/src/arm64/macro-assembler-arm64.cc (vendored, 22 changes)
@@ -2054,7 +2054,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
 
   if (options().isolate_independent_code) {
     DCHECK(root_array_available());
-    Label if_code_is_builtin, out;
+    Label if_code_is_off_heap, out;
 
     UseScratchRegisterScope temps(this);
     Register scratch = temps.AcquireX();
@@ -2062,23 +2062,23 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
     DCHECK(!AreAliased(destination, scratch));
     DCHECK(!AreAliased(code_object, scratch));
 
-    // Check whether the Code object is a builtin. If so, call its (off-heap)
-    // entry point directly without going through the (on-heap) trampoline.
-    // Otherwise, just call the Code object as always.
+    // Check whether the Code object is an off-heap trampoline. If so, call its
+    // (off-heap) entry point directly without going through the (on-heap)
+    // trampoline. Otherwise, just call the Code object as always.
 
-    Ldrsw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
-    Cmp(scratch, Operand(Builtins::kNoBuiltinId));
-    B(ne, &if_code_is_builtin);
+    Ldrsw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+    Tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+    B(ne, &if_code_is_off_heap);
 
-    // A non-builtin Code object, the entry point is at
+    // Not an off-heap trampoline object, the entry point is at
     // Code::raw_instruction_start().
     Add(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
     B(&out);
 
-    // A builtin Code object, the entry point is loaded from the builtin entry
+    // An off-heap trampoline, the entry point is loaded from the builtin entry
     // table.
-    // The builtin index is loaded in scratch.
-    bind(&if_code_is_builtin);
+    bind(&if_code_is_off_heap);
+    Ldrsw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
    Lsl(destination, scratch, kSystemPointerSizeLog2);
     Add(destination, destination, kRootRegister);
     Ldr(destination,
deps/v8/src/ia32/macro-assembler-ia32.cc (vendored, 20 changes)
@@ -1905,24 +1905,24 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
 
   if (options().isolate_independent_code) {
     DCHECK(root_array_available());
-    Label if_code_is_builtin, out;
+    Label if_code_is_off_heap, out;
 
-    // Check whether the Code object is a builtin. If so, call its (off-heap)
-    // entry point directly without going through the (on-heap) trampoline.
-    // Otherwise, just call the Code object as always.
-    cmp(FieldOperand(code_object, Code::kBuiltinIndexOffset),
-        Immediate(Builtins::kNoBuiltinId));
-    j(not_equal, &if_code_is_builtin);
+    // Check whether the Code object is an off-heap trampoline. If so, call its
+    // (off-heap) entry point directly without going through the (on-heap)
+    // trampoline. Otherwise, just call the Code object as always.
+    test(FieldOperand(code_object, Code::kFlagsOffset),
+         Immediate(Code::IsOffHeapTrampoline::kMask));
+    j(not_equal, &if_code_is_off_heap);
 
-    // A non-builtin Code object, the entry point is at
+    // Not an off-heap trampoline, the entry point is at
     // Code::raw_instruction_start().
     Move(destination, code_object);
     add(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
     jmp(&out);
 
-    // A builtin Code object, the entry point is loaded from the builtin entry
+    // An off-heap trampoline, the entry point is loaded from the builtin entry
     // table.
-    bind(&if_code_is_builtin);
+    bind(&if_code_is_off_heap);
     mov(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
     mov(destination,
         Operand(kRootRegister, destination, times_system_pointer_size,
deps/v8/src/wasm/module-compiler.cc (vendored, 91 changes)
@@ -129,7 +129,8 @@ class CompilationStateImpl {
   void SetNumberOfFunctionsToCompile(int num_functions);
 
   // Add the callback function to be called on compilation events. Needs to be
-  // set before {AddCompilationUnits} is run.
+  // set before {AddCompilationUnits} is run to ensure that it receives all
+  // events. The callback object must support being deleted from any thread.
   void AddCallback(CompilationState::callback_t);
 
   // Inserts new functions to compile and kicks off compilation.
@@ -153,7 +154,7 @@ class CompilationStateImpl {
   }
 
   bool baseline_compilation_finished() const {
-    base::MutexGuard guard(&mutex_);
+    base::MutexGuard guard(&callbacks_mutex_);
     return outstanding_baseline_units_ == 0 ||
            (compile_mode_ == CompileMode::kTiering &&
             outstanding_tiering_units_ == 0);
@@ -203,8 +204,6 @@ class CompilationStateImpl {
         : func_index(func_index), error(std::move(error)) {}
   };
 
-  void NotifyOnEvent(CompilationEvent event);
-
   NativeModule* const native_module_;
   const std::shared_ptr<BackgroundCompileToken> background_compile_token_;
   const CompileMode compile_mode_;
@@ -236,16 +235,26 @@ class CompilationStateImpl {
   // compiling.
   std::shared_ptr<WireBytesStorage> wire_bytes_storage_;
 
-  int outstanding_baseline_units_ = 0;
-  int outstanding_tiering_units_ = 0;
-
   // End of fields protected by {mutex_}.
   //////////////////////////////////////////////////////////////////////////////
 
-  // Callback functions to be called on compilation events. Only accessible from
-  // the foreground thread.
+  // This mutex protects the callbacks vector, and the counters used to
+  // determine which callbacks to call. The counters plus the callbacks
+  // themselves need to be synchronized to ensure correct order of events.
+  mutable base::Mutex callbacks_mutex_;
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Protected by {callbacks_mutex_}:
+
+  // Callback functions to be called on compilation events.
   std::vector<CompilationState::callback_t> callbacks_;
 
+  int outstanding_baseline_units_ = 0;
+  int outstanding_tiering_units_ = 0;
+
+  // End of fields protected by {callbacks_mutex_}.
+  //////////////////////////////////////////////////////////////////////////////
+
   const int max_background_tasks_ = 0;
 };
@@ -852,6 +861,7 @@ std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
 }
 
 AsyncCompileJob::~AsyncCompileJob() {
+  // Note: This destructor always runs on the foreground thread of the isolate.
   background_task_manager_.CancelAndWait();
   // If the runtime objects were not created yet, then initial compilation did
   // not finish yet. In this case we can abort compilation.
@@ -1473,12 +1483,13 @@ CompilationStateImpl::~CompilationStateImpl() {
 void CompilationStateImpl::AbortCompilation() {
   background_compile_token_->Cancel();
+  // No more callbacks after abort.
+  base::MutexGuard callbacks_guard(&callbacks_mutex_);
+  callbacks_.clear();
 }
 
 void CompilationStateImpl::SetNumberOfFunctionsToCompile(int num_functions) {
   DCHECK(!failed());
-  base::MutexGuard guard(&mutex_);
+  base::MutexGuard guard(&callbacks_mutex_);
   outstanding_baseline_units_ = num_functions;
 
   if (compile_mode_ == CompileMode::kTiering) {
@@ -1487,6 +1498,7 @@ void CompilationStateImpl::SetNumberOfFunctionsToCompile(int num_functions) {
 }
 
 void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
+  base::MutexGuard callbacks_guard(&callbacks_mutex_);
   callbacks_.emplace_back(std::move(callback));
 }
 
@@ -1536,7 +1548,7 @@ CompilationStateImpl::GetNextCompilationUnit() {
 
 void CompilationStateImpl::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
   // This mutex guarantees that events happen in the right order.
-  base::MutexGuard guard(&mutex_);
+  base::MutexGuard guard(&callbacks_mutex_);
 
   // If we are *not* compiling in tiering mode, then all units are counted as
   // baseline units.
@@ -1547,28 +1559,36 @@ void CompilationStateImpl::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
   // tiering units.
   DCHECK_IMPLIES(!is_tiering_mode, outstanding_tiering_units_ == 0);
 
+  bool baseline_finished = false;
+  bool tiering_finished = false;
   if (is_tiering_unit) {
     DCHECK_LT(0, outstanding_tiering_units_);
     --outstanding_tiering_units_;
-    if (outstanding_tiering_units_ == 0) {
-      // If baseline compilation has not finished yet, then also trigger
-      // {kFinishedBaselineCompilation}.
-      if (outstanding_baseline_units_ > 0) {
-        NotifyOnEvent(CompilationEvent::kFinishedBaselineCompilation);
-      }
-      NotifyOnEvent(CompilationEvent::kFinishedTopTierCompilation);
-    }
+    tiering_finished = outstanding_tiering_units_ == 0;
+    // If baseline compilation has not finished yet, then also trigger
+    // {kFinishedBaselineCompilation}.
+    baseline_finished = tiering_finished && outstanding_baseline_units_ > 0;
   } else {
     DCHECK_LT(0, outstanding_baseline_units_);
     --outstanding_baseline_units_;
-    if (outstanding_baseline_units_ == 0) {
-      NotifyOnEvent(CompilationEvent::kFinishedBaselineCompilation);
-      // If we are not tiering, then we also trigger the "top tier finished"
-      // event when baseline compilation is finished.
-      if (!is_tiering_mode) {
-        NotifyOnEvent(CompilationEvent::kFinishedTopTierCompilation);
-      }
-    }
+    // If we are in tiering mode and tiering finished before, then do not
+    // trigger baseline finished.
+    baseline_finished = outstanding_baseline_units_ == 0 &&
+                        (!is_tiering_mode || outstanding_tiering_units_ > 0);
+    // If we are not tiering, then we also trigger the "top tier finished"
+    // event when baseline compilation is finished.
+    tiering_finished = baseline_finished && !is_tiering_mode;
   }
 
+  if (baseline_finished) {
+    for (auto& callback : callbacks_)
+      callback(CompilationEvent::kFinishedBaselineCompilation);
+  }
+  if (tiering_finished) {
+    for (auto& callback : callbacks_)
+      callback(CompilationEvent::kFinishedTopTierCompilation);
+    // Clear the callbacks because no more events will be delivered.
+    callbacks_.clear();
+  }
+
   if (code != nullptr) native_module_->engine()->LogCode(code);
@@ -1648,17 +1668,12 @@ void CompilationStateImpl::SetError(uint32_t func_index,
   if (!set) return;
   // If set successfully, give up ownership.
   compile_error.release();
-  // Schedule a foreground task to call the callback and notify users about the
-  // compile error.
-  NotifyOnEvent(CompilationEvent::kFailedCompilation);
-}
-
-void CompilationStateImpl::NotifyOnEvent(CompilationEvent event) {
-  for (auto& callback : callbacks_) callback(event);
-  // If no more events are expected after this one, clear the callbacks to free
-  // memory. We can safely do this here, as this method is only called from
-  // foreground tasks.
-  if (event >= CompilationEvent::kFirstFinalEvent) callbacks_.clear();
+  base::MutexGuard callbacks_guard(&callbacks_mutex_);
+  for (auto& callback : callbacks_) {
+    callback(CompilationEvent::kFailedCompilation);
+  }
+  // No more callbacks after an error.
+  callbacks_.clear();
 }
 
 void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
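Taken together, these hunks retire the foreground-only NotifyOnEvent() helper: the outstanding-unit counters move under a new callbacks_mutex_ alongside the callback vector, and events are fired directly while that lock is held, so compilation events can be delivered from any thread without being reordered or lost. A condensed sketch of the pattern with simplified types; only callbacks_mutex_, callback_t, and CompilationEvent echo names from the diff, and CompilationProgress is hypothetical:

    #include <functional>
    #include <mutex>
    #include <vector>

    enum class CompilationEvent {
      kFinishedBaselineCompilation,
      kFinishedTopTierCompilation,
      kFailedCompilation,
    };
    using callback_t = std::function<void(CompilationEvent)>;

    class CompilationProgress {  // hypothetical, not V8's CompilationStateImpl
     public:
      void AddCallback(callback_t callback) {
        std::lock_guard<std::mutex> guard(callbacks_mutex_);
        callbacks_.emplace_back(std::move(callback));
      }

      void OnFinishedBaselineUnit() {
        // The counter decrement and the callback invocations happen under
        // one lock, so no thread can observe events out of order or miss one.
        std::lock_guard<std::mutex> guard(callbacks_mutex_);
        if (--outstanding_baseline_units_ == 0) {
          for (auto& callback : callbacks_) {
            callback(CompilationEvent::kFinishedBaselineCompilation);
          }
        }
      }

     private:
      std::mutex callbacks_mutex_;
      // Protected by callbacks_mutex_:
      std::vector<callback_t> callbacks_;
      int outstanding_baseline_units_ = 1;
    };

As in the patch, callbacks run with the lock held and may now fire from background threads, which is why the updated header comment warns that the callback object must support being deleted from any thread.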
deps/v8/src/x64/macro-assembler-x64.cc (vendored, 20 changes)
@@ -1588,24 +1588,24 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
 
   if (options().isolate_independent_code) {
     DCHECK(root_array_available());
-    Label if_code_is_builtin, out;
+    Label if_code_is_off_heap, out;
 
-    // Check whether the Code object is a builtin. If so, call its (off-heap)
-    // entry point directly without going through the (on-heap) trampoline.
-    // Otherwise, just call the Code object as always.
-    cmpl(FieldOperand(code_object, Code::kBuiltinIndexOffset),
-         Immediate(Builtins::kNoBuiltinId));
-    j(not_equal, &if_code_is_builtin);
+    // Check whether the Code object is an off-heap trampoline. If so, call its
+    // (off-heap) entry point directly without going through the (on-heap)
+    // trampoline. Otherwise, just call the Code object as always.
+    testl(FieldOperand(code_object, Code::kFlagsOffset),
+          Immediate(Code::IsOffHeapTrampoline::kMask));
+    j(not_equal, &if_code_is_off_heap);
 
-    // A non-builtin Code object, the entry point is at
+    // Not an off-heap trampoline, the entry point is at
     // Code::raw_instruction_start().
     Move(destination, code_object);
     addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
     jmp(&out);
 
-    // A builtin Code object, the entry point is loaded from the builtin entry
+    // An off-heap trampoline, the entry point is loaded from the builtin entry
     // table.
-    bind(&if_code_is_builtin);
+    bind(&if_code_is_off_heap);
     movl(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
     movq(destination,
          Operand(kRootRegister, destination, times_system_pointer_size,
deps/v8/src/zone/accounting-allocator.cc (vendored, 2 changes)
@@ -24,7 +24,7 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
   if (memory == nullptr) return nullptr;
 
   size_t current =
-      current_memory_usage_.fetch_add(bytes, std::memory_order_relaxed);
+      current_memory_usage_.fetch_add(bytes, std::memory_order_relaxed) + bytes;
   size_t max = max_memory_usage_.load(std::memory_order_relaxed);
   while (current > max && !max_memory_usage_.compare_exchange_weak(
                               max, current, std::memory_order_relaxed)) {
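This one-line change fixes an off-by-one-allocation bug: std::atomic's fetch_add returns the value the counter held before the addition, so the old code computed current as the usage excluding the segment just allocated, and the recorded high-water mark lagged one allocation behind the true peak. A standalone sketch of the corrected idiom (plain C++, not V8's actual class):

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> current_memory_usage{0};
    std::atomic<size_t> max_memory_usage{0};

    void NoteAllocation(size_t bytes) {
      // fetch_add returns the PRE-add value, so add bytes back to get the
      // new total -- this is exactly what the "+ bytes" in the patch does.
      size_t current =
          current_memory_usage.fetch_add(bytes, std::memory_order_relaxed) + bytes;
      size_t max = max_memory_usage.load(std::memory_order_relaxed);
      // Raise the recorded maximum until it is at least `current`; on
      // failure, compare_exchange_weak reloads `max`, so just retry.
      while (current > max &&
             !max_memory_usage.compare_exchange_weak(max, current,
                                                     std::memory_order_relaxed)) {
      }
    }

The new AccountingAllocatorCurrentAndMax test added below exercises exactly this pair of counters.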
deps/v8/test/cctest/cctest.status (vendored, 4 changes)
@@ -615,6 +615,10 @@
   'test-run-wasm-exceptions/RunWasmTurbofan_TryCatchThrow': [SKIP],
   'test-run-wasm-exceptions/RunWasmTurbofan_TryCatchTrapTypeError': [SKIP],
 
+  # --interpreted-frames-native-stack tests
+  'test-log/ExternalCodeEventListenerWithInterpretedFramesNativeStack': [SKIP],
+  'test-log/LogInterpretedFramesNativeStack': [SKIP],
+
   # Crashes on native arm.
   'test-macro-assembler-arm/ExtractLane': [PASS, ['arch == arm and not simulator_run', SKIP]],
   'test-macro-assembler-arm/LoadAndStoreWithRepresentation': [PASS, ['arch == arm and not simulator_run', SKIP]],
deps/v8/test/cctest/test-allocation.cc (vendored, 28 changes)
@@ -105,6 +105,34 @@ TEST(AccountingAllocatorOOM) {
   CHECK_EQ(result == nullptr, platform.oom_callback_called);
 }
 
+TEST(AccountingAllocatorCurrentAndMax) {
+  AllocationPlatform platform;
+  v8::internal::AccountingAllocator allocator;
+  static constexpr size_t kAllocationSizes[] = {51, 231, 27};
+  std::vector<v8::internal::Segment*> segments;
+  CHECK_EQ(0, allocator.GetCurrentMemoryUsage());
+  CHECK_EQ(0, allocator.GetMaxMemoryUsage());
+  size_t expected_current = 0;
+  size_t expected_max = 0;
+  for (size_t size : kAllocationSizes) {
+    segments.push_back(allocator.AllocateSegment(size));
+    CHECK_NOT_NULL(segments.back());
+    CHECK_EQ(size, segments.back()->total_size());
+    expected_current += size;
+    if (expected_current > expected_max) expected_max = expected_current;
+    CHECK_EQ(expected_current, allocator.GetCurrentMemoryUsage());
+    CHECK_EQ(expected_max, allocator.GetMaxMemoryUsage());
+  }
+  for (auto* segment : segments) {
+    expected_current -= segment->total_size();
+    allocator.ReturnSegment(segment);
+    CHECK_EQ(expected_current, allocator.GetCurrentMemoryUsage());
+  }
+  CHECK_EQ(expected_max, allocator.GetMaxMemoryUsage());
+  CHECK_EQ(0, allocator.GetCurrentMemoryUsage());
+  CHECK(!platform.oom_callback_called);
+}
+
 TEST(MallocedOperatorNewOOM) {
   AllocationPlatform platform;
   CHECK(!platform.oom_callback_called);