blob: 0e1c45c1c3c505e35cec45416a7a22ac487781e2 [file] [log] [blame]
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/interpreter/bytecode-generator.h"

#include <algorithm>

#include "src/api/api-inl.h"
#include "src/ast/ast-source-ranges.h"
#include "src/ast/scopes.h"
#include "src/builtins/builtins-constructor.h"
#include "src/codegen/compiler.h"
#include "src/codegen/unoptimized-compilation-info.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/control-flow-builders.h"
#include "src/logging/local-logger.h"
#include "src/logging/log.h"
#include "src/objects/debug-objects.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/objects/template-objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
namespace interpreter {
// Scoped class tracking context objects created by the visitor. Represents
// mutations of the context chain within the function body, allowing pushing and
// popping of the current {context_register} during visitation.
class BytecodeGenerator::ContextScope {
 public:
  ContextScope(BytecodeGenerator* generator, Scope* scope)
      : generator_(generator),
        scope_(scope),
        outer_(generator_->execution_context()),
        register_(Register::current_context()),
        depth_(0) {
    // Only a scope that allocates a context may push one; the sole exception
    // is the outermost scope, which establishes the initial execution context.
    DCHECK(scope->NeedsContext() || outer_ == nullptr);
    if (outer_) {
      depth_ = outer_->depth_ + 1;
      // Push the outer context into a new context register.
      Register outer_context_reg =
          generator_->register_allocator()->NewRegister();
      outer_->set_register(outer_context_reg);
      generator_->builder()->PushContext(outer_context_reg);
    }
    generator_->set_execution_context(this);
  }
  ~ContextScope() {
    if (outer_) {
      // This scope's context must still occupy the dedicated current-context
      // register before it can be popped.
      DCHECK_EQ(register_.index(), Register::current_context().index());
      generator_->builder()->PopContext(outer_->reg());
      // The outer scope becomes current again, so it reclaims the
      // current-context register.
      outer_->set_register(register_);
    }
    generator_->set_execution_context(outer_);
  }
  // Returns the depth of the given |scope| for the current execution context.
  int ContextChainDepth(Scope* scope) {
    return scope_->ContextChainLength(scope);
  }
  // Returns the execution context at |depth| in the current context chain if it
  // is a function local execution context, otherwise returns nullptr.
  ContextScope* Previous(int depth) {
    if (depth > depth_) {
      return nullptr;
    }
    // Walk |depth| steps up the chain of enclosing context scopes.
    ContextScope* previous = this;
    for (int i = depth; i > 0; --i) {
      previous = previous->outer_;
    }
    return previous;
  }
  // Register currently holding this scope's context object.
  Register reg() const { return register_; }

 private:
  const BytecodeArrayBuilder* builder() const { return generator_->builder(); }
  void set_register(Register reg) { register_ = reg; }

  BytecodeGenerator* generator_;
  Scope* scope_;
  // Enclosing context scope, or nullptr for the outermost (function) scope.
  ContextScope* outer_;
  // Register holding this scope's context object.
  Register register_;
  // Number of context scopes between this one and the outermost one.
  int depth_;
};
// Scoped class for tracking control statements entered by the
// visitor. The pattern derives AstGraphBuilder::ControlScope.
class BytecodeGenerator::ControlScope {
 public:
  explicit ControlScope(BytecodeGenerator* generator)
      : generator_(generator),
        outer_(generator->execution_control()),
        context_(generator->execution_context()) {
    generator_->set_execution_control(this);
  }
  virtual ~ControlScope() { generator_->set_execution_control(outer()); }
  ControlScope(const ControlScope&) = delete;
  ControlScope& operator=(const ControlScope&) = delete;

  // Entry points used by the visitor; each walks the chain of control scopes
  // via {PerformCommand} until some scope handles the command.
  void Break(Statement* stmt) {
    PerformCommand(CMD_BREAK, stmt, kNoSourcePosition);
  }
  void Continue(Statement* stmt) {
    PerformCommand(CMD_CONTINUE, stmt, kNoSourcePosition);
  }
  void ReturnAccumulator(int source_position = kNoSourcePosition) {
    PerformCommand(CMD_RETURN, nullptr, source_position);
  }
  void AsyncReturnAccumulator(int source_position = kNoSourcePosition) {
    PerformCommand(CMD_ASYNC_RETURN, nullptr, source_position);
  }

  class DeferredCommands;

 protected:
  enum Command {
    CMD_BREAK,
    CMD_CONTINUE,
    CMD_RETURN,
    CMD_ASYNC_RETURN,
    CMD_RETHROW
  };
  // Returns and rethrows carry a value in the accumulator; break and continue
  // do not.
  static constexpr bool CommandUsesAccumulator(Command command) {
    return command != CMD_BREAK && command != CMD_CONTINUE;
  }
  // Walks outward through the enclosing control scopes until one of them
  // handles the command.
  void PerformCommand(Command command, Statement* statement,
                      int source_position);
  // Attempts to handle |command| in this scope. Returns true on success,
  // false to delegate handling to the next outer scope.
  virtual bool Execute(Command command, Statement* statement,
                       int source_position) = 0;

  // Helper to pop the context chain to a depth expected by this control scope.
  // Note that it is the responsibility of each individual {Execute} method to
  // trigger this when commands are handled and control-flow continues locally.
  void PopContextToExpectedDepth();

  BytecodeGenerator* generator() const { return generator_; }
  ControlScope* outer() const { return outer_; }
  ContextScope* context() const { return context_; }

 private:
  BytecodeGenerator* generator_;
  ControlScope* outer_;    // Enclosing control scope, restored on destruction.
  ContextScope* context_;  // Execution context in effect when entering scope.
};
// Helper class for a try-finally control scope. It can record intercepted
// control-flow commands that cause entry into a finally-block, and re-apply
// them after again leaving that block. Special tokens are used to identify
// paths going through the finally-block to dispatch after leaving the block.
class BytecodeGenerator::ControlScope::DeferredCommands final {
 public:
  // Fixed value tokens for paths we know we need.
  // Fallthrough is set to -1 to make it the fallthrough case of the jump table,
  // where the remaining cases start at 0.
  static const int kFallthroughToken = -1;
  // TODO(leszeks): Rethrow being 0 makes it use up a valuable LdaZero, which
  // means that other commands (such as break or return) have to use LdaSmi.
  // This can very slightly bloat bytecode, so perhaps token values should all
  // be shifted down by 1.
  static const int kRethrowToken = 0;

  DeferredCommands(BytecodeGenerator* generator, Register token_register,
                   Register result_register)
      : generator_(generator),
        deferred_(generator->zone()),
        token_register_(token_register),
        result_register_(result_register),
        return_token_(-1),
        async_return_token_(-1) {
    // There's always a rethrow path.
    // TODO(leszeks): We could decouple deferred_ index and token to allow us
    // to still push this lazily.
    STATIC_ASSERT(kRethrowToken == 0);
    deferred_.push_back({CMD_RETHROW, nullptr, kRethrowToken});
  }

  // One recorded control-flow command.
  struct Entry {
    Command command;       // The command type being applied on this path.
    Statement* statement;  // The target statement for the command or {nullptr}.
    int token;             // A token identifying this particular path.
  };

  // Records a control-flow command while entering the finally-block. This also
  // generates a new dispatch token that identifies one particular path. This
  // expects the result to be in the accumulator.
  void RecordCommand(Command command, Statement* statement) {
    int token = GetTokenForCommand(command, statement);

    // Tokens double as indices into deferred_, so the entry at |token| must
    // describe exactly this command.
    DCHECK_LT(token, deferred_.size());
    DCHECK_EQ(deferred_[token].command, command);
    DCHECK_EQ(deferred_[token].statement, statement);
    DCHECK_EQ(deferred_[token].token, token);

    if (CommandUsesAccumulator(command)) {
      builder()->StoreAccumulatorInRegister(result_register_);
    }
    builder()->LoadLiteral(Smi::FromInt(token));
    builder()->StoreAccumulatorInRegister(token_register_);
    if (!CommandUsesAccumulator(command)) {
      // If we're not saving the accumulator in the result register, shove a
      // harmless value there instead so that it is still considered "killed" in
      // the liveness analysis. Normally we would LdaUndefined first, but the
      // Smi token value is just as good, and by reusing it we save a bytecode.
      builder()->StoreAccumulatorInRegister(result_register_);
    }
  }

  // Records the dispatch token to be used to identify the re-throw path when
  // the finally-block has been entered through the exception handler. This
  // expects the exception to be in the accumulator.
  void RecordHandlerReThrowPath() {
    // The accumulator contains the exception object.
    RecordCommand(CMD_RETHROW, nullptr);
  }

  // Records the dispatch token to be used to identify the implicit fall-through
  // path at the end of a try-block into the corresponding finally-block.
  void RecordFallThroughPath() {
    builder()->LoadLiteral(Smi::FromInt(kFallthroughToken));
    builder()->StoreAccumulatorInRegister(token_register_);
    // Since we're not saving the accumulator in the result register, shove a
    // harmless value there instead so that it is still considered "killed" in
    // the liveness analysis. Normally we would LdaUndefined first, but the Smi
    // token value is just as good, and by reusing it we save a bytecode.
    builder()->StoreAccumulatorInRegister(result_register_);
  }

  // Applies all recorded control-flow commands after the finally-block again.
  // This generates a dynamic dispatch on the token from the entry point.
  void ApplyDeferredCommands() {
    if (deferred_.size() == 0) return;

    BytecodeLabel fall_through;
    if (deferred_.size() == 1) {
      // For a single entry, just jump to the fallthrough if we don't match the
      // entry token.
      const Entry& entry = deferred_[0];
      builder()
          ->LoadLiteral(Smi::FromInt(entry.token))
          .CompareReference(token_register_)
          .JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &fall_through);
      if (CommandUsesAccumulator(entry.command)) {
        builder()->LoadAccumulatorWithRegister(result_register_);
      }
      execution_control()->PerformCommand(entry.command, entry.statement,
                                          kNoSourcePosition);
    } else {
      // For multiple entries, build a jump table and switch on the token,
      // jumping to the fallthrough if none of them match.
      BytecodeJumpTable* jump_table =
          builder()->AllocateJumpTable(static_cast<int>(deferred_.size()), 0);
      builder()
          ->LoadAccumulatorWithRegister(token_register_)
          .SwitchOnSmiNoFeedback(jump_table)
          .Jump(&fall_through);
      for (const Entry& entry : deferred_) {
        builder()->Bind(jump_table, entry.token);
        if (CommandUsesAccumulator(entry.command)) {
          builder()->LoadAccumulatorWithRegister(result_register_);
        }
        execution_control()->PerformCommand(entry.command, entry.statement,
                                            kNoSourcePosition);
      }
    }
    builder()->Bind(&fall_through);
  }

  BytecodeArrayBuilder* builder() { return generator_->builder(); }
  ControlScope* execution_control() { return generator_->execution_control(); }

 private:
  // Returns a token for |command|, reusing the per-command cached token for
  // commands that never need a statement (return/async-return/rethrow).
  int GetTokenForCommand(Command command, Statement* statement) {
    switch (command) {
      case CMD_RETURN:
        return GetReturnToken();
      case CMD_ASYNC_RETURN:
        return GetAsyncReturnToken();
      case CMD_RETHROW:
        return kRethrowToken;
      default:
        // TODO(leszeks): We could also search for entries with the same
        // command and statement.
        return GetNewTokenForCommand(command, statement);
    }
  }

  // Lazily allocates the shared token for CMD_RETURN.
  int GetReturnToken() {
    if (return_token_ == -1) {
      return_token_ = GetNewTokenForCommand(CMD_RETURN, nullptr);
    }
    return return_token_;
  }

  // Lazily allocates the shared token for CMD_ASYNC_RETURN.
  int GetAsyncReturnToken() {
    if (async_return_token_ == -1) {
      async_return_token_ = GetNewTokenForCommand(CMD_ASYNC_RETURN, nullptr);
    }
    return async_return_token_;
  }

  // Appends a new entry; the entry's index in deferred_ is its token.
  int GetNewTokenForCommand(Command command, Statement* statement) {
    int token = static_cast<int>(deferred_.size());
    deferred_.push_back({command, statement, token});
    return token;
  }

  BytecodeGenerator* generator_;
  ZoneVector<Entry> deferred_;
  Register token_register_;   // Holds the dispatch token at runtime.
  Register result_register_;  // Holds the deferred result value at runtime.

  // Tokens for commands that don't need a statement.
  int return_token_;
  int async_return_token_;
};
// Scoped class for dealing with control flow reaching the function level.
class BytecodeGenerator::ControlScopeForTopLevel final
    : public BytecodeGenerator::ControlScope {
 public:
  explicit ControlScopeForTopLevel(BytecodeGenerator* generator)
      : ControlScope(generator) {}

 protected:
  // Terminal handler for commands that escape the whole function body. No
  // context popping is required in any case, since execution leaves the
  // method body entirely.
  bool Execute(Command command, Statement* statement,
               int source_position) override {
    switch (command) {
      case CMD_RETURN:
        generator()->BuildReturn(source_position);
        return true;
      case CMD_ASYNC_RETURN:
        generator()->BuildAsyncReturn(source_position);
        return true;
      case CMD_RETHROW:
        generator()->BuildReThrow();
        return true;
      case CMD_BREAK:
      case CMD_CONTINUE:
        // Break/continue must be intercepted by an inner scope and can never
        // reach the function level.
        UNREACHABLE();
    }
    return false;
  }
};
// Scoped class for enabling break inside blocks and switch blocks.
class BytecodeGenerator::ControlScopeForBreakable final
    : public BytecodeGenerator::ControlScope {
 public:
  ControlScopeForBreakable(BytecodeGenerator* generator,
                           BreakableStatement* statement,
                           BreakableControlFlowBuilder* control_builder)
      : ControlScope(generator),
        statement_(statement),
        control_builder_(control_builder) {}

 protected:
  // Intercepts CMD_BREAK aimed at this scope's statement; every other command
  // (and breaks targeting other statements) keeps propagating outwards.
  bool Execute(Command command, Statement* statement,
               int source_position) override {
    if (statement != statement_) return false;
    if (command != CMD_BREAK) return false;
    PopContextToExpectedDepth();
    control_builder_->Break();
    return true;
  }

 private:
  Statement* statement_;
  BreakableControlFlowBuilder* control_builder_;
};
// Scoped class for enabling 'break' and 'continue' in iteration
// constructs, e.g. do...while, while..., for...
class BytecodeGenerator::ControlScopeForIteration final
    : public BytecodeGenerator::ControlScope {
 public:
  ControlScopeForIteration(BytecodeGenerator* generator,
                           IterationStatement* statement,
                           LoopBuilder* loop_builder)
      : ControlScope(generator),
        statement_(statement),
        loop_builder_(loop_builder) {}

 protected:
  // Handles break/continue commands that target this loop. Returns and
  // rethrows are delegated to an outer scope.
  bool Execute(Command command, Statement* statement,
               int source_position) override {
    if (statement != statement_) return false;
    if (command == CMD_BREAK) {
      PopContextToExpectedDepth();
      loop_builder_->Break();
      return true;
    }
    if (command == CMD_CONTINUE) {
      PopContextToExpectedDepth();
      loop_builder_->Continue();
      return true;
    }
    return false;
  }

 private:
  Statement* statement_;
  LoopBuilder* loop_builder_;
};
// Scoped class for enabling 'throw' in try-catch constructs.
class BytecodeGenerator::ControlScopeForTryCatch final
    : public BytecodeGenerator::ControlScope {
 public:
  ControlScopeForTryCatch(BytecodeGenerator* generator,
                          TryCatchBuilder* try_catch_builder)
      : ControlScope(generator) {}

 protected:
  // Only rethrow is intercepted here. There is no need to pop contexts,
  // because execution re-enters the method body via the stack unwinding
  // mechanism, which itself restores contexts correctly.
  bool Execute(Command command, Statement* statement,
               int source_position) override {
    if (command != CMD_RETHROW) return false;
    generator()->BuildReThrow();
    return true;
  }
};
// Scoped class for enabling control flow through try-finally constructs.
class BytecodeGenerator::ControlScopeForTryFinally final
    : public BytecodeGenerator::ControlScope {
 public:
  ControlScopeForTryFinally(BytecodeGenerator* generator,
                            TryFinallyBuilder* try_finally_builder,
                            DeferredCommands* commands)
      : ControlScope(generator),
        try_finally_builder_(try_finally_builder),
        commands_(commands) {}

 protected:
  // Intercepts every command: it is recorded as a deferred entry and control
  // transfers to the finally-block, which re-applies the command afterwards
  // via DeferredCommands::ApplyDeferredCommands.
  bool Execute(Command command, Statement* statement,
               int source_position) override {
    switch (command) {
      case CMD_BREAK:
      case CMD_CONTINUE:
      case CMD_RETURN:
      case CMD_ASYNC_RETURN:
      case CMD_RETHROW:
        PopContextToExpectedDepth();
        // We don't record source_position here since we don't generate return
        // bytecode right here and will generate it later as part of finally
        // block. Each return bytecode generated in finally block will get own
        // return source position from corresponded return statement or we'll
        // use end of function if no return statement is presented.
        commands_->RecordCommand(command, statement);
        try_finally_builder_->LeaveTry();
        return true;
    }
    return false;
  }

 private:
  TryFinallyBuilder* try_finally_builder_;
  DeferredCommands* commands_;
};
// Allocate and fetch the coverage indices tracking NaryLogical Expressions.
class BytecodeGenerator::NaryCodeCoverageSlots {
 public:
  NaryCodeCoverageSlots(BytecodeGenerator* generator, NaryOperation* expr)
      : generator_(generator) {
    // Without a block coverage builder no slots are needed at all.
    if (generator_->block_coverage_builder_ == nullptr) return;
    const size_t count = expr->subsequent_length();
    coverage_slots_.reserve(count);
    for (size_t i = 0; i < count; i++) {
      coverage_slots_.push_back(
          generator_->AllocateNaryBlockCoverageSlotIfEnabled(expr, i));
    }
  }

  // Returns the coverage slot recorded for the given subsequent expression,
  // or kNoCoverageArraySlot when coverage is disabled.
  int GetSlotFor(size_t subsequent_expr_index) const {
    if (generator_->block_coverage_builder_ == nullptr) {
      return BlockCoverageBuilder::kNoCoverageArraySlot;
    }
    DCHECK_GT(coverage_slots_.size(), subsequent_expr_index);
    return coverage_slots_[subsequent_expr_index];
  }

 private:
  BytecodeGenerator* generator_;
  std::vector<int> coverage_slots_;
};
// Walks outwards through the stack of control scopes until one of them
// handles the command. A command that no scope accepts is a bug, since the
// top-level scope handles every escaping command.
void BytecodeGenerator::ControlScope::PerformCommand(Command command,
                                                     Statement* statement,
                                                     int source_position) {
  for (ControlScope* scope = this; scope != nullptr; scope = scope->outer()) {
    if (scope->Execute(command, statement, source_position)) return;
  }
  UNREACHABLE();
}
void BytecodeGenerator::ControlScope::PopContextToExpectedDepth() {
  // Pop context to the expected depth. A single {PopContext} bytecode can in
  // fact pop multiple contexts at once because it restores from a saved
  // register; nothing needs to happen when we are already at the right depth.
  if (generator()->execution_context() == context()) return;
  generator()->builder()->PopContext(context()->reg());
}
// Scoped class that records the register allocator's watermark on entry and
// releases all registers allocated past it when the scope exits.
class BytecodeGenerator::RegisterAllocationScope final {
 public:
  explicit RegisterAllocationScope(BytecodeGenerator* generator)
      : generator_(generator),
        outer_next_register_index_(
            generator->register_allocator()->next_register_index()) {}
  ~RegisterAllocationScope() {
    // Free every register allocated since this scope was entered.
    generator_->register_allocator()->ReleaseRegisters(
        outer_next_register_index_);
  }
  RegisterAllocationScope(const RegisterAllocationScope&) = delete;
  RegisterAllocationScope& operator=(const RegisterAllocationScope&) = delete;

  BytecodeGenerator* generator() const { return generator_; }

 private:
  BytecodeGenerator* generator_;
  // Allocator watermark at scope entry; registers at or above this index are
  // released on destruction.
  int outer_next_register_index_;
};
// Scoped class that optionally spills the accumulator into a fresh register
// on entry and restores it on exit, depending on the requested mode.
class BytecodeGenerator::AccumulatorPreservingScope final {
 public:
  explicit AccumulatorPreservingScope(BytecodeGenerator* generator,
                                      AccumulatorPreservingMode mode)
      : generator_(generator) {
    if (mode == AccumulatorPreservingMode::kPreserve) {
      saved_accumulator_register_ =
          generator_->register_allocator()->NewRegister();
      generator_->builder()->StoreAccumulatorInRegister(
          saved_accumulator_register_);
    }
  }
  ~AccumulatorPreservingScope() {
    // The saved register is only assigned in kPreserve mode, so is_valid()
    // doubles as the "did we save?" flag here.
    if (saved_accumulator_register_.is_valid()) {
      generator_->builder()->LoadAccumulatorWithRegister(
          saved_accumulator_register_);
    }
  }
  AccumulatorPreservingScope(const AccumulatorPreservingScope&) = delete;
  AccumulatorPreservingScope& operator=(const AccumulatorPreservingScope&) =
      delete;

 private:
  BytecodeGenerator* generator_;
  Register saved_accumulator_register_;
};
// Scoped base class for determining how the result of an expression will be
// used.
class BytecodeGenerator::ExpressionResultScope {
 public:
  ExpressionResultScope(BytecodeGenerator* generator, Expression::Context kind)
      : outer_(generator->execution_result()),
        allocator_(generator),
        kind_(kind),
        type_hint_(TypeHint::kAny) {
    generator->set_execution_result(this);
  }
  ~ExpressionResultScope() {
    // Restore the previous result scope; the embedded allocator_ releases any
    // registers allocated within this scope.
    allocator_.generator()->set_execution_result(outer_);
  }
  ExpressionResultScope(const ExpressionResultScope&) = delete;
  ExpressionResultScope& operator=(const ExpressionResultScope&) = delete;

  bool IsEffect() const { return kind_ == Expression::kEffect; }
  bool IsValue() const { return kind_ == Expression::kValue; }
  bool IsTest() const { return kind_ == Expression::kTest; }

  // Downcast to TestResultScope. reinterpret_cast is used because
  // TestResultScope is only declared, not yet defined, at this point in the
  // file; the DCHECK guards that the dynamic kind is correct.
  TestResultScope* AsTest() {
    DCHECK(IsTest());
    return reinterpret_cast<TestResultScope*>(this);
  }

  // Specify expression always returns a Boolean result value.
  void SetResultIsBoolean() {
    DCHECK_EQ(type_hint_, TypeHint::kAny);
    type_hint_ = TypeHint::kBoolean;
  }
  // Specify expression always returns a String result value.
  void SetResultIsString() {
    DCHECK_EQ(type_hint_, TypeHint::kAny);
    type_hint_ = TypeHint::kString;
  }
  TypeHint type_hint() const { return type_hint_; }

 private:
  ExpressionResultScope* outer_;  // Result scope restored on destruction.
  RegisterAllocationScope allocator_;
  Expression::Context kind_;
  TypeHint type_hint_;
};
// Scoped class used when the current expression is evaluated for effect only,
// i.e. no result value is expected by the surrounding code.
class BytecodeGenerator::EffectResultScope final
    : public ExpressionResultScope {
 public:
  explicit EffectResultScope(BytecodeGenerator* generator)
      : ExpressionResultScope(generator, Expression::kEffect) {}
};
// Scoped class used when the result of the current expression to be
// evaluated should go into the interpreter's accumulator.
class BytecodeGenerator::ValueResultScope final : public ExpressionResultScope {
 public:
  explicit ValueResultScope(BytecodeGenerator* generator)
      : ExpressionResultScope(generator, Expression::kValue) {}
};
// Scoped class used when the result of the current expression to be
// evaluated is only tested with jumps to two branches.
class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
 public:
  TestResultScope(BytecodeGenerator* generator, BytecodeLabels* then_labels,
                  BytecodeLabels* else_labels, TestFallthrough fallthrough)
      : ExpressionResultScope(generator, Expression::kTest),
        result_consumed_by_test_(false),
        fallthrough_(fallthrough),
        then_labels_(then_labels),
        else_labels_(else_labels) {}
  TestResultScope(const TestResultScope&) = delete;
  TestResultScope& operator=(const TestResultScope&) = delete;

  // Used when code special cases for TestResultScope and consumes any
  // possible value by testing and jumping to a then/else label.
  void SetResultConsumedByTest() { result_consumed_by_test_ = true; }
  bool result_consumed_by_test() { return result_consumed_by_test_; }

  // Inverts the control flow of the operation, swapping the then and else
  // labels and the fallthrough.
  void InvertControlFlow() {
    std::swap(then_labels_, else_labels_);
    fallthrough_ = inverted_fallthrough();
  }

  BytecodeLabel* NewThenLabel() { return then_labels_->New(); }
  BytecodeLabel* NewElseLabel() { return else_labels_->New(); }

  BytecodeLabels* then_labels() const { return then_labels_; }
  BytecodeLabels* else_labels() const { return else_labels_; }

  void set_then_labels(BytecodeLabels* then_labels) {
    then_labels_ = then_labels;
  }
  void set_else_labels(BytecodeLabels* else_labels) {
    else_labels_ = else_labels;
  }

  TestFallthrough fallthrough() const { return fallthrough_; }
  // Returns the fallthrough with then/else exchanged; kNone stays kNone.
  TestFallthrough inverted_fallthrough() const {
    switch (fallthrough_) {
      case TestFallthrough::kThen:
        return TestFallthrough::kElse;
      case TestFallthrough::kElse:
        return TestFallthrough::kThen;
      default:
        return TestFallthrough::kNone;
    }
  }
  void set_fallthrough(TestFallthrough fallthrough) {
    fallthrough_ = fallthrough;
  }

 private:
  bool result_consumed_by_test_;
  TestFallthrough fallthrough_;  // Which branch falls through without a jump.
  BytecodeLabels* then_labels_;
  BytecodeLabels* else_labels_;
};
// Used to build a list of toplevel declaration data.
class BytecodeGenerator::TopLevelDeclarationsBuilder final : public ZoneObject {
 public:
  // Materializes the recorded declarations into a FixedArray consumed when
  // processing top-level (global or module) declarations. Returns a null
  // handle if a closure's SharedFunctionInfo cannot be created; the caller is
  // expected to set a stack-overflow exception in that case.
  template <typename LocalIsolate>
  Handle<FixedArray> AllocateDeclarations(UnoptimizedCompilationInfo* info,
                                          BytecodeGenerator* generator,
                                          Handle<Script> script,
                                          LocalIsolate* isolate) {
    DCHECK(has_constant_pool_entry_);

    Handle<FixedArray> data =
        isolate->factory()->NewFixedArray(entry_slots_, AllocationType::kOld);

    int array_index = 0;
    if (info->scope()->is_module_scope()) {
      for (Declaration* decl : *info->scope()->declarations()) {
        Variable* var = decl->var();
        if (!var->is_used()) continue;
        if (var->location() != VariableLocation::MODULE) continue;
#ifdef DEBUG
        int start = array_index;
#endif
        if (decl->IsFunctionDeclaration()) {
          FunctionLiteral* f = static_cast<FunctionDeclaration*>(decl)->fun();
          Handle<SharedFunctionInfo> sfi(
              Compiler::GetSharedFunctionInfo(f, script, isolate));
          // Return a null handle if any initial values can't be created. Caller
          // will set stack overflow.
          if (sfi.is_null()) return Handle<FixedArray>();
          data->set(array_index++, *sfi);
          int literal_index = generator->GetCachedCreateClosureSlot(f);
          data->set(array_index++, Smi::FromInt(literal_index));
          DCHECK(var->IsExport());
          data->set(array_index++, Smi::FromInt(var->index()));
          DCHECK_EQ(start + kModuleFunctionDeclarationSize, array_index);
        } else if (var->IsExport() && var->binding_needs_init()) {
          data->set(array_index++, Smi::FromInt(var->index()));
          DCHECK_EQ(start + kModuleVariableDeclarationSize, array_index);
        }
      }
    } else {
      for (Declaration* decl : *info->scope()->declarations()) {
        Variable* var = decl->var();
        if (!var->is_used()) continue;
        if (var->location() != VariableLocation::UNALLOCATED) continue;
#ifdef DEBUG
        int start = array_index;
#endif
        if (decl->IsVariableDeclaration()) {
          data->set(array_index++, *var->raw_name()->string());
          DCHECK_EQ(start + kGlobalVariableDeclarationSize, array_index);
        } else {
          FunctionLiteral* f = static_cast<FunctionDeclaration*>(decl)->fun();
          Handle<SharedFunctionInfo> sfi(
              Compiler::GetSharedFunctionInfo(f, script, isolate));
          // Return a null handle if any initial values can't be created. Caller
          // will set stack overflow.
          if (sfi.is_null()) return Handle<FixedArray>();
          data->set(array_index++, *sfi);
          int literal_index = generator->GetCachedCreateClosureSlot(f);
          data->set(array_index++, Smi::FromInt(literal_index));
          DCHECK_EQ(start + kGlobalFunctionDeclarationSize, array_index);
        }
      }
    }
    // Every recorded slot must have been filled exactly once.
    DCHECK_EQ(array_index, data->length());
    return data;
  }

  size_t constant_pool_entry() {
    DCHECK(has_constant_pool_entry_);
    return constant_pool_entry_;
  }

  // Records the (single) constant pool entry reserved for the declarations
  // array; may only be set once, and only when declarations exist.
  void set_constant_pool_entry(size_t constant_pool_entry) {
    DCHECK(has_top_level_declaration());
    DCHECK(!has_constant_pool_entry_);
    constant_pool_entry_ = constant_pool_entry;
    has_constant_pool_entry_ = true;
  }

  // Slot bookkeeping: each record_* call grows the eventual FixedArray by the
  // footprint of one declaration of that kind.
  void record_global_variable_declaration() {
    entry_slots_ += kGlobalVariableDeclarationSize;
  }
  void record_global_function_declaration() {
    entry_slots_ += kGlobalFunctionDeclarationSize;
  }
  void record_module_variable_declaration() {
    entry_slots_ += kModuleVariableDeclarationSize;
  }
  void record_module_function_declaration() {
    entry_slots_ += kModuleFunctionDeclarationSize;
  }
  bool has_top_level_declaration() { return entry_slots_ > 0; }
  bool processed() { return processed_; }
  void mark_processed() { processed_ = true; }

 private:
  // Slot footprint of each declaration kind within the FixedArray. Static so
  // they occupy no per-instance storage and are usable in constant
  // expressions (they were previously non-static `const int` members).
  static constexpr int kGlobalVariableDeclarationSize = 1;
  static constexpr int kGlobalFunctionDeclarationSize = 2;
  static constexpr int kModuleVariableDeclarationSize = 1;
  static constexpr int kModuleFunctionDeclarationSize = 3;

  size_t constant_pool_entry_ = 0;
  int entry_slots_ = 0;  // Total FixedArray length required so far.
  bool has_constant_pool_entry_ = false;
  bool processed_ = false;
};
// Scoped class that temporarily makes |scope| the generator's current scope,
// restoring the previous scope on destruction. A null |scope| is a no-op.
class BytecodeGenerator::CurrentScope final {
 public:
  CurrentScope(BytecodeGenerator* generator, Scope* scope)
      : generator_(generator), outer_scope_(generator->current_scope()) {
    if (scope == nullptr) return;
    DCHECK_EQ(outer_scope_, scope->outer_scope());
    generator_->set_current_scope(scope);
  }
  ~CurrentScope() {
    if (generator_->current_scope() != outer_scope_) {
      generator_->set_current_scope(outer_scope_);
    }
  }

 private:
  BytecodeGenerator* generator_;
  Scope* outer_scope_;
};
// Zone-allocated cache mapping (slot kind, index, AST key) triples to
// feedback slot indices, so equivalent accesses can share feedback slots.
class BytecodeGenerator::FeedbackSlotCache : public ZoneObject {
 public:
  enum class SlotKind {
    kStoreGlobalSloppy,
    kStoreGlobalStrict,
    kStoreNamedStrict,
    kStoreNamedSloppy,
    kLoadProperty,
    kLoadSuperProperty,
    kLoadGlobalNotInsideTypeof,
    kLoadGlobalInsideTypeof,
    kClosureFeedbackCell
  };

  explicit FeedbackSlotCache(Zone* zone) : map_(zone) {}

  // Caches |slot_index| under the given kind and key. The first insertion for
  // a key wins; later insertions with the same key are ignored.
  void Put(SlotKind slot_kind, Variable* variable, int slot_index) {
    PutImpl(slot_kind, 0, variable, slot_index);
  }
  void Put(SlotKind slot_kind, AstNode* node, int slot_index) {
    PutImpl(slot_kind, 0, node, slot_index);
  }
  void Put(SlotKind slot_kind, int variable_index, const AstRawString* name,
           int slot_index) {
    PutImpl(slot_kind, variable_index, name, slot_index);
  }
  void Put(SlotKind slot_kind, const AstRawString* name, int slot_index) {
    PutImpl(slot_kind, 0, name, slot_index);
  }

  // Returns the cached slot index, or -1 when the key is not present.
  int Get(SlotKind slot_kind, Variable* variable) const {
    return GetImpl(slot_kind, 0, variable);
  }
  int Get(SlotKind slot_kind, AstNode* node) const {
    return GetImpl(slot_kind, 0, node);
  }
  int Get(SlotKind slot_kind, int variable_index,
          const AstRawString* name) const {
    return GetImpl(slot_kind, variable_index, name);
  }
  int Get(SlotKind slot_kind, const AstRawString* name) const {
    return GetImpl(slot_kind, 0, name);
  }

 private:
  using Key = std::tuple<SlotKind, int, const void*>;

  void PutImpl(SlotKind slot_kind, int index, const void* node,
               int slot_index) {
    // insert() keeps an existing mapping, matching the first-wins contract.
    map_.insert({Key{slot_kind, index, node}, slot_index});
  }
  int GetImpl(SlotKind slot_kind, int index, const void* node) const {
    auto iter = map_.find(Key{slot_kind, index, node});
    return iter == map_.end() ? -1 : iter->second;
  }

  ZoneMap<Key, int> map_;
};
// Bundles the registers holding an iterator object and its "next" method,
// together with the iterator's type (normal or async).
class BytecodeGenerator::IteratorRecord final {
 public:
  IteratorRecord(Register object_register, Register next_register,
                 IteratorType type = IteratorType::kNormal)
      : type_(type), object_(object_register), next_(next_register) {
    // Both registers must already be allocated.
    DCHECK(object_.is_valid() && next_.is_valid());
  }

  IteratorType type() const { return type_; }
  Register object() const { return object_; }
  Register next() const { return next_; }

 private:
  IteratorType type_;
  Register object_;
  Register next_;
};
// Scoped class installing a fresh set of jump labels for the null/undefined
// short-circuit paths of an optional chain, restoring the enclosing chain's
// labels on destruction.
class BytecodeGenerator::OptionalChainNullLabelScope final {
 public:
  explicit OptionalChainNullLabelScope(BytecodeGenerator* bytecode_generator)
      : bytecode_generator_(bytecode_generator),
        labels_(bytecode_generator->zone()),
        prev_(bytecode_generator->optional_chaining_null_labels_) {
    bytecode_generator_->optional_chaining_null_labels_ = &labels_;
  }
  ~OptionalChainNullLabelScope() {
    bytecode_generator_->optional_chaining_null_labels_ = prev_;
  }

  BytecodeLabels* labels() { return &labels_; }

 private:
  BytecodeGenerator* bytecode_generator_;
  BytecodeLabels labels_;
  BytecodeLabels* prev_;
};
// LoopScope delimits the scope of {loop}, from its header to its final jump.
// It should be constructed iff a (conceptual) back edge should be produced. In
// the case of creating a LoopBuilder but never emitting the loop, it is valid
// to skip the creation of LoopScope.
class BytecodeGenerator::LoopScope final {
 public:
  explicit LoopScope(BytecodeGenerator* bytecode_generator, LoopBuilder* loop)
      : bytecode_generator_(bytecode_generator),
        parent_loop_scope_(bytecode_generator_->current_loop_scope()),
        loop_builder_(loop) {
    // Emit the loop header and make this the innermost loop for the lifetime
    // of the scope.
    loop_builder_->LoopHeader();
    bytecode_generator_->set_current_loop_scope(this);
    bytecode_generator_->loop_depth_++;
  }
  ~LoopScope() {
    // Ordering matters: the depth is decremented and the parent scope restored
    // *before* emitting the back edge, so JumpToHeader sees the enclosing
    // loop's depth and builder.
    bytecode_generator_->loop_depth_--;
    bytecode_generator_->set_current_loop_scope(parent_loop_scope_);
    DCHECK_GE(bytecode_generator_->loop_depth_, 0);
    loop_builder_->JumpToHeader(
        bytecode_generator_->loop_depth_,
        parent_loop_scope_ ? parent_loop_scope_->loop_builder_ : nullptr);
  }

 private:
  BytecodeGenerator* const bytecode_generator_;
  LoopScope* const parent_loop_scope_;
  LoopBuilder* const loop_builder_;
};
namespace {

// A zone-allocated getter/setter pair sharing the same property name; either
// side may remain nullptr.
template <typename PropertyT>
struct Accessors : public ZoneObject {
  Accessors() : getter(nullptr), setter(nullptr) {}
  PropertyT* getter;
  PropertyT* setter;
};

// A map from property names to getter/setter pairs allocated in the zone that
// also provides a way of accessing the pairs in the order they were first
// added so that the generated bytecode is always the same.
template <typename PropertyT>
class AccessorTable
    : public base::TemplateHashMap<Literal, Accessors<PropertyT>,
                                   bool (*)(void*, void*),
                                   ZoneAllocationPolicy> {
 public:
  explicit AccessorTable(Zone* zone)
      : base::TemplateHashMap<Literal, Accessors<PropertyT>,
                              bool (*)(void*, void*), ZoneAllocationPolicy>(
            Literal::Match, ZoneAllocationPolicy(zone)),
        zone_(zone) {}

  // Returns the accessor pair for |key|, creating a new empty pair (and
  // recording its insertion order) on first lookup.
  Accessors<PropertyT>* LookupOrInsert(Literal* key) {
    auto it = this->find(key, true);
    if (it->second == nullptr) {
      it->second = zone_->New<Accessors<PropertyT>>();
      ordered_accessors_.push_back({key, it->second});
    }
    return it->second;
  }

  // Pairs in first-insertion order, used for deterministic iteration.
  const std::vector<std::pair<Literal*, Accessors<PropertyT>*>>&
  ordered_accessors() {
    return ordered_accessors_;
  }

 private:
  std::vector<std::pair<Literal*, Accessors<PropertyT>*>> ordered_accessors_;
  Zone* zone_;
};

}  // namespace
#ifdef DEBUG
// Debug-only helper: returns true iff |literal| is contained in
// |eager_literals|. Uses std::find rather than a hand-rolled loop.
static bool IsInEagerLiterals(
    FunctionLiteral* literal,
    const std::vector<FunctionLiteral*>& eager_literals) {
  return std::find(eager_literals.begin(), eager_literals.end(), literal) !=
         eager_literals.end();
}
#endif  // DEBUG
// Sets up the generator state for compiling one function: the bytecode array
// builder is dimensioned from |info| (parameter count, stack slots, feedback
// vector spec, source-position mode), and all visitation state starts empty.
// |eager_inner_literals|, if non-null, collects inner functions that should
// be compiled eagerly.
BytecodeGenerator::BytecodeGenerator(
    Zone* compile_zone, UnoptimizedCompilationInfo* info,
    const AstStringConstants* ast_string_constants,
    std::vector<FunctionLiteral*>* eager_inner_literals)
    : zone_(compile_zone),
      builder_(zone(), info->num_parameters_including_this(),
               info->scope()->num_stack_slots(), info->feedback_vector_spec(),
               info->SourcePositionRecordingMode()),
      info_(info),
      ast_string_constants_(ast_string_constants),
      closure_scope_(info->scope()),
      current_scope_(info->scope()),
      eager_inner_literals_(eager_inner_literals),
      feedback_slot_cache_(zone()->New<FeedbackSlotCache>(zone())),
      top_level_builder_(zone()->New<TopLevelDeclarationsBuilder>()),
      block_coverage_builder_(nullptr),
      function_literals_(0, zone()),
      native_function_literals_(0, zone()),
      object_literals_(0, zone()),
      array_literals_(0, zone()),
      class_literals_(0, zone()),
      template_objects_(0, zone()),
      execution_control_(nullptr),
      execution_context_(nullptr),
      execution_result_(nullptr),
      incoming_new_target_or_generator_(),
      optional_chaining_null_labels_(nullptr),
      dummy_feedback_slot_(feedback_spec(), FeedbackSlotKind::kCompareOp),
      generator_jump_table_(nullptr),
      suspend_count_(0),
      loop_depth_(0),
      current_loop_scope_(nullptr),
      catch_prediction_(HandlerTable::UNCAUGHT) {
  DCHECK_EQ(closure_scope(), closure_scope()->GetClosureScope());
  // Block coverage is only tracked when the compilation carries a source
  // range map (i.e. when coverage collection is enabled).
  if (info->has_source_range_map()) {
    block_coverage_builder_ = zone()->New<BlockCoverageBuilder>(
        zone(), builder(), info->source_range_map());
  }
}
namespace {

// Selects the scope type used to null out the context during finalization.
// Main-isolate compilations use the real NullContextScope; LocalIsolate
// (background) compilations substitute a no-op stand-in, since the scope's
// only purpose here is the debug-mode context-independence check.
template <typename Isolate>
struct NullContextScopeHelper;

template <>
struct NullContextScopeHelper<Isolate> {
  using Type = NullContextScope;
};

template <>
struct NullContextScopeHelper<LocalIsolate> {
  // No-op replacement for NullContextScope when compiling on a LocalIsolate.
  class DummyNullContextScope {
   public:
    explicit DummyNullContextScope(LocalIsolate*) {}
  };
  using Type = DummyNullContextScope;
};

template <typename Isolate>
using NullContextScopeFor = typename NullContextScopeHelper<Isolate>::Type;

}  // namespace
// Finalizes generation: allocates all deferred constant-pool entries,
// attaches coverage info if it was collected, and converts the builder's
// contents into a BytecodeArray. Returns an empty handle if a stack overflow
// was recorded at any point.
template <typename LocalIsolate>
Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
    LocalIsolate* isolate, Handle<Script> script) {
  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
#ifdef DEBUG
  // Unoptimized compilation should be context-independent. Verify that we
  // don't access the native context by nulling it out during finalization.
  NullContextScopeFor<LocalIsolate> null_context_scope(isolate);
#endif

  AllocateDeferredConstants(isolate, script);

  if (block_coverage_builder_) {
    Handle<CoverageInfo> coverage_info =
        isolate->factory()->NewCoverageInfo(block_coverage_builder_->slots());
    info()->set_coverage_info(coverage_info);
    if (FLAG_trace_block_coverage) {
      StdoutStream os;
      coverage_info->CoverageInfoPrint(os, info()->literal()->GetDebugName());
    }
  }

  if (HasStackOverflow()) return Handle<BytecodeArray>();
  Handle<BytecodeArray> bytecode_array = builder()->ToBytecodeArray(isolate);

  // Record which register carries the incoming new.target or generator
  // object (see AllocateTopLevelRegisters), if one was allocated.
  if (incoming_new_target_or_generator_.is_valid()) {
    bytecode_array->set_incoming_new_target_or_generator_register(
        incoming_new_target_or_generator_);
  }

  return bytecode_array;
}

template Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
    Isolate* isolate, Handle<Script> script);
template Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
    LocalIsolate* isolate, Handle<Script> script);
// Converts the builder's collected source positions into a ByteArray and
// logs the code line-position mapping via the code-event logger.
template <typename LocalIsolate>
Handle<ByteArray> BytecodeGenerator::FinalizeSourcePositionTable(
    LocalIsolate* isolate) {
  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
#ifdef DEBUG
  // Unoptimized compilation should be context-independent. Verify that we
  // don't access the native context by nulling it out during finalization.
  NullContextScopeFor<LocalIsolate> null_context_scope(isolate);
#endif

  Handle<ByteArray> source_position_table =
      builder()->ToSourcePositionTable(isolate);

  LOG_CODE_EVENT(isolate,
                 CodeLinePosInfoRecordEvent(
                     info_->bytecode_array()->GetFirstBytecodeAddress(),
                     *source_position_table));

  return source_position_table;
}

template Handle<ByteArray> BytecodeGenerator::FinalizeSourcePositionTable(
    Isolate* isolate);
template Handle<ByteArray> BytecodeGenerator::FinalizeSourcePositionTable(
    LocalIsolate* isolate);
#ifdef DEBUG
// Debug-only: delegates to the builder to compare the bytecode it emitted
// against |bytecode|.
int BytecodeGenerator::CheckBytecodeMatches(BytecodeArray bytecode) {
  return builder()->CheckBytecodeMatches(bytecode);
}
#endif
// Fills in all constant-pool entries whose heap values could not be created
// during the main generation pass (GenerateBytecode runs with heap allocation
// disallowed): top-level declaration data, shared function infos, literal
// boilerplates and template-object descriptions.
template <typename LocalIsolate>
void BytecodeGenerator::AllocateDeferredConstants(LocalIsolate* isolate,
                                                  Handle<Script> script) {
  if (top_level_builder()->has_top_level_declaration()) {
    // Build global declaration pair array.
    Handle<FixedArray> declarations = top_level_builder()->AllocateDeclarations(
        info(), this, script, isolate);
    if (declarations.is_null()) return SetStackOverflow();
    builder()->SetDeferredConstantPoolEntry(
        top_level_builder()->constant_pool_entry(), declarations);
  }

  // Find or build shared function infos.
  for (std::pair<FunctionLiteral*, size_t> literal : function_literals_) {
    FunctionLiteral* expr = literal.first;
    Handle<SharedFunctionInfo> shared_info =
        Compiler::GetSharedFunctionInfo(expr, script, isolate);
    if (shared_info.is_null()) return SetStackOverflow();
    builder()->SetDeferredConstantPoolEntry(literal.second, shared_info);
  }

  // Find or build shared function infos for the native function templates.
  for (std::pair<NativeFunctionLiteral*, size_t> literal :
       native_function_literals_) {
    // This should only happen for main-thread compilations.
    DCHECK((std::is_same<Isolate, v8::internal::Isolate>::value));

    NativeFunctionLiteral* expr = literal.first;
    v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);

    // Compute the function template for the native function.
    v8::Local<v8::FunctionTemplate> info =
        expr->extension()->GetNativeFunctionTemplate(
            v8_isolate, Utils::ToLocal(expr->name()));
    DCHECK(!info.IsEmpty());

    Handle<SharedFunctionInfo> shared_info =
        FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
            isolate, Utils::OpenHandle(*info), expr->name());
    DCHECK(!shared_info.is_null());
    builder()->SetDeferredConstantPoolEntry(literal.second, shared_info);
  }

  // Build object literal constant properties
  for (std::pair<ObjectLiteral*, size_t> literal : object_literals_) {
    ObjectLiteral* object_literal = literal.first;
    if (object_literal->properties_count() > 0) {
      // If constant properties is an empty fixed array, we've already added it
      // to the constant pool when visiting the object literal.
      Handle<ObjectBoilerplateDescription> constant_properties =
          object_literal->GetOrBuildBoilerplateDescription(isolate);
      builder()->SetDeferredConstantPoolEntry(literal.second,
                                              constant_properties);
    }
  }

  // Build array literal constant elements
  for (std::pair<ArrayLiteral*, size_t> literal : array_literals_) {
    ArrayLiteral* array_literal = literal.first;
    Handle<ArrayBoilerplateDescription> constant_elements =
        array_literal->GetOrBuildBoilerplateDescription(isolate);
    builder()->SetDeferredConstantPoolEntry(literal.second, constant_elements);
  }

  // Build class literal boilerplates.
  for (std::pair<ClassLiteral*, size_t> literal : class_literals_) {
    ClassLiteral* class_literal = literal.first;
    Handle<ClassBoilerplate> class_boilerplate =
        ClassBoilerplate::BuildClassBoilerplate(isolate, class_literal);
    builder()->SetDeferredConstantPoolEntry(literal.second, class_boilerplate);
  }

  // Build template literals.
  for (std::pair<GetTemplateObject*, size_t> literal : template_objects_) {
    GetTemplateObject* get_template_object = literal.first;
    Handle<TemplateObjectDescription> description =
        get_template_object->GetOrBuildDescription(isolate);
    builder()->SetDeferredConstantPoolEntry(literal.second, description);
  }
}

template void BytecodeGenerator::AllocateDeferredConstants(
    Isolate* isolate, Handle<Script> script);
template void BytecodeGenerator::AllocateDeferredConstants(
    LocalIsolate* isolate, Handle<Script> script);
namespace {
// Returns whether a new local context must be allocated and initialized for
// |scope|. Script and module scopes are excluded even when they need a
// context.
bool NeedsContextInitialization(DeclarationScope* scope) {
  if (!scope->NeedsContext()) return false;
  if (scope->is_script_scope()) return false;
  return !scope->is_module_scope();
}
} // namespace
// Top-level driver for bytecode generation. Runs the AST visitor over the
// function with heap allocation, handle allocation and handle dereferencing
// all disallowed; anything requiring the heap is deferred to
// FinalizeBytecode/AllocateDeferredConstants.
void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
  DisallowHeapAllocation no_allocation;
  DisallowHandleAllocation no_handles;
  DisallowHandleDereference no_deref;

  InitializeAstVisitor(stack_limit);

  // Initialize the incoming context.
  ContextScope incoming_context(this, closure_scope());

  // Initialize control scope.
  ControlScopeForTopLevel control(this);

  RegisterAllocationScope register_scope(this);

  AllocateTopLevelRegisters();

  builder()->EmitFunctionStartSourcePosition(
      info()->literal()->start_position());

  // Functions that can suspend (generators, async functions) need the state
  // dispatch prologue before any other code.
  if (info()->literal()->CanSuspend()) {
    BuildGeneratorPrologue();
  }

  if (NeedsContextInitialization(closure_scope())) {
    // Push a new inner context scope for the function.
    BuildNewLocalActivationContext();
    ContextScope local_function_context(this, closure_scope());
    BuildLocalActivationContextInitialization();
    GenerateBytecodeBody();
  } else {
    GenerateBytecodeBody();
  }

  // Check that we are not falling off the end.
  DCHECK(builder()->RemainderOfBlockIsDead());
}
// Emits the function's prologue bookkeeping (arguments/rest objects, special
// variables, generator object, tracing/profiling hooks), then its
// declarations and statements, and finally an implicit 'return undefined' if
// control can reach the end of the body.
void BytecodeGenerator::GenerateBytecodeBody() {
  // Build the arguments object if it is used.
  VisitArgumentsObject(closure_scope()->arguments());

  // Build rest arguments array if it is used.
  Variable* rest_parameter = closure_scope()->rest_parameter();
  VisitRestArgumentsArray(rest_parameter);

  // Build assignment to the function name or {.this_function}
  // variables if used.
  VisitThisFunctionVariable(closure_scope()->function_var());
  VisitThisFunctionVariable(closure_scope()->this_function_var());

  // Build assignment to {new.target} variable if it is used.
  VisitNewTargetVariable(closure_scope()->new_target_var());

  // Create a generator object if necessary and initialize the
  // {.generator_object} variable.
  FunctionLiteral* literal = info()->literal();
  if (IsResumableFunction(literal->kind())) {
    BuildGeneratorObjectVariableInitialization();
  }

  // Emit tracing call if requested to do so.
  if (FLAG_trace) builder()->CallRuntime(Runtime::kTraceEnter);

  // Emit type profile call for each parameter.
  if (info()->flags().collect_type_profile()) {
    feedback_spec()->AddTypeProfileSlot();
    int num_parameters = closure_scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Register parameter(builder()->Parameter(i));
      builder()->LoadAccumulatorWithRegister(parameter).CollectTypeProfile(
          closure_scope()->parameter(i)->initializer_position());
    }
  }

  // Increment the function-scope block coverage counter.
  BuildIncrementBlockCoverageCounterIfEnabled(literal, SourceRangeKind::kBody);

  // Visit declarations within the function scope.
  if (closure_scope()->is_script_scope()) {
    VisitGlobalDeclarations(closure_scope()->declarations());
  } else if (closure_scope()->is_module_scope()) {
    VisitModuleDeclarations(closure_scope()->declarations());
  } else {
    VisitDeclarations(closure_scope()->declarations());
  }

  // Emit initializing assignments for module namespace imports (if any).
  VisitModuleNamespaceImports();

  // The derived constructor case is handled in VisitCallSuper.
  if (IsBaseConstructor(function_kind())) {
    if (literal->class_scope_has_private_brand()) {
      BuildPrivateBrandInitialization(builder()->Receiver());
    }

    if (literal->requires_instance_members_initializer()) {
      BuildInstanceMemberInitialization(Register::function_closure(),
                                        builder()->Receiver());
    }
  }

  // Visit statements in the function body.
  VisitStatements(literal->body());

  // Emit an implicit return instruction in case control flow can fall off the
  // end of the function without an explicit return being present on all paths.
  if (!builder()->RemainderOfBlockIsDead()) {
    builder()->LoadUndefined();
    BuildReturn();
  }
}
// Chooses the register that will receive the incoming generator object (for
// resumable functions) or new.target (when {new.target} is used). When the
// corresponding variable is stack-allocated its own local register is reused;
// otherwise a fresh register is allocated.
void BytecodeGenerator::AllocateTopLevelRegisters() {
  if (IsResumableFunction(info()->literal()->kind())) {
    // The incoming register holds the generator object.
    Variable* generator_object_var = closure_scope()->generator_object_var();
    incoming_new_target_or_generator_ =
        generator_object_var->location() == VariableLocation::LOCAL
            ? GetRegisterForLocalVariable(generator_object_var)
            : register_allocator()->NewRegister();
  } else if (closure_scope()->new_target_var() != nullptr) {
    // The incoming register holds new.target.
    Variable* new_target_var = closure_scope()->new_target_var();
    incoming_new_target_or_generator_ =
        new_target_var->location() == VariableLocation::LOCAL
            ? GetRegisterForLocalVariable(new_target_var)
            : register_allocator()->NewRegister();
  }
}
// Emits the resume dispatch at the top of a resumable function: a jump table
// with one entry per suspend point, switched on the generator's state.
void BytecodeGenerator::BuildGeneratorPrologue() {
  DCHECK_GT(info()->literal()->suspend_count(), 0);
  DCHECK(generator_object().is_valid());
  generator_jump_table_ =
      builder()->AllocateJumpTable(info()->literal()->suspend_count(), 0);

  // If the generator is not undefined, this is a resume, so perform state
  // dispatch.
  builder()->SwitchOnGeneratorState(generator_object(), generator_jump_table_);

  // Otherwise, fall-through to the ordinary function prologue, after which we
  // will run into the generator object creation and other extra code inserted
  // by the parser.
}
// Compiles a block statement. If the block's scope requires a context, a new
// block context is created and pushed (via the RAII ContextScope) for the
// duration of the block's declarations and statements.
void BytecodeGenerator::VisitBlock(Block* stmt) {
  // Visit declarations and statements.
  CurrentScope current_scope(this, stmt->scope());
  if (stmt->scope() != nullptr && stmt->scope()->NeedsContext()) {
    BuildNewLocalBlockContext(stmt->scope());
    ContextScope scope(this, stmt->scope());
    VisitBlockDeclarationsAndStatements(stmt);
  } else {
    VisitBlockDeclarationsAndStatements(stmt);
  }
}
// Emits a block's declarations (if it has a scope) followed by its
// statements, inside a breakable control scope for the block.
void BytecodeGenerator::VisitBlockDeclarationsAndStatements(Block* stmt) {
  BlockBuilder block_builder(builder(), block_coverage_builder_, stmt);
  ControlScopeForBreakable execution_control(this, stmt, &block_builder);
  if (stmt->scope() != nullptr) {
    VisitDeclarations(stmt->scope()->declarations());
  }
  VisitStatements(stmt->statements());
}
// Emits declaration code for a variable: a hole store for bindings that need
// TDZ-style initialization (local, parameter, or context-allocated), or a
// runtime call for variables declared via sloppy eval (LOOKUP).
void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
  Variable* variable = decl->var();
  // Unused variables don't need to be visited.
  if (!variable->is_used()) return;

  switch (variable->location()) {
    case VariableLocation::UNALLOCATED:
    case VariableLocation::MODULE:
      // Handled by VisitGlobalDeclarations / VisitModuleDeclarations.
      UNREACHABLE();
    case VariableLocation::LOCAL:
      if (variable->binding_needs_init()) {
        Register destination(builder()->Local(variable->index()));
        builder()->LoadTheHole().StoreAccumulatorInRegister(destination);
      }
      break;
    case VariableLocation::PARAMETER:
      if (variable->binding_needs_init()) {
        Register destination(builder()->Parameter(variable->index()));
        builder()->LoadTheHole().StoreAccumulatorInRegister(destination);
      }
      break;
    case VariableLocation::REPL_GLOBAL:
      // REPL let's are stored in script contexts. They get initialized
      // with the hole the same way as normal context allocated variables.
    case VariableLocation::CONTEXT:
      if (variable->binding_needs_init()) {
        DCHECK_EQ(0, execution_context()->ContextChainDepth(variable->scope()));
        builder()->LoadTheHole().StoreContextSlot(execution_context()->reg(),
                                                  variable->index(), 0);
      }
      break;
    case VariableLocation::LOOKUP: {
      DCHECK_EQ(VariableMode::kDynamic, variable->mode());
      DCHECK(!variable->binding_needs_init());
      // Declare the variable at runtime in the eval scope.
      Register name = register_allocator()->NewRegister();
      builder()
          ->LoadLiteral(variable->raw_name())
          .StoreAccumulatorInRegister(name)
          .CallRuntime(Runtime::kDeclareEvalVar, name);
      break;
    }
  }
}
// Emits declaration code for a function declaration: creates the closure and
// stores it into the declared variable's location (register, context slot,
// or via a runtime call for sloppy-eval LOOKUP variables).
void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
  Variable* variable = decl->var();
  DCHECK(variable->mode() == VariableMode::kLet ||
         variable->mode() == VariableMode::kVar ||
         variable->mode() == VariableMode::kDynamic);
  // Unused variables don't need to be visited.
  if (!variable->is_used()) return;

  switch (variable->location()) {
    case VariableLocation::UNALLOCATED:
    case VariableLocation::MODULE:
      // Handled by VisitGlobalDeclarations / VisitModuleDeclarations.
      UNREACHABLE();
    case VariableLocation::PARAMETER:
    case VariableLocation::LOCAL: {
      VisitFunctionLiteral(decl->fun());
      BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
      break;
    }
    case VariableLocation::REPL_GLOBAL:
    case VariableLocation::CONTEXT: {
      DCHECK_EQ(0, execution_context()->ContextChainDepth(variable->scope()));
      VisitFunctionLiteral(decl->fun());
      builder()->StoreContextSlot(execution_context()->reg(), variable->index(),
                                  0);
      break;
    }
    case VariableLocation::LOOKUP: {
      // Declare via runtime call: args are [name, closure].
      RegisterList args = register_allocator()->NewRegisterList(2);
      builder()
          ->LoadLiteral(variable->raw_name())
          .StoreAccumulatorInRegister(args[0]);
      VisitFunctionLiteral(decl->fun());
      builder()->StoreAccumulatorInRegister(args[1]).CallRuntime(
          Runtime::kDeclareEvalFunction, args);
      break;
    }
  }
  DCHECK_IMPLIES(
      eager_inner_literals_ != nullptr && decl->fun()->ShouldEagerCompile(),
      IsInEagerLiterals(decl->fun(), *eager_inner_literals_));
}
// For module scopes, initializes each `import * as ns` binding by fetching
// the requested module's namespace object at runtime and assigning it to the
// local variable. No-op for non-module scopes.
void BytecodeGenerator::VisitModuleNamespaceImports() {
  if (!closure_scope()->is_module_scope()) return;

  RegisterAllocationScope register_scope(this);
  Register module_request = register_allocator()->NewRegister();

  SourceTextModuleDescriptor* descriptor =
      closure_scope()->AsModuleScope()->module();
  for (auto entry : descriptor->namespace_imports()) {
    builder()
        ->LoadLiteral(Smi::FromInt(entry->module_request))
        .StoreAccumulatorInRegister(module_request)
        .CallRuntime(Runtime::kGetModuleNamespace, module_request);
    Variable* var = closure_scope()->LookupInModule(entry->local_name);
    BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kElided);
  }
}
// Emits one runtime call (|id|) that performs all top-level declarations
// recorded so far. The declaration data itself is a deferred constant-pool
// entry, filled in later by AllocateDeferredConstants. No-op if there are no
// top-level declarations.
void BytecodeGenerator::BuildDeclareCall(Runtime::FunctionId id) {
  if (!top_level_builder()->has_top_level_declaration()) return;
  DCHECK(!top_level_builder()->processed());

  top_level_builder()->set_constant_pool_entry(
      builder()->AllocateDeferredConstantPoolEntry());

  // Emit code to declare globals. Args are [declarations, closure].
  RegisterList args = register_allocator()->NewRegisterList(2);
  builder()
      ->LoadConstantPoolEntry(top_level_builder()->constant_pool_entry())
      .StoreAccumulatorInRegister(args[0])
      .MoveRegister(Register::function_closure(), args[1])
      .CallRuntime(id, args);

  top_level_builder()->mark_processed();
}
// Processes the declarations of a module scope: MODULE-allocated variables
// are only recorded (the actual declaration happens via the
// kDeclareModuleExports runtime call emitted at the end); everything else is
// visited normally.
void BytecodeGenerator::VisitModuleDeclarations(Declaration::List* decls) {
  RegisterAllocationScope register_scope(this);
  for (Declaration* decl : *decls) {
    Variable* var = decl->var();
    if (!var->is_used()) continue;
    if (var->location() == VariableLocation::MODULE) {
      if (decl->IsFunctionDeclaration()) {
        DCHECK(var->IsExport());
        FunctionDeclaration* f = static_cast<FunctionDeclaration*>(decl);
        AddToEagerLiteralsIfEager(f->fun());
        top_level_builder()->record_module_function_declaration();
      } else if (var->IsExport() && var->binding_needs_init()) {
        DCHECK(decl->IsVariableDeclaration());
        top_level_builder()->record_module_variable_declaration();
      }
    } else {
      RegisterAllocationScope register_scope(this);
      Visit(decl);
    }
  }
  BuildDeclareCall(Runtime::kDeclareModuleExports);
}
// Processes the declarations of a script scope: unallocated vars/functions
// are recorded for the kDeclareGlobals runtime call emitted at the end;
// lexical (let/const) declarations are handled elsewhere (NewScriptContext).
void BytecodeGenerator::VisitGlobalDeclarations(Declaration::List* decls) {
  RegisterAllocationScope register_scope(this);
  for (Declaration* decl : *decls) {
    Variable* var = decl->var();
    DCHECK(var->is_used());
    if (var->location() == VariableLocation::UNALLOCATED) {
      // var or function.
      if (decl->IsFunctionDeclaration()) {
        top_level_builder()->record_global_function_declaration();
        FunctionDeclaration* f = static_cast<FunctionDeclaration*>(decl);
        AddToEagerLiteralsIfEager(f->fun());
      } else {
        top_level_builder()->record_global_variable_declaration();
      }
    } else {
      // let or const. Handled in NewScriptContext.
      DCHECK(decl->IsVariableDeclaration());
      DCHECK(IsLexicalVariableMode(var->mode()));
    }
  }
  BuildDeclareCall(Runtime::kDeclareGlobals);
}
// Visits each declaration in turn. Every declaration gets its own register
// allocation scope so temporary registers are released between declarations.
void BytecodeGenerator::VisitDeclarations(Declaration::List* declarations) {
  for (auto it = declarations->begin(); it != declarations->end(); ++it) {
    RegisterAllocationScope declaration_register_scope(this);
    Visit(*it);
  }
}
// Visits a statement list in order, giving each statement its own register
// allocation scope. Stops early once the current block is provably dead
// (e.g. after an unconditional return or throw).
void BytecodeGenerator::VisitStatements(
    const ZonePtrList<Statement>* statements) {
  for (int idx = 0; idx < statements->length(); ++idx) {
    RegisterAllocationScope statement_register_scope(this);
    Visit(statements->at(idx));
    if (builder()->RemainderOfBlockIsDead()) break;
  }
}
// An expression statement is evaluated purely for its side effects.
void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
  builder()->SetStatementPosition(stmt);
  VisitForEffect(stmt->expression());
}
// Empty statements generate no bytecode.
void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {}
// Compiles an if statement. Statically-known conditions emit only the live
// branch; otherwise the condition is evaluated as a test with jumps to the
// then/else blocks.
void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
  ConditionalControlFlowBuilder conditional_builder(
      builder(), block_coverage_builder_, stmt);
  builder()->SetStatementPosition(stmt);

  if (stmt->condition()->ToBooleanIsTrue()) {
    // Generate then block unconditionally as always true.
    conditional_builder.Then();
    Visit(stmt->then_statement());
  } else if (stmt->condition()->ToBooleanIsFalse()) {
    // Generate else block unconditionally if it exists.
    if (stmt->HasElseStatement()) {
      conditional_builder.Else();
      Visit(stmt->else_statement());
    }
  } else {
    // TODO(oth): If then statement is BreakStatement or
    // ContinueStatement we can reduce number of generated
    // jump/jump_ifs here. See BasicLoops test.
    VisitForTest(stmt->condition(), conditional_builder.then_labels(),
                 conditional_builder.else_labels(), TestFallthrough::kThen);

    conditional_builder.Then();
    Visit(stmt->then_statement());

    if (stmt->HasElseStatement()) {
      conditional_builder.JumpToEnd();
      conditional_builder.Else();
      Visit(stmt->else_statement());
    }
  }
}
// A sloppy-mode block-scoped function statement simply wraps an inner
// statement; compile that directly.
void BytecodeGenerator::VisitSloppyBlockFunctionStatement(
    SloppyBlockFunctionStatement* stmt) {
  Visit(stmt->statement());
}
// Emits a continue: delegates to the current control scope, which knows how
// to unwind to the target loop's continue point.
void BytecodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
  AllocateBlockCoverageSlotIfEnabled(stmt, SourceRangeKind::kContinuation);
  builder()->SetStatementPosition(stmt);
  execution_control()->Continue(stmt->target());
}
// Emits a break: delegates to the current control scope, which knows how to
// unwind to the target construct's break point.
void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
  AllocateBlockCoverageSlotIfEnabled(stmt, SourceRangeKind::kContinuation);
  builder()->SetStatementPosition(stmt);
  execution_control()->Break(stmt->target());
}
// Evaluates the return expression into the accumulator and hands control to
// the control scope, using the async-return path for async returns.
void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
  AllocateBlockCoverageSlotIfEnabled(stmt, SourceRangeKind::kContinuation);
  builder()->SetStatementPosition(stmt);
  VisitForAccumulatorValue(stmt->expression());
  if (stmt->is_async_return()) {
    execution_control()->AsyncReturnAccumulator(stmt->end_position());
  } else {
    execution_control()->ReturnAccumulator(stmt->end_position());
  }
}
// Compiles a with statement: evaluates the subject expression, wraps it in a
// new with-context, and compiles the body inside that scope.
void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
  builder()->SetStatementPosition(stmt);
  VisitForAccumulatorValue(stmt->expression());
  BuildNewLocalWithContext(stmt->scope());
  VisitInScope(stmt->statement(), stmt->scope());
}
// Compiles a switch statement in two passes: first emit a strict-equality
// comparison and conditional jump per non-default case label, then emit the
// case bodies (which fall through in source order as per JS semantics).
void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
  // We need this scope because we visit for register values. We have to
  // maintain a execution result scope where registers can be allocated.
  ZonePtrList<CaseClause>* clauses = stmt->cases();
  SwitchBuilder switch_builder(builder(), block_coverage_builder_, stmt,
                               clauses->length());
  ControlScopeForBreakable scope(this, stmt, &switch_builder);
  int default_index = -1;

  builder()->SetStatementPosition(stmt);

  // Keep the switch value in a register until a case matches.
  Register tag = VisitForRegisterValue(stmt->tag());
  // One shared compare feedback slot is used for all the label comparisons.
  FeedbackSlot slot = clauses->length() > 0
                          ? feedback_spec()->AddCompareICSlot()
                          : FeedbackSlot::Invalid();

  // Iterate over all cases and create nodes for label comparison.
  for (int i = 0; i < clauses->length(); i++) {
    CaseClause* clause = clauses->at(i);

    // The default is not a test, remember index.
    if (clause->is_default()) {
      default_index = i;
      continue;
    }

    // Perform label comparison as if via '===' with tag.
    VisitForAccumulatorValue(clause->label());
    builder()->CompareOperation(Token::Value::EQ_STRICT, tag,
                                feedback_index(slot));
    switch_builder.Case(ToBooleanMode::kAlreadyBoolean, i);
  }

  if (default_index >= 0) {
    // Emit default jump if there is a default case.
    switch_builder.DefaultAt(default_index);
  } else {
    // Otherwise if we have reached here none of the cases matched, so jump to
    // the end.
    switch_builder.Break();
  }

  // Iterate over all cases and create the case bodies.
  for (int i = 0; i < clauses->length(); i++) {
    CaseClause* clause = clauses->at(i);
    switch_builder.SetCaseTarget(i, clause);
    VisitStatements(clause->statements());
  }
}
// Emits a try/catch region: |try_body_func| runs inside a control scope that
// intercepts 'throw' commands; on an exception, |catch_body_func| runs with
// the register holding the saved context. |stmt_for_coverage| may be null
// when this is used for desugarings with no source-level try/catch.
template <typename TryBodyFunc, typename CatchBodyFunc>
void BytecodeGenerator::BuildTryCatch(
    TryBodyFunc try_body_func, CatchBodyFunc catch_body_func,
    HandlerTable::CatchPrediction catch_prediction,
    TryCatchStatement* stmt_for_coverage) {
  TryCatchBuilder try_control_builder(
      builder(),
      stmt_for_coverage == nullptr ? nullptr : block_coverage_builder_,
      stmt_for_coverage, catch_prediction);

  // Preserve the context in a dedicated register, so that it can be restored
  // when the handler is entered by the stack-unwinding machinery.
  // TODO(ignition): Be smarter about register allocation.
  Register context = register_allocator()->NewRegister();
  builder()->MoveRegister(Register::current_context(), context);

  // Evaluate the try-block inside a control scope. This simulates a handler
  // that is intercepting 'throw' control commands.
  try_control_builder.BeginTry(context);
  {
    ControlScopeForTryCatch scope(this, &try_control_builder);
    try_body_func();
  }
  try_control_builder.EndTry();

  catch_body_func(context);

  try_control_builder.EndCatch();
}
// Emits a try/finally region. Control transfers out of the try block are
// deferred (recorded as token/result pairs), the finally block runs, and
// then the deferred command is re-dispatched. The pending message object is
// saved around the finally block so message state is preserved.
template <typename TryBodyFunc, typename FinallyBodyFunc>
void BytecodeGenerator::BuildTryFinally(
    TryBodyFunc try_body_func, FinallyBodyFunc finally_body_func,
    HandlerTable::CatchPrediction catch_prediction,
    TryFinallyStatement* stmt_for_coverage) {
  // We can't know whether the finally block will override ("catch") an
  // exception thrown in the try block, so we just adopt the outer prediction.
  TryFinallyBuilder try_control_builder(
      builder(),
      stmt_for_coverage == nullptr ? nullptr : block_coverage_builder_,
      stmt_for_coverage, catch_prediction);

  // We keep a record of all paths that enter the finally-block to be able to
  // dispatch to the correct continuation point after the statements in the
  // finally-block have been evaluated.
  //
  // The try-finally construct can enter the finally-block in three ways:
  // 1. By exiting the try-block normally, falling through at the end.
  // 2. By exiting the try-block with a function-local control flow transfer
  //    (i.e. through break/continue/return statements).
  // 3. By exiting the try-block with a thrown exception.
  //
  // The result register semantics depend on how the block was entered:
  //  - ReturnStatement: It represents the return value being returned.
  //  - ThrowStatement: It represents the exception being thrown.
  //  - BreakStatement/ContinueStatement: Undefined and not used.
  //  - Falling through into finally-block: Undefined and not used.
  Register token = register_allocator()->NewRegister();
  Register result = register_allocator()->NewRegister();
  ControlScope::DeferredCommands commands(this, token, result);

  // Preserve the context in a dedicated register, so that it can be restored
  // when the handler is entered by the stack-unwinding machinery.
  // TODO(ignition): Be smarter about register allocation.
  Register context = register_allocator()->NewRegister();
  builder()->MoveRegister(Register::current_context(), context);

  // Evaluate the try-block inside a control scope. This simulates a handler
  // that is intercepting all control commands.
  try_control_builder.BeginTry(context);
  {
    ControlScopeForTryFinally scope(this, &try_control_builder, &commands);
    try_body_func();
  }
  try_control_builder.EndTry();

  // Record fall-through and exception cases.
  commands.RecordFallThroughPath();
  try_control_builder.LeaveTry();
  try_control_builder.BeginHandler();
  commands.RecordHandlerReThrowPath();

  // Pending message object is saved on entry.
  try_control_builder.BeginFinally();
  Register message = context;  // Reuse register.

  // Clear message object as we enter the finally block.
  builder()->LoadTheHole().SetPendingMessage().StoreAccumulatorInRegister(
      message);

  // Evaluate the finally-block.
  finally_body_func(token);
  try_control_builder.EndFinally();

  // Pending message object is restored on exit.
  builder()->LoadAccumulatorWithRegister(message).SetPendingMessage();

  // Dynamic dispatch after the finally-block.
  commands.ApplyDeferredCommands();
}
// Compiles a loop's body inside an iteration control scope (so break and
// continue resolve to this loop), then binds the continue target at the end
// of the body.
void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
                                           LoopBuilder* loop_builder) {
  loop_builder->LoopBody();
  ControlScopeForIteration execution_control(this, stmt, loop_builder);
  Visit(stmt->body());
  loop_builder->BindContinueTarget();
}
// Compiles a do-while loop. Statically-false conditions still execute the
// body once (without creating a loop); statically-true conditions omit the
// condition test; otherwise the condition is tested at the bottom.
void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
  LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
  if (stmt->cond()->ToBooleanIsFalse()) {
    // Since we know that the condition is false, we don't create a loop.
    // Therefore, we don't create a LoopScope (and thus we don't create a header
    // and a JumpToHeader). However, we still need to iterate once through the
    // body.
    VisitIterationBody(stmt, &loop_builder);
  } else if (stmt->cond()->ToBooleanIsTrue()) {
    LoopScope loop_scope(this, &loop_builder);
    VisitIterationBody(stmt, &loop_builder);
  } else {
    LoopScope loop_scope(this, &loop_builder);
    VisitIterationBody(stmt, &loop_builder);
    builder()->SetExpressionAsStatementPosition(stmt->cond());
    BytecodeLabels loop_backbranch(zone());
    VisitForTest(stmt->cond(), &loop_backbranch, loop_builder.break_labels(),
                 TestFallthrough::kThen);
    loop_backbranch.Bind(builder());
  }
}
// Compiles a while loop. A statically-false condition emits nothing; a
// statically-true condition skips the test; otherwise the condition is
// tested at the top of each iteration.
void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
  LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);

  if (stmt->cond()->ToBooleanIsFalse()) {
    // If the condition is false there is no need to generate the loop.
    return;
  }

  LoopScope loop_scope(this, &loop_builder);
  if (!stmt->cond()->ToBooleanIsTrue()) {
    builder()->SetExpressionAsStatementPosition(stmt->cond());
    BytecodeLabels loop_body(zone());
    VisitForTest(stmt->cond(), &loop_body, loop_builder.break_labels(),
                 TestFallthrough::kThen);
    loop_body.Bind(builder());
  }
  VisitIterationBody(stmt, &loop_builder);
}
// Compiles a C-style for loop: init runs once; a statically-false condition
// skips body/next/condition entirely; otherwise the (optional) condition is
// tested at the top and the next-expression runs after the body.
void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
  if (stmt->init() != nullptr) {
    Visit(stmt->init());
  }

  LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
  if (stmt->cond() && stmt->cond()->ToBooleanIsFalse()) {
    // If the condition is known to be false there is no need to generate
    // body, next or condition blocks. Init block should be generated.
    return;
  }

  LoopScope loop_scope(this, &loop_builder);
  if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
    builder()->SetExpressionAsStatementPosition(stmt->cond());
    BytecodeLabels loop_body(zone());
    VisitForTest(stmt->cond(), &loop_body, loop_builder.break_labels(),
                 TestFallthrough::kThen);
    loop_body.Bind(builder());
  }
  VisitIterationBody(stmt, &loop_builder);
  if (stmt->next() != nullptr) {
    builder()->SetStatementPosition(stmt->next());
    Visit(stmt->next());
  }
}
// Compiles a for-in loop using the ForIn* bytecodes: the subject is
// converted to an object, its enumerable keys are prepared into a register
// triple, and an index register drives iteration. Null/undefined subjects
// emit nothing.
void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
  if (stmt->subject()->IsNullLiteral() ||
      stmt->subject()->IsUndefinedLiteral()) {
    // ForIn generates lots of code, skip if it wouldn't produce any effects.
    return;
  }

  BytecodeLabel subject_undefined_label;
  FeedbackSlot slot = feedback_spec()->AddForInSlot();

  // Prepare the state for executing ForIn.
  builder()->SetExpressionAsStatementPosition(stmt->subject());
  VisitForAccumulatorValue(stmt->subject());
  // A subject that evaluates to undefined or null at runtime also skips the
  // whole loop.
  builder()->JumpIfUndefinedOrNull(&subject_undefined_label);
  Register receiver = register_allocator()->NewRegister();
  builder()->ToObject(receiver);

  // Used as kRegTriple and kRegPair in ForInPrepare and ForInNext.
  RegisterList triple = register_allocator()->NewRegisterList(3);
  Register cache_length = triple[2];
  builder()->ForInEnumerate(receiver);
  builder()->ForInPrepare(triple, feedback_index(slot));

  // Set up loop counter
  Register index = register_allocator()->NewRegister();
  builder()->LoadLiteral(Smi::zero());
  builder()->StoreAccumulatorInRegister(index);

  // The loop
  {
    LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
    LoopScope loop_scope(this, &loop_builder);
    builder()->SetExpressionAsStatementPosition(stmt->each());
    // Exit once the index reaches the cache length.
    builder()->ForInContinue(index, cache_length);
    loop_builder.BreakIfFalse(ToBooleanMode::kAlreadyBoolean);
    builder()->ForInNext(receiver, index, triple.Truncate(2),
                         feedback_index(slot));
    // ForInNext yields undefined for keys deleted during iteration; skip them.
    loop_builder.ContinueIfUndefined();

    // Assign accumulator value to the 'each' target.
    {
      EffectResultScope scope(this);
      // Make sure to preserve the accumulator across the PrepareAssignmentLhs
      // call.
      AssignmentLhsData lhs_data = PrepareAssignmentLhs(
          stmt->each(), AccumulatorPreservingMode::kPreserve);
      builder()->SetExpressionPosition(stmt->each());
      BuildAssignment(lhs_data, Token::ASSIGN, LookupHoistingMode::kNormal);
    }

    VisitIterationBody(stmt, &loop_builder);
    builder()->ForInStep(index);
    builder()->StoreAccumulatorInRegister(index);
  }
  builder()->Bind(&subject_undefined_label);
}
// Desugar a for-of statement into an application of the iteration protocol.
//
// for (EACH of SUBJECT) BODY
//
// becomes
//
// iterator = %GetIterator(SUBJECT)
// try {
//
// loop {
// // Make sure we are considered 'done' if .next(), .done or .value fail.
// done = true
// value = iterator.next()
// if (value.done) break;
// value = value.value
// done = false
//
// EACH = value
// BODY
// }
// done = true
//
// } catch(e) {
// iteration_continuation = RETHROW
// } finally {
// %FinalizeIteration(iterator, done, iteration_continuation)
// }
void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
  EffectResultScope effect_scope(this);
  builder()->SetExpressionAsStatementPosition(stmt->subject());
  VisitForAccumulatorValue(stmt->subject());
  // Store the iterator in a dedicated register so that it can be closed on
  // exit, and the 'done' value in a dedicated register so that it can be
  // changed and accessed independently of the iteration result.
  IteratorRecord iterator = BuildGetIteratorRecord(stmt->type());
  Register done = register_allocator()->NewRegister();
  builder()->LoadFalse();
  builder()->StoreAccumulatorInRegister(done);
  BuildTryFinally(
      // Try block.
      [&]() {
        Register next_result = register_allocator()->NewRegister();
        LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
        LoopScope loop_scope(this, &loop_builder);
        // done = true: make sure we are considered 'done' if .next(), .done
        // or .value fail (see the desugaring comment above the function).
        builder()->LoadTrue().StoreAccumulatorInRegister(done);
        // Call the iterator's .next() method. Break from the loop if the `done`
        // property is truthy, otherwise load the value from the iterator result
        // and append the argument.
        builder()->SetExpressionAsStatementPosition(stmt->each());
        BuildIteratorNext(iterator, next_result);
        builder()->LoadNamedProperty(
            next_result, ast_string_constants()->done_string(),
            feedback_index(feedback_spec()->AddLoadICSlot()));
        loop_builder.BreakIfTrue(ToBooleanMode::kConvertToBoolean);
        builder()
            // value = value.value
            ->LoadNamedProperty(
                next_result, ast_string_constants()->value_string(),
                feedback_index(feedback_spec()->AddLoadICSlot()));
        // done = false, before the assignment to each happens, so that done is
        // false if the assignment throws.
        builder()
            ->StoreAccumulatorInRegister(next_result)
            .LoadFalse()
            .StoreAccumulatorInRegister(done);
        // Assign to the 'each' target.
        AssignmentLhsData lhs_data = PrepareAssignmentLhs(stmt->each());
        builder()->LoadAccumulatorWithRegister(next_result);
        BuildAssignment(lhs_data, Token::ASSIGN, LookupHoistingMode::kNormal);
        VisitIterationBody(stmt, &loop_builder);
      },
      // Finally block.
      [&](Register iteration_continuation_token) {
        // Finish the iteration in the finally block.
        BuildFinalizeIteration(iterator, done, iteration_continuation_token);
      },
      HandlerTable::UNCAUGHT);
}
void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
  // Update catch prediction tracking. The updated catch_prediction value lasts
  // until the end of the try_block in the AST node, and does not apply to the
  // catch_block.
  HandlerTable::CatchPrediction outer_catch_prediction = catch_prediction();
  set_catch_prediction(stmt->GetCatchPrediction(outer_catch_prediction));
  BuildTryCatch(
      // Try body.
      [&]() {
        Visit(stmt->try_block());
        // Restore the outer prediction once the try block has been emitted.
        set_catch_prediction(outer_catch_prediction);
      },
      // Catch body. |context| is a register supplied by BuildTryCatch.
      [&](Register context) {
        if (stmt->scope()) {
          // Create a catch scope that binds the exception.
          BuildNewLocalCatchContext(stmt->scope());
          builder()->StoreAccumulatorInRegister(context);
        }
        // If requested, clear message object as we enter the catch block.
        if (stmt->ShouldClearPendingException(outer_catch_prediction)) {
          builder()->LoadTheHole().SetPendingMessage();
        }
        // Load the catch context into the accumulator.
        builder()->LoadAccumulatorWithRegister(context);
        // Evaluate the catch-block.
        if (stmt->scope()) {
          VisitInScope(stmt->catch_block(), stmt->scope());
        } else {
          VisitBlock(stmt->catch_block());
        }
      },
      catch_prediction(), stmt);
}
void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
  // Delegate to the shared try/finally machinery. The body continuation
  // token passed to the finally lambda is not consumed here; the finally
  // block simply visits its statements.
  auto try_body = [&]() { Visit(stmt->try_block()); };
  auto finally_body = [&](Register body_continuation_token) {
    Visit(stmt->finally_block());
  };
  BuildTryFinally(try_body, finally_body, catch_prediction(), stmt);
}
// Emits the Debugger bytecode for a `debugger;` statement.
void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
  // Record the statement position so the bytecode maps back to the source.
  builder()->SetStatementPosition(stmt);
  builder()->Debugger();
}
void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
  // A function literal becomes a CreateClosure over a deferred constant pool
  // entry; the entry is recorded in function_literals_ to be filled in once
  // the literal itself has been processed.
  DCHECK(expr->scope()->outer_scope() == current_scope());
  const size_t sfi_entry = builder()->AllocateDeferredConstantPoolEntry();
  const uint8_t closure_flags = CreateClosureFlags::Encode(
      expr->pretenure(), closure_scope()->is_function_scope(),
      info()->flags().might_always_opt());
  builder()->CreateClosure(sfi_entry, GetCachedCreateClosureSlot(expr),
                           closure_flags);
  function_literals_.push_back(std::make_pair(expr, sfi_entry));
  AddToEagerLiteralsIfEager(expr);
}
void BytecodeGenerator::AddToEagerLiteralsIfEager(FunctionLiteral* literal) {
  // Only collect literals when a collection list was provided, and only
  // those that will be compiled eagerly.
  if (eager_inner_literals_ == nullptr) return;
  if (!literal->ShouldEagerCompile()) return;
  DCHECK(!IsInEagerLiterals(literal, *eager_inner_literals_));
  eager_inner_literals_->push_back(literal);
}
bool BytecodeGenerator::ShouldOptimizeAsOneShot() const {
if (!FLAG_enable_one_shot_optimization) return false;
if (loop_depth_ > 0) return false;
return info()->literal()->is_toplevel() ||
info()->literal()->is_oneshot_iife();
}
// Emits bytecode that evaluates a class literal, leaving the class
// constructor in the accumulator. |name| optionally holds a value to install
// as the class's "name" property; it is only consulted when a static fields
// initializer is present (see the name.is_valid() check below), and callers
// pass Register::invalid_value() to skip that step.
void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
  size_t class_boilerplate_entry =
      builder()->AllocateDeferredConstantPoolEntry();
  class_literals_.push_back(std::make_pair(expr, class_boilerplate_entry));
  VisitDeclarations(expr->scope()->declarations());
  Register class_constructor = register_allocator()->NewRegister();
  // Create the class brand symbol and store it on the context during class
  // evaluation. This will be stored in the instance later in the constructor.
  // We do this early so that invalid access to private methods or accessors
  // in computed property keys throw.
  if (expr->scope()->brand() != nullptr) {
    Register brand = register_allocator()->NewRegister();
    const AstRawString* class_name =
        expr->scope()->class_variable() != nullptr
            ? expr->scope()->class_variable()->raw_name()
            : ast_string_constants()->empty_string();
    builder()
        ->LoadLiteral(class_name)
        .StoreAccumulatorInRegister(brand)
        .CallRuntime(Runtime::kCreatePrivateBrandSymbol, brand);
    BuildVariableAssignment(expr->scope()->brand(), Token::INIT,
                            HoleCheckMode::kElided);
  }
  AccessorTable<ClassLiteral::Property> private_accessors(zone());
  for (int i = 0; i < expr->private_members()->length(); i++) {
    ClassLiteral::Property* property = expr->private_members()->at(i);
    DCHECK(property->is_private());
    switch (property->kind()) {
      case ClassLiteral::Property::FIELD: {
        // Initialize the private field variables early.
        // Create the private name symbols for fields during class
        // evaluation and store them on the context. These will be
        // used as keys later during instance or static initialization.
        RegisterAllocationScope private_name_register_scope(this);
        Register private_name = register_allocator()->NewRegister();
        VisitForRegisterValue(property->key(), private_name);
        builder()
            ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
            .StoreAccumulatorInRegister(private_name)
            .CallRuntime(Runtime::kCreatePrivateNameSymbol, private_name);
        DCHECK_NOT_NULL(property->private_name_var());
        BuildVariableAssignment(property->private_name_var(), Token::INIT,
                                HoleCheckMode::kElided);
        break;
      }
      case ClassLiteral::Property::METHOD: {
        // We can initialize the private methods and accessors later so that the
        // home objects can be assigned right after the creation of the
        // closures, and those are guarded by the brand checks.
        break;
      }
      // Collect private accessors into a table to merge the creation of
      // those closures later.
      case ClassLiteral::Property::GETTER: {
        Literal* key = property->key()->AsLiteral();
        DCHECK_NULL(private_accessors.LookupOrInsert(key)->getter);
        private_accessors.LookupOrInsert(key)->getter = property;
        break;
      }
      case ClassLiteral::Property::SETTER: {
        Literal* key = property->key()->AsLiteral();
        DCHECK_NULL(private_accessors.LookupOrInsert(key)->setter);
        private_accessors.LookupOrInsert(key)->setter = property;
        break;
      }
      default:
        UNREACHABLE();
    }
  }
  {
    RegisterAllocationScope register_scope(this);
    // Build the argument list for Runtime::kDefineClass: boilerplate,
    // constructor and super class first, then dynamic (key, value) entries.
    RegisterList args = register_allocator()->NewGrowableRegisterList();
    Register class_boilerplate = register_allocator()->GrowRegisterList(&args);
    Register class_constructor_in_args =
        register_allocator()->GrowRegisterList(&args);
    Register super_class = register_allocator()->GrowRegisterList(&args);
    DCHECK_EQ(ClassBoilerplate::kFirstDynamicArgumentIndex,
              args.register_count());
    VisitForAccumulatorValueOrTheHole(expr->extends());
    builder()->StoreAccumulatorInRegister(super_class);
    VisitFunctionLiteral(expr->constructor());
    builder()
        ->StoreAccumulatorInRegister(class_constructor)
        .MoveRegister(class_constructor, class_constructor_in_args)
        .LoadConstantPoolEntry(class_boilerplate_entry)
        .StoreAccumulatorInRegister(class_boilerplate);
    // Create computed names and method values nodes to store into the literal.
    for (int i = 0; i < expr->public_members()->length(); i++) {
      ClassLiteral::Property* property = expr->public_members()->at(i);
      if (property->is_computed_name()) {
        Register key = register_allocator()->GrowRegisterList(&args);
        builder()->SetExpressionAsStatementPosition(property->key());
        BuildLoadPropertyKey(property, key);
        if (property->is_static()) {
          // The static prototype property is read only. We handle the non
          // computed property name case in the parser. Since this is the only
          // case where we need to check for an own read only property we
          // special case this so we do not need to do this for every property.
          FeedbackSlot slot = GetDummyCompareICSlot();
          BytecodeLabel done;
          builder()
              ->LoadLiteral(ast_string_constants()->prototype_string())
              .CompareOperation(Token::Value::EQ_STRICT, key,
                                feedback_index(slot))
              .JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &done)
              .CallRuntime(Runtime::kThrowStaticPrototypeError)
              .Bind(&done);
        }
        if (property->kind() == ClassLiteral::Property::FIELD) {
          DCHECK(!property->is_private());
          // Initialize field's name variable with the computed name.
          DCHECK_NOT_NULL(property->computed_name_var());
          builder()->LoadAccumulatorWithRegister(key);
          BuildVariableAssignment(property->computed_name_var(), Token::INIT,
                                  HoleCheckMode::kElided);
        }
      }
      DCHECK(!property->is_private());
      if (property->kind() == ClassLiteral::Property::FIELD) {
        // We don't compute field's value here, but instead do it in the
        // initializer function.
        continue;
      }
      Register value = register_allocator()->GrowRegisterList(&args);
      VisitForRegisterValue(property->value(), value);
    }
    builder()->CallRuntime(Runtime::kDefineClass, args);
  }
  // kDefineClass left a value in the accumulator that is stored here as the
  // class prototype.
  Register prototype = register_allocator()->NewRegister();
  builder()->StoreAccumulatorInRegister(prototype);
  // Assign to class variable.
  Variable* class_variable = expr->scope()->class_variable();
  if (class_variable != nullptr && class_variable->is_used()) {
    DCHECK(class_variable->IsStackLocal() || class_variable->IsContextSlot());
    builder()->LoadAccumulatorWithRegister(class_constructor);
    BuildVariableAssignment(class_variable, Token::INIT,
                            HoleCheckMode::kElided);
  }
  // Create the closures of private methods, and store the home object for
  // any private methods that need them.
  if (expr->has_private_methods()) {
    for (int i = 0; i < expr->private_members()->length(); i++) {
      ClassLiteral::Property* property = expr->private_members()->at(i);
      if (property->kind() != ClassLiteral::Property::METHOD) {
        continue;
      }
      RegisterAllocationScope register_scope(this);
      VisitForAccumulatorValue(property->value());
      BuildVariableAssignment(property->private_name_var(), Token::INIT,
                              HoleCheckMode::kElided);
      Register home_object = property->private_name_var()->is_static()
                                 ? class_constructor
                                 : prototype;
      if (property->NeedsHomeObjectOnClassPrototype()) {
        Register func = register_allocator()->NewRegister();
        builder()->StoreAccumulatorInRegister(func);
        VisitSetHomeObject(func, home_object, property);
      }
    }
  }
  // Define private accessors, using only a single call to the runtime for
  // each pair of corresponding getters and setters, in the order the first
  // component is declared. Store the home objects if necessary.
  for (auto accessors : private_accessors.ordered_accessors()) {
    RegisterAllocationScope inner_register_scope(this);
    RegisterList accessors_reg = register_allocator()->NewRegisterList(2);
    ClassLiteral::Property* getter = accessors.second->getter;
    ClassLiteral::Property* setter = accessors.second->setter;
    bool is_static =
        getter != nullptr ? getter->is_static() : setter->is_static();
    Register home_object = is_static ? class_constructor : prototype;
    VisitLiteralAccessor(home_object, getter, accessors_reg[0]);
    VisitLiteralAccessor(home_object, setter, accessors_reg[1]);
    builder()->CallRuntime(Runtime::kCreatePrivateAccessors, accessors_reg);
    Variable* var = getter != nullptr ? getter->private_name_var()
                                      : setter->private_name_var();
    DCHECK_NOT_NULL(var);
    BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kElided);
  }
  if (expr->instance_members_initializer_function() != nullptr) {
    Register initializer =
        VisitForRegisterValue(expr->instance_members_initializer_function());
    if (FunctionLiteral::NeedsHomeObject(
            expr->instance_members_initializer_function())) {
      FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
      builder()->LoadAccumulatorWithRegister(prototype).StoreHomeObjectProperty(
          initializer, feedback_index(slot), language_mode());
    }
    FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
    builder()
        ->LoadAccumulatorWithRegister(initializer)
        .StoreClassFieldsInitializer(class_constructor, feedback_index(slot))
        .LoadAccumulatorWithRegister(class_constructor);
  }
  if (expr->static_fields_initializer() != nullptr) {
    // TODO(gsathya): This can be optimized away to be a part of the
    // class boilerplate in the future. The name argument can be
    // passed to the DefineClass runtime function and have it set
    // there.
    if (name.is_valid()) {
      Register key = register_allocator()->NewRegister();
      builder()
          ->LoadLiteral(ast_string_constants()->name_string())
          .StoreAccumulatorInRegister(key);
      DataPropertyInLiteralFlags data_property_flags =
          DataPropertyInLiteralFlag::kNoFlags;
      FeedbackSlot slot =
          feedback_spec()->AddStoreDataPropertyInLiteralICSlot();
      builder()->LoadAccumulatorWithRegister(name).StoreDataPropertyInLiteral(
          class_constructor, key, data_property_flags, feedback_index(slot));
    }
    RegisterList args = register_allocator()->NewRegisterList(1);
    Register initializer =
        VisitForRegisterValue(expr->static_fields_initializer());
    if (FunctionLiteral::NeedsHomeObject(expr->static_fields_initializer())) {
      FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
      builder()
          ->LoadAccumulatorWithRegister(class_constructor)
          .StoreHomeObjectProperty(initializer, feedback_index(slot),
                                   language_mode());
    }
    builder()
        ->MoveRegister(class_constructor, args[0])
        .CallProperty(initializer, args,
                      feedback_index(feedback_spec()->AddCallICSlot()));
  }
  builder()->LoadAccumulatorWithRegister(class_constructor);
}
void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
  // No name register: an invalid register tells BuildClassLiteral to skip
  // installing a "name" property on the class constructor.
  VisitClassLiteral(expr, Register::invalid_value());
}
void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr, Register name) {
  DCHECK_NOT_NULL(expr->scope());
  CurrentScope current_scope(this, expr->scope());
  // If the class scope does not need its own context, emit the class body
  // directly; otherwise allocate and push the block context first.
  if (!expr->scope()->NeedsContext()) {
    BuildClassLiteral(expr, name);
    return;
  }
  BuildNewLocalBlockContext(expr->scope());
  ContextScope scope(this, expr->scope());
  BuildClassLiteral(expr, name);
}
// Emits the body of a class-members initializer: for each field, load its key
// (computed names and private names were already evaluated and stored in
// variables at class definition time), evaluate its value, and define the
// property on the receiver.
void BytecodeGenerator::VisitInitializeClassMembersStatement(
    InitializeClassMembersStatement* stmt) {
  RegisterList args = register_allocator()->NewRegisterList(3);
  Register constructor = args[0], key = args[1], value = args[2];
  builder()->MoveRegister(builder()->Receiver(), constructor);
  for (int i = 0; i < stmt->fields()->length(); i++) {
    ClassLiteral::Property* property = stmt->fields()->at(i);
    // Private methods are not initialized in the
    // InitializeClassMembersStatement.
    DCHECK_IMPLIES(property->is_private(),
                   property->kind() == ClassLiteral::Property::FIELD);
    if (property->is_computed_name()) {
      DCHECK_EQ(property->kind(), ClassLiteral::Property::FIELD);
      DCHECK(!property->is_private());
      Variable* var = property->computed_name_var();
      DCHECK_NOT_NULL(var);
      // The computed name is already evaluated and stored in a
      // variable at class definition time.
      BuildVariableLoad(var, HoleCheckMode::kElided);
      builder()->StoreAccumulatorInRegister(key);
    } else if (property->is_private()) {
      // Private fields use the private name symbol created during class
      // evaluation as their key.
      Variable* private_name_var = property->private_name_var();
      DCHECK_NOT_NULL(private_name_var);
      BuildVariableLoad(private_name_var, HoleCheckMode::kElided);
      builder()->StoreAccumulatorInRegister(key);
    } else {
      BuildLoadPropertyKey(property, key);
    }
    builder()->SetExpressionAsStatementPosition(property->value());
    VisitForRegisterValue(property->value(), value);
    VisitSetHomeObject(value, constructor, property);
    // Public fields become ordinary data properties; private fields go
    // through the dedicated AddPrivateField runtime function.
    Runtime::FunctionId function_id =
        property->kind() == ClassLiteral::Property::FIELD &&
                !property->is_private()
            ? Runtime::kCreateDataProperty
            : Runtime::kAddPrivateField;
    builder()->CallRuntime(function_id, args);
  }
}
void BytecodeGenerator::BuildInvalidPropertyAccess(MessageTemplate tmpl,
                                                   Property* property) {
  // Builds and throws a TypeError from the message template and the
  // property's name.
  RegisterAllocationScope register_scope(this);
  const AstRawString* name = property->key()->AsVariableProxy()->raw_name();
  RegisterList error_args = register_allocator()->NewRegisterList(2);
  builder()->LoadLiteral(Smi::FromEnum(tmpl));
  builder()->StoreAccumulatorInRegister(error_args[0]);
  builder()->LoadLiteral(name);
  builder()->StoreAccumulatorInRegister(error_args[1]);
  builder()->CallRuntime(Runtime::kNewTypeError, error_args);
  builder()->Throw();
}
void BytecodeGenerator::BuildPrivateBrandInitialization(Register receiver) {
  // Calls Runtime::kAddPrivateBrand with (receiver, brand, class context).
  RegisterList args = register_allocator()->NewRegisterList(3);
  Variable* brand = info()->scope()->outer_scope()->AsClassScope()->brand();
  int depth = execution_context()->ContextChainDepth(brand->scope());
  ContextScope* class_context = execution_context()->Previous(depth);
  // The brand symbol was stored on the class context during class
  // evaluation; load it into the second argument slot.
  BuildVariableLoad(brand, HoleCheckMode::kElided);
  builder()->StoreAccumulatorInRegister(args[1]);
  builder()->MoveRegister(receiver, args[0]);
  builder()->MoveRegister(class_context->reg(), args[2]);
  builder()->CallRuntime(Runtime::kAddPrivateBrand, args);
}
void BytecodeGenerator::BuildInstanceMemberInitialization(Register constructor,
                                                          Register instance) {
  // Loads the class-fields initializer from |constructor| and, if one is
  // present, calls it with |instance| as the receiver.
  RegisterList call_args = register_allocator()->NewRegisterList(1);
  Register initializer = register_allocator()->NewRegister();
  FeedbackSlot slot = feedback_spec()->AddLoadICSlot();
  BytecodeLabel done;
  builder()->LoadClassFieldsInitializer(constructor, feedback_index(slot));
  // TODO(gsathya): This jump can be elided for the base
  // constructor and derived constructor. This is only required
  // when called from an arrow function.
  builder()->JumpIfUndefined(&done);
  builder()->StoreAccumulatorInRegister(initializer);
  builder()->MoveRegister(instance, call_args[0]);
  builder()->CallProperty(initializer, call_args,
                          feedback_index(feedback_spec()->AddCallICSlot()));
  builder()->Bind(&done);
}
void BytecodeGenerator::VisitNativeFunctionLiteral(
    NativeFunctionLiteral* expr) {
  // Native function literals are materialized like ordinary closures: a
  // deferred constant pool entry (recorded in native_function_literals_ for
  // later patching) plus a create-closure feedback slot.
  const uint8_t closure_flags = CreateClosureFlags::Encode(false, false, false);
  const int slot_index = feedback_spec()->AddCreateClosureSlot();
  const size_t entry = builder()->AllocateDeferredConstantPoolEntry();
  builder()->CreateClosure(entry, slot_index, closure_flags);
  native_function_literals_.push_back(std::make_pair(expr, entry));
}
// Emits bytecode for a conditional (?:) expression, leaving the selected
// branch's value in the accumulator.
void BytecodeGenerator::VisitConditional(Conditional* expr) {
  ConditionalControlFlowBuilder conditional_builder(
      builder(), block_coverage_builder_, expr);
  if (expr->condition()->ToBooleanIsTrue()) {
    // Generate then block unconditionally as always true.
    conditional_builder.Then();
    VisitForAccumulatorValue(expr->then_expression());
  } else if (expr->condition()->ToBooleanIsFalse()) {
    // Generate else block unconditionally if it exists.
    conditional_builder.Else();
    VisitForAccumulatorValue(expr->else_expression());
  } else {
    // Condition is not statically known: test with fall-through to the
    // then-branch, and jump over the else-branch at the end of then.
    VisitForTest(expr->condition(), conditional_builder.then_labels(),
                 conditional_builder.else_labels(), TestFallthrough::kThen);
    conditional_builder.Then();
    VisitForAccumulatorValue(expr->then_expression());
    conditional_builder.JumpToEnd();
    conditional_builder.Else();
    VisitForAccumulatorValue(expr->else_expression());
  }
}
void BytecodeGenerator::VisitLiteral(Literal* expr) {
  // A literal evaluated purely for effect emits no bytecode at all.
  if (execution_result()->IsEffect()) return;
  switch (expr->type()) {
    case Literal::kTheHole:
      builder()->LoadTheHole();
      break;
    case Literal::kUndefined:
      builder()->LoadUndefined();
      break;
    case Literal::kNull:
      builder()->LoadNull();
      break;
    case Literal::kBoolean:
      builder()->LoadBoolean(expr->ToBooleanIsTrue());
      // Record on the result scope that the accumulator holds a boolean.
      execution_result()->SetResultIsBoolean();
      break;
    case Literal::kSmi:
      builder()->LoadLiteral(expr->AsSmiLiteral());
      break;
    case Literal::kHeapNumber:
      builder()->LoadLiteral(expr->AsNumber());
      break;
    case Literal::kBigInt:
      builder()->LoadLiteral(expr->AsBigInt());
      break;
    case Literal::kString:
      builder()->LoadLiteral(expr->AsRawString());
      // Record on the result scope that the accumulator holds a string.
      execution_result()->SetResultIsString();
      break;
    case Literal::kSymbol:
      builder()->LoadLiteral(expr->AsSymbol());
      break;
  }
}
void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
  // Materialize a regular expression literal, with a literal feedback slot
  // for the created regexp.
  const int literal_slot = feedback_index(feedback_spec()->AddLiteralSlot());
  builder()->CreateRegExpLiteral(expr->raw_pattern(), literal_slot,
                                 expr->flags());
}
void BytecodeGenerator::BuildCreateObjectLiteral(Register literal,
                                                 uint8_t flags, size_t entry) {
  // Creates an object from the boilerplate at |entry| and stores it in
  // |literal|. One-shot code skips the allocation-site machinery by calling
  // the runtime directly.
  if (!ShouldOptimizeAsOneShot()) {
    // TODO(cbruni): Directly generate runtime call for literals we cannot
    // optimize once the CreateShallowObjectLiteral stub is in sync with the TF
    // optimizations.
    const int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
    builder()->CreateObjectLiteral(entry, literal_index, flags);
    builder()->StoreAccumulatorInRegister(literal);
    return;
  }
  RegisterList runtime_args = register_allocator()->NewRegisterList(2);
  builder()->LoadConstantPoolEntry(entry);
  builder()->StoreAccumulatorInRegister(runtime_args[0]);
  builder()->LoadLiteral(Smi::FromInt(flags));
  builder()->StoreAccumulatorInRegister(runtime_args[1]);
  builder()->CallRuntime(Runtime::kCreateObjectLiteralWithoutAllocationSite,
                         runtime_args);
  builder()->StoreAccumulatorInRegister(literal);
}
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
expr->InitDepthAndFlags();
// Fast path for the empty object literal which doesn't need an
// AllocationSite.
if (expr->IsEmptyObjectLiteral()) {
DCHECK(expr->IsFastCloningSupported());
builder()->CreateEmptyObjectLiteral();
return;
}
// Deep-copy the literal boilerplate.
uint8_t flags = CreateObjectLiteralFlags::Encode(
expr->ComputeFlags(), expr->IsFastCloningSupported());
Register literal = register_allocator()->NewRegister();
// Create literal object.
int property_index = 0;
bool clone_object_spread =
expr->properties()->first()->kind() == ObjectLiteral::Property::SPREAD;
if (clone_object_spread) {
// Avoid the slow path for spreads in the following common cases:
// 1) `let obj = { ...source }`
// 2) `let obj = { ...source, override: 1 }`
// 3) `let obj = { ...source, ...overrides }`
RegisterAllocationScope register_scope(this);
Expression* property = expr->properties()->first()->value();
Register from_value = VisitForRegisterValue(property);
int clone_index = feedback_index(feedback_spec()->AddCloneObjectSlot());
builder()->CloneObject(from_value, flags, clone_index);
builder()->StoreAccumulatorInRegister(literal);
property_index++;
} else {
size_t entry;
// If constant properties is an empty fixed array, use a cached empty fixed
// array to ensure it's only added to the constant pool once.
if (expr->properties_count() == 0) {
entry = builder()->EmptyObjectBoilerplateDescriptionConstantPoolEntry();
} else {
entry = builder()->AllocateDeferredConstantPoolEntry();
object_literals_.push_back(std::make_pair(expr, entry));
}
BuildCreateObjectLiteral(literal, flags, entry);
}
// Store computed values into the literal.
AccessorTable<ObjectLiteral::Property> accessor_table(zone());
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
if (!clone_object_spread && property->IsCompileTimeValue()) continue;
RegisterAllocationScope inner_register_scope(this);
Literal* key = property->key()->AsLiteral();
switch (property->kind()) {
case ObjectLiteral::Property::SPREAD:
UNREACHABLE();
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
DCHECK(clone_object_spread || !property->value()->IsCompileTimeValue());
V8_FALLTHROUGH;
case ObjectLiteral::Property::COMPUTED: {
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
if (key->IsStringLiteral()) {
DCHECK(key->IsPropertyName());
if (property->emit_store()) {
builder()->SetExpressionPosition(property->value());
VisitForAccumulatorValue(property->value());
FeedbackSlot slot = feedback_spec()->AddStoreOwnICSlot();
if (FunctionLiteral::NeedsHomeObject(property->value())) {
RegisterAllocationScope register_scope(this);
Register value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
builder()->StoreNamedOwnProperty(
literal, key->AsRawPropertyName(), feedback_index(slot));
VisitSetHomeObject(value, literal, property);
} else {
builder()->StoreNamedOwnProperty(
literal, key->AsRawPropertyName(), feedback_index(slot));
}
} else {
builder()->SetExpressionPosition(property->value());
VisitForEffect(property->value());
}
} else {
RegisterList args = register_allocator()->NewRegisterList(3);
builder()->MoveRegister(literal, args[0]);
builder()->SetExpressionPosition(property->key());
VisitForRegisterValue(property->key(), args[1]);
builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[2]);
if (property->emit_store()) {
builder()->CallRuntime(Runtime::kSetKeyedProperty, args);
Register value = args[2];
VisitSetHomeObject(value, literal, property);
}
}
break;
}
case ObjectLiteral::Property::PROTOTYPE: {
// __proto__:null is handled by CreateObjectLiteral.
if (property->IsNullPrototype()) break;
DCHECK(property->emit_store());
DCHECK(!property->NeedsSetFunctionName());
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[1]);
builder()->CallRuntime(Runtime::kInternalSetPrototype, args);
break;
}
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
accessor_table.LookupOrInsert(key)->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
accessor_table.LookupOrInsert(key)->setter = property;
}
break;
}
}
// Define accessors, using only a single call to the runtime for each pair of
// corresponding getters and setters.
for (auto accessors : accessor_table.ordered_accessors()) {
RegisterAllocationScope inner_register_scope(this);
RegisterList args = register_allocator()->NewRegisterList(5);
builder()->MoveRegister(literal, args[0]);
VisitForRegisterValue(accessors.first, args[1]);
VisitLiteralAccessor(literal, accessors.second->getter, args[2]);
VisitLiteralAccessor(literal, accessors.second->setter, args[3]);
builder()
->LoadLiteral(Smi::FromInt(NONE))
.StoreAccumulatorInRegister(args[4])
.CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, args);
}
// Object literals have two parts. The "static" part on the left contains no
// computed property names, and so we can compute its map ahead of time; see
// Runtime_CreateObjectLiteralBoilerplate. The second "dynamic" part starts
// with the first computed property name and continues with all properties to
// its right. All the code from above initializes the static component of the
// object literal, and arranges for the map of the result to reflect the
// static order in which the keys appear. For the dynamic properties, we
// compile them into a series of "SetOwnProperty" runtime calls. This will
// preserve insertion order.
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
RegisterAllocationScope inner_register_scope(this);
if (property->IsPrototype()) {
// __proto__:null is handled by CreateObjectLiteral.
if (property->IsNullPrototype()) continue;
DCHECK(property->emit_store());
DCHECK(!property->NeedsSetFunctionName());
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[1]);
builder()->CallRuntime(Runtime::kInternalSetPrototype, args);
continue;
}
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
Register key = register_allocator()->NewRegister();
BuildLoadPropertyKey(property, key);
builder()->SetExpressionPosition(property->value());
Register value;
// Static class fields require the name property to be set on
// the class, meaning we can't wait until the
// StoreDataPropertyInLiteral call later to set the name.
if (property->value()->IsClassLiteral() &&
property->value()->AsClassLiteral()->static_fields_initializer() !=
nullptr) {
value = register_allocator()->NewRegister();
VisitClassLiteral(property->value()->AsClassLiteral(), key);
builder()->StoreAccumulatorInRegister(value);
} else {
value = VisitForRegisterValue(property->value());
}
VisitSetHomeObject(value, literal, property);
DataPropertyInLiteralFlags data_property_flags =
DataPropertyInLiteralFlag::kNoFlags;
if (property->NeedsSetFunctionName()) {
data_property_flags |= DataPropertyInLiteralFlag::kSetFunctionName;
}
FeedbackSlot slot =
feedback_spec()->AddStoreDataPropertyInLiteralICSlot();
builder()
->LoadAccumulatorWithRegister(value)
.StoreDataPropertyInLiteral(literal, key, data_property_flags,
feedback_index(slot));
break;
}
case ObjectLiteral::Property::GETTER:
case ObjectLiteral::Property::SETTER: {
RegisterList args = register_allocator()->NewRegisterList(4);
builder()->MoveRegister(literal, args[0]);
BuildLoadPropertyKey(property, args[1]);
builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[2]);
VisitSetHomeObject(args[2], literal, property);
builder()
->LoadLiteral(Smi::FromInt(NONE))
.StoreAccumulatorInRegister(args[3]);
Runtime::FunctionId function_id =
property->kind() == ObjectLiteral::Property::GETTER
? Runtime::kDefineGetterPropertyUnchecked
: Runtime::kDefineSetterPropertyUnchecked;
builder()->CallRuntime(function_id, args);
break;
}
case ObjectLiteral::Property::SPREAD: {
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[1]);
builder()->CallRuntime(Runtime::kInlineCopyDataProperties, args