// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/torque/implementation-visitor.h"
#include <algorithm>
#include <iomanip>
#include <string>
#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/torque/cc-generator.h"
#include "src/torque/constants.h"
#include "src/torque/csa-generator.h"
#include "src/torque/declaration-visitor.h"
#include "src/torque/global-context.h"
#include "src/torque/parameter-difference.h"
#include "src/torque/server-data.h"
#include "src/torque/type-inference.h"
#include "src/torque/type-visitor.h"
#include "src/torque/types.h"
namespace v8 {
namespace internal {
namespace torque {
VisitResult ImplementationVisitor::Visit(Expression* expr) {
CurrentSourcePosition::Scope scope(expr->pos);
switch (expr->kind) {
#define ENUM_ITEM(name) \
case AstNode::Kind::k##name: \
return Visit(name::cast(expr));
AST_EXPRESSION_NODE_KIND_LIST(ENUM_ITEM)
#undef ENUM_ITEM
default:
UNREACHABLE();
}
}
const Type* ImplementationVisitor::Visit(Statement* stmt) {
CurrentSourcePosition::Scope scope(stmt->pos);
StackScope stack_scope(this);
const Type* result;
switch (stmt->kind) {
#define ENUM_ITEM(name) \
case AstNode::Kind::k##name: \
result = Visit(name::cast(stmt)); \
break;
AST_STATEMENT_NODE_KIND_LIST(ENUM_ITEM)
#undef ENUM_ITEM
default:
UNREACHABLE();
}
DCHECK_EQ(result == TypeOracle::GetNeverType(),
assembler().CurrentBlockIsComplete());
return result;
}
void ImplementationVisitor::BeginGeneratedFiles() {
std::set<SourceId> contains_class_definitions;
for (const ClassType* type : TypeOracle::GetClasses()) {
if (type->GenerateCppClassDefinitions()) {
contains_class_definitions.insert(type->AttributedToFile());
}
}
for (SourceId file : SourceFileMap::AllSources()) {
// Output beginning of CSA .cc file.
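    // The emitted preamble looks roughly like this (paths illustrative):
    //
    //   #include "torque-generated/src/builtins/foo-tq-csa.h"
    //   ...
    //   namespace v8 {
    //   namespace internal {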
{
std::ostream& out = GlobalContext::GeneratedPerFile(file).csa_ccfile;
for (const std::string& include_path : GlobalContext::CppIncludes()) {
out << "#include " << StringLiteralQuote(include_path) << "\n";
}
      // Include the generated CSA headers of all Torque files, so that
      // macros from any file can be called from this one.
      for (SourceId included_file : SourceFileMap::AllSources()) {
        out << "#include \"torque-generated/" +
                   SourceFileMap::PathFromV8RootWithoutExtension(
                       included_file) +
                   "-tq-csa.h\"\n";
      }
out << "\n";
out << "namespace v8 {\n"
<< "namespace internal {\n"
<< "\n";
}
// Output beginning of CSA .h file.
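    // Emits a standard include guard derived from the source path, e.g.
    // (illustrative): V8_GEN_TORQUE_GENERATED_SRC_BUILTINS_FOO_TQ_H_.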
{
std::ostream& out = GlobalContext::GeneratedPerFile(file).csa_headerfile;
std::string headerDefine =
"V8_GEN_TORQUE_GENERATED_" +
UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";
out << "#ifndef " << headerDefine << "\n";
out << "#define " << headerDefine << "\n\n";
out << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
out << "\n";
out << "namespace v8 {\n"
<< "namespace internal {\n"
<< "\n";
}
// Output beginning of class definition .cc file.
{
auto& streams = GlobalContext::GeneratedPerFile(file);
std::ostream& out = streams.class_definition_ccfile;
if (contains_class_definitions.count(file) != 0) {
out << "#include \""
<< SourceFileMap::PathFromV8RootWithoutExtension(file)
<< "-inl.h\"\n\n";
out << "#include \"torque-generated/class-verifiers.h\"\n";
out << "#include \"src/objects/instance-type-inl.h\"\n\n";
}
out << "namespace v8 {\n";
out << "namespace internal {\n";
}
}
}
void ImplementationVisitor::EndGeneratedFiles() {
for (SourceId file : SourceFileMap::AllSources()) {
{
std::ostream& out = GlobalContext::GeneratedPerFile(file).csa_ccfile;
out << "} // namespace internal\n"
<< "} // namespace v8\n"
<< "\n";
}
{
std::ostream& out = GlobalContext::GeneratedPerFile(file).csa_headerfile;
std::string headerDefine =
"V8_GEN_TORQUE_GENERATED_" +
UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";
out << "} // namespace internal\n"
<< "} // namespace v8\n"
<< "\n";
out << "#endif // " << headerDefine << "\n";
}
{
std::ostream& out =
GlobalContext::GeneratedPerFile(file).class_definition_ccfile;
out << "} // namespace v8\n";
out << "} // namespace internal\n";
}
}
}
void ImplementationVisitor::BeginRuntimeMacrosFile() {
std::ostream& source = runtime_macros_cc_;
std::ostream& header = runtime_macros_h_;
source << "#include \"torque-generated/runtime-macros.h\"\n\n";
source << "#include \"src/torque/runtime-macro-shims.h\"\n";
for (const std::string& include_path : GlobalContext::CppIncludes()) {
source << "#include " << StringLiteralQuote(include_path) << "\n";
}
source << "\n";
source << "namespace v8 {\n"
<< "namespace internal {\n"
<< "\n";
const char* kHeaderDefine = "V8_GEN_TORQUE_GENERATED_RUNTIME_MACROS_H_";
header << "#ifndef " << kHeaderDefine << "\n";
header << "#define " << kHeaderDefine << "\n\n";
header << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
header << "\n";
header << "namespace v8 {\n"
<< "namespace internal {\n"
<< "\n";
}
void ImplementationVisitor::EndRuntimeMacrosFile() {
std::ostream& source = runtime_macros_cc_;
std::ostream& header = runtime_macros_h_;
source << "} // namespace internal\n"
<< "} // namespace v8\n"
<< "\n";
header << "\n} // namespace internal\n"
<< "} // namespace v8\n"
<< "\n";
header << "#endif // V8_GEN_TORQUE_GENERATED_RUNTIME_MACROS_H_\n";
}
void ImplementationVisitor::Visit(NamespaceConstant* decl) {
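  // Generates a CSA accessor function for the constant. The result looks
  // roughly like this (name and return type illustrative):
  //
  //   TNode<Smi> kSomeConstant(compiler::CodeAssemblerState* state_) {
  //     compiler::CodeAssembler ca_(state_);
  //     return ...;  // code generated from the constant's body
  //   }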
Signature signature{{}, base::nullopt, {{}, false}, 0, decl->type(),
{}, false};
BindingsManagersScope bindings_managers_scope;
csa_headerfile() << " ";
GenerateFunctionDeclaration(csa_headerfile(), "", decl->external_name(),
signature, {});
csa_headerfile() << ";\n";
GenerateFunctionDeclaration(csa_ccfile(), "", decl->external_name(),
signature, {});
csa_ccfile() << " {\n";
csa_ccfile() << " compiler::CodeAssembler ca_(state_);\n";
DCHECK(!signature.return_type->IsVoidOrNever());
assembler_ = CfgAssembler(Stack<const Type*>{});
VisitResult expression_result = Visit(decl->body());
VisitResult return_result =
GenerateImplicitConvert(signature.return_type, expression_result);
CSAGenerator csa_generator{assembler().Result(), csa_ccfile()};
Stack<std::string> values = *csa_generator.EmitGraph(Stack<std::string>{});
assembler_ = base::nullopt;
csa_ccfile() << " return ";
CSAGenerator::EmitCSAValue(return_result, values, csa_ccfile());
csa_ccfile() << ";\n";
csa_ccfile() << "}\n\n";
}
void ImplementationVisitor::Visit(TypeAlias* alias) {
if (alias->IsRedeclaration()) return;
if (const ClassType* class_type = ClassType::DynamicCast(alias->type())) {
if (class_type->IsExtern() && !class_type->nspace()->IsDefaultNamespace()) {
Error(
"extern classes are currently only supported in the default "
"namespace");
}
}
}
VisitResult ImplementationVisitor::InlineMacro(
Macro* macro, base::Optional<LocationReference> this_reference,
const std::vector<VisitResult>& arguments,
const std::vector<Block*> label_blocks) {
CurrentScope::Scope current_scope(macro);
BindingsManagersScope bindings_managers_scope;
CurrentCallable::Scope current_callable(macro);
CurrentReturnValue::Scope current_return_value;
const Signature& signature = macro->signature();
const Type* return_type = macro->signature().return_type;
bool can_return = return_type != TypeOracle::GetNeverType();
BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
BlockBindings<LocalLabel> label_bindings(&LabelBindingsManager::Get());
DCHECK_EQ(macro->signature().parameter_names.size(),
arguments.size() + (this_reference ? 1 : 0));
DCHECK_EQ(this_reference.has_value(), macro->IsMethod());
// Bind the this for methods. Methods that modify a struct-type "this" must
// only be called if the this is in a variable, in which case the
// LocalValue is non-const. Otherwise, the LocalValue used for the parameter
// binding is const, and thus read-only, which will cause errors if
// modified, e.g. when called by a struct method that sets the struct's
// fields. This prevents using temporary struct values for anything other
// than read operations.
if (this_reference) {
DCHECK(macro->IsMethod());
parameter_bindings.Add(kThisParameterName, LocalValue{*this_reference},
true);
}
size_t i = 0;
for (auto arg : arguments) {
if (this_reference && i == signature.implicit_count) i++;
const bool mark_as_used = signature.implicit_count > i;
const Identifier* name = macro->parameter_names()[i++];
parameter_bindings.Add(name,
LocalValue{LocationReference::Temporary(
arg, "parameter " + name->value)},
mark_as_used);
}
DCHECK_EQ(label_blocks.size(), signature.labels.size());
for (size_t i = 0; i < signature.labels.size(); ++i) {
const LabelDeclaration& label_info = signature.labels[i];
label_bindings.Add(label_info.name,
LocalLabel{label_blocks[i], label_info.types});
}
Block* macro_end;
base::Optional<Binding<LocalLabel>> macro_end_binding;
if (can_return) {
Stack<const Type*> stack = assembler().CurrentStack();
std::vector<const Type*> lowered_return_types = LowerType(return_type);
stack.PushMany(lowered_return_types);
if (!return_type->IsConstexpr()) {
SetReturnValue(VisitResult(return_type,
stack.TopRange(lowered_return_types.size())));
}
// The stack copy used to initialize the _macro_end block is only used
// as a template for the actual gotos generated by return statements. It
// doesn't correspond to any real return values, and thus shouldn't contain
// top types, because these would pollute actual return value types that get
// unioned with them for return statements, erroneously forcing them to top.
for (auto i = stack.begin(); i != stack.end(); ++i) {
if ((*i)->IsTopType()) {
*i = TopType::cast(*i)->source_type();
}
}
macro_end = assembler().NewBlock(std::move(stack));
macro_end_binding.emplace(&LabelBindingsManager::Get(), kMacroEndLabelName,
LocalLabel{macro_end, {return_type}});
} else {
SetReturnValue(VisitResult::NeverResult());
}
const Type* result = Visit(*macro->body());
if (result->IsNever()) {
if (!return_type->IsNever() && !macro->HasReturns()) {
std::stringstream s;
s << "macro " << macro->ReadableName()
<< " that never returns must have return type never";
ReportError(s.str());
}
} else {
if (return_type->IsNever()) {
std::stringstream s;
s << "macro " << macro->ReadableName()
<< " has implicit return at end of its declartion but return type "
"never";
ReportError(s.str());
} else if (!macro->signature().return_type->IsVoid()) {
std::stringstream s;
s << "macro " << macro->ReadableName()
<< " expects to return a value but doesn't on all paths";
ReportError(s.str());
}
}
if (!result->IsNever()) {
assembler().Goto(macro_end);
}
if (macro->HasReturns() || !result->IsNever()) {
assembler().Bind(macro_end);
}
return GetAndClearReturnValue();
}
void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
CurrentCallable::Scope current_callable(macro);
const Signature& signature = macro->signature();
const Type* return_type = macro->signature().return_type;
bool can_return = return_type != TypeOracle::GetNeverType();
bool has_return_value =
can_return && return_type != TypeOracle::GetVoidType();
GenerateMacroFunctionDeclaration(csa_headerfile(), macro);
csa_headerfile() << ";\n";
GenerateMacroFunctionDeclaration(csa_ccfile(), macro);
csa_ccfile() << " {\n";
if (output_type_ == OutputType::kCC) {
// For now, generated C++ is only for field offset computations. If we ever
// generate C++ code that can allocate, then it should be handlified.
csa_ccfile() << " DisallowHeapAllocation no_gc;\n";
} else {
csa_ccfile() << " compiler::CodeAssembler ca_(state_);\n";
csa_ccfile()
<< " compiler::CodeAssembler::SourcePositionScope pos_scope(&ca_);\n";
}
Stack<std::string> lowered_parameters;
Stack<const Type*> lowered_parameter_types;
std::vector<VisitResult> arguments;
base::Optional<LocationReference> this_reference;
if (Method* method = Method::DynamicCast(macro)) {
const Type* this_type = method->aggregate_type();
LowerParameter(this_type, ExternalParameterName(kThisParameterName),
&lowered_parameters);
StackRange range = lowered_parameter_types.PushMany(LowerType(this_type));
VisitResult this_result = VisitResult(this_type, range);
// For classes, mark 'this' as a temporary to prevent assignment to it.
// Note that using a VariableAccess for non-class types is technically
// incorrect because changes to the 'this' variable do not get reflected
// to the caller. Therefore struct methods should always be inlined and a
// C++ version should never be generated, since it would be incorrect.
// However, in order to be able to type- and semantics-check even unused
// struct methods, set the this_reference to be the local variable copy of
// the passed-in this, which allows the visitor to at least find and report
// errors.
this_reference =
(this_type->IsClassType())
? LocationReference::Temporary(this_result, "this parameter")
: LocationReference::VariableAccess(this_result);
}
for (size_t i = 0; i < macro->signature().parameter_names.size(); ++i) {
if (this_reference && i == macro->signature().implicit_count) continue;
const std::string& name = macro->parameter_names()[i]->value;
std::string external_name = ExternalParameterName(name);
const Type* type = macro->signature().types()[i];
if (type->IsConstexpr()) {
arguments.push_back(VisitResult(type, external_name));
} else {
LowerParameter(type, external_name, &lowered_parameters);
StackRange range = lowered_parameter_types.PushMany(LowerType(type));
arguments.push_back(VisitResult(type, range));
}
}
DCHECK_EQ(lowered_parameters.Size(), lowered_parameter_types.Size());
assembler_ = CfgAssembler(lowered_parameter_types);
std::vector<Block*> label_blocks;
for (const LabelDeclaration& label_info : signature.labels) {
Stack<const Type*> label_input_stack;
for (const Type* type : label_info.types) {
label_input_stack.PushMany(LowerType(type));
}
Block* block = assembler().NewBlock(std::move(label_input_stack));
label_blocks.push_back(block);
}
VisitResult return_value =
InlineMacro(macro, this_reference, arguments, label_blocks);
Block* end = assembler().NewBlock();
if (return_type != TypeOracle::GetNeverType()) {
assembler().Goto(end);
}
for (size_t i = 0; i < label_blocks.size(); ++i) {
Block* label_block = label_blocks[i];
const LabelDeclaration& label_info = signature.labels[i];
assembler().Bind(label_block);
std::vector<std::string> label_parameter_variables;
for (size_t i = 0; i < label_info.types.size(); ++i) {
LowerLabelParameter(label_info.types[i],
ExternalLabelParameterName(label_info.name->value, i),
&label_parameter_variables);
}
assembler().Emit(GotoExternalInstruction{
ExternalLabelName(label_info.name->value), label_parameter_variables});
}
if (return_type != TypeOracle::GetNeverType()) {
assembler().Bind(end);
}
base::Optional<Stack<std::string>> values;
if (output_type_ == OutputType::kCC) {
CCGenerator cc_generator{assembler().Result(), csa_ccfile()};
values = cc_generator.EmitGraph(lowered_parameters);
} else {
CSAGenerator csa_generator{assembler().Result(), csa_ccfile()};
values = csa_generator.EmitGraph(lowered_parameters);
}
assembler_ = base::nullopt;
if (has_return_value) {
csa_ccfile() << " return ";
if (output_type_ == OutputType::kCC) {
CCGenerator::EmitCCValue(return_value, *values, csa_ccfile());
} else {
CSAGenerator::EmitCSAValue(return_value, *values, csa_ccfile());
}
csa_ccfile() << ";\n";
}
csa_ccfile() << "}\n\n";
}
void ImplementationVisitor::Visit(TorqueMacro* macro) {
VisitMacroCommon(macro);
}
void ImplementationVisitor::Visit(Method* method) {
DCHECK(!method->IsExternal());
VisitMacroCommon(method);
}
namespace {
std::string AddParameter(size_t i, Builtin* builtin,
Stack<std::string>* parameters,
Stack<const Type*>* parameter_types,
BlockBindings<LocalValue>* parameter_bindings,
bool mark_as_used) {
const Identifier* name = builtin->signature().parameter_names[i];
const Type* type = builtin->signature().types()[i];
std::string external_name = "parameter" + std::to_string(i);
parameters->Push(external_name);
StackRange range = parameter_types->PushMany(LowerType(type));
parameter_bindings->Add(
name,
LocalValue{LocationReference::Temporary(VisitResult(type, range),
"parameter " + name->value)},
mark_as_used);
return external_name;
}
} // namespace
void ImplementationVisitor::Visit(Builtin* builtin) {
if (builtin->IsExternal()) return;
CurrentScope::Scope current_scope(builtin);
CurrentCallable::Scope current_callable(builtin);
CurrentReturnValue::Scope current_return_value;
const std::string& name = builtin->ExternalName();
const Signature& signature = builtin->signature();
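  // The generated builtin starts with a standard prologue, roughly
  // (illustrative):
  //
  //   TF_BUILTIN(SomeBuiltin, CodeStubAssembler) {
  //     compiler::CodeAssemblerState* state_ = state();
  //     compiler::CodeAssembler ca_(state());
  //     ...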
csa_ccfile() << "TF_BUILTIN(" << name << ", CodeStubAssembler) {\n"
<< " compiler::CodeAssemblerState* state_ = state();"
<< " compiler::CodeAssembler ca_(state());\n";
Stack<const Type*> parameter_types;
Stack<std::string> parameters;
BindingsManagersScope bindings_managers_scope;
BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
if (builtin->IsVarArgsJavaScript() || builtin->IsFixedArgsJavaScript()) {
if (builtin->IsVarArgsJavaScript()) {
DCHECK(signature.parameter_types.var_args);
if (signature.ExplicitCount() > 0) {
Error("Cannot mix explicit parameters with varargs.")
.Position(signature.parameter_names[signature.implicit_count]->pos);
}
csa_ccfile() << " TNode<Word32T> argc = UncheckedParameter<Word32T>("
<< "Descriptor::kJSActualArgumentsCount);\n";
csa_ccfile() << " TNode<IntPtrT> "
"arguments_length(ChangeInt32ToIntPtr(UncheckedCast<"
"Int32T>(argc)));\n";
csa_ccfile() << " TNode<RawPtrT> arguments_frame = "
"UncheckedCast<RawPtrT>(LoadFramePointer());\n";
csa_ccfile() << " TorqueStructArguments "
"torque_arguments(GetFrameArguments(arguments_frame, "
"arguments_length));\n";
csa_ccfile()
<< " CodeStubArguments arguments(this, torque_arguments);\n";
parameters.Push("torque_arguments.frame");
parameters.Push("torque_arguments.base");
parameters.Push("torque_arguments.length");
const Type* arguments_type = TypeOracle::GetArgumentsType();
StackRange range = parameter_types.PushMany(LowerType(arguments_type));
parameter_bindings.Add(*signature.arguments_variable,
LocalValue{LocationReference::Temporary(
VisitResult(arguments_type, range),
"parameter " + *signature.arguments_variable)},
true);
}
for (size_t i = 0; i < signature.implicit_count; ++i) {
const std::string& param_name = signature.parameter_names[i]->value;
SourcePosition param_pos = signature.parameter_names[i]->pos;
std::string generated_name = AddParameter(
i, builtin, &parameters, &parameter_types, &parameter_bindings, true);
const Type* actual_type = signature.parameter_types.types[i];
std::vector<const Type*> expected_types;
if (param_name == "context") {
csa_ccfile() << " TNode<NativeContext> " << generated_name
<< " = UncheckedParameter<NativeContext>("
<< "Descriptor::kContext);\n";
csa_ccfile() << " USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetNativeContextType(),
TypeOracle::GetContextType()};
} else if (param_name == "receiver") {
csa_ccfile()
<< " TNode<Object> " << generated_name << " = "
<< (builtin->IsVarArgsJavaScript()
? "arguments.GetReceiver()"
: "UncheckedParameter<Object>(Descriptor::kReceiver)")
<< ";\n";
csa_ccfile() << "USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetJSAnyType()};
} else if (param_name == "newTarget") {
csa_ccfile() << " TNode<Object> " << generated_name
<< " = UncheckedParameter<Object>("
<< "Descriptor::kJSNewTarget);\n";
csa_ccfile() << "USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetJSAnyType()};
} else if (param_name == "target") {
csa_ccfile() << " TNode<JSFunction> " << generated_name
<< " = UncheckedParameter<JSFunction>("
<< "Descriptor::kJSTarget);\n";
csa_ccfile() << "USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetJSFunctionType()};
} else {
Error(
"Unexpected implicit parameter \"", param_name,
"\" for JavaScript calling convention, "
"expected \"context\", \"receiver\", \"target\", or \"newTarget\"")
.Position(param_pos);
expected_types = {actual_type};
}
if (std::find(expected_types.begin(), expected_types.end(),
actual_type) == expected_types.end()) {
Error("According to JavaScript calling convention, expected parameter ",
param_name, " to have type ", PrintList(expected_types, " or "),
" but found type ", *actual_type)
.Position(param_pos);
}
}
for (size_t i = signature.implicit_count;
i < signature.parameter_names.size(); ++i) {
const std::string& parameter_name = signature.parameter_names[i]->value;
const Type* type = signature.types()[i];
const bool mark_as_used = signature.implicit_count > i;
std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
&parameter_bindings, mark_as_used);
csa_ccfile() << " " << type->GetGeneratedTypeName() << " " << var
<< " = "
<< "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
<< ">(Descriptor::k" << CamelifyString(parameter_name)
<< ");\n";
csa_ccfile() << " USE(" << var << ");\n";
}
} else {
DCHECK(builtin->IsStub());
bool has_context_parameter = signature.HasContextParameter();
for (size_t i = 0; i < signature.parameter_names.size(); ++i) {
const Type* type = signature.types()[i];
const bool mark_as_used = signature.implicit_count > i;
std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
&parameter_bindings, mark_as_used);
csa_ccfile() << " " << type->GetGeneratedTypeName() << " " << var
<< " = "
<< "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
<< ">(";
if (i == 0 && has_context_parameter) {
csa_ccfile() << "Descriptor::kContext";
} else {
csa_ccfile() << "Descriptor::ParameterIndex<"
<< (has_context_parameter ? i - 1 : i) << ">()";
}
csa_ccfile() << ");\n";
csa_ccfile() << " USE(" << var << ");\n";
}
}
assembler_ = CfgAssembler(parameter_types);
const Type* body_result = Visit(*builtin->body());
if (body_result != TypeOracle::GetNeverType()) {
ReportError("control reaches end of builtin, expected return of a value");
}
CSAGenerator csa_generator{assembler().Result(), csa_ccfile(),
builtin->kind()};
csa_generator.EmitGraph(parameters);
assembler_ = base::nullopt;
csa_ccfile() << "}\n\n";
}
const Type* ImplementationVisitor::Visit(VarDeclarationStatement* stmt) {
BlockBindings<LocalValue> block_bindings(&ValueBindingsManager::Get());
return Visit(stmt, &block_bindings);
}
const Type* ImplementationVisitor::Visit(
VarDeclarationStatement* stmt, BlockBindings<LocalValue>* block_bindings) {
// const qualified variables are required to be initialized properly.
if (stmt->const_qualified && !stmt->initializer) {
ReportError("local constant \"", stmt->name, "\" is not initialized.");
}
base::Optional<const Type*> type;
if (stmt->type) {
type = TypeVisitor::ComputeType(*stmt->type);
}
base::Optional<VisitResult> init_result;
if (stmt->initializer) {
StackScope scope(this);
init_result = Visit(*stmt->initializer);
if (type) {
init_result = GenerateImplicitConvert(*type, *init_result);
}
type = init_result->type();
if ((*type)->IsConstexpr() && !stmt->const_qualified) {
Error("Use 'const' instead of 'let' for variable '", stmt->name->value,
"' of constexpr type '", (*type)->ToString(), "'.")
.Position(stmt->name->pos)
.Throw();
}
init_result = scope.Yield(*init_result);
} else {
DCHECK(type.has_value());
if ((*type)->IsConstexpr()) {
ReportError("constexpr variables need an initializer");
}
TypeVector lowered_types = LowerType(*type);
for (const Type* type : lowered_types) {
assembler().Emit(PushUninitializedInstruction{TypeOracle::GetTopType(
"uninitialized variable '" + stmt->name->value + "' of type " +
type->ToString() + " originally defined at " +
PositionAsString(stmt->pos),
type)});
}
init_result =
VisitResult(*type, assembler().TopRange(lowered_types.size()));
}
LocationReference ref = stmt->const_qualified
? LocationReference::Temporary(
*init_result, "const " + stmt->name->value)
: LocationReference::VariableAccess(*init_result);
block_bindings->Add(stmt->name, LocalValue{std::move(ref)});
return TypeOracle::GetVoidType();
}
const Type* ImplementationVisitor::Visit(TailCallStatement* stmt) {
return Visit(stmt->call, true).type();
}
VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
Block* true_block = assembler().NewBlock(assembler().CurrentStack());
Block* false_block = assembler().NewBlock(assembler().CurrentStack());
Block* done_block = assembler().NewBlock();
Block* true_conversion_block = assembler().NewBlock();
GenerateExpressionBranch(expr->condition, true_block, false_block);
VisitResult left;
VisitResult right;
{
// The code for both branches of the conditional needs to be generated
// first, because the common type of the true and false results must be
// known before either branch's result can be converted to that common
// type.
assembler().Bind(true_block);
StackScope left_scope(this);
left = Visit(expr->if_true);
assembler().Goto(true_conversion_block);
const Type* common_type;
{
assembler().Bind(false_block);
StackScope right_scope(this);
right = Visit(expr->if_false);
common_type = GetCommonType(left.type(), right.type());
right = right_scope.Yield(GenerateImplicitConvert(common_type, right));
assembler().Goto(done_block);
}
assembler().Bind(true_conversion_block);
left = left_scope.Yield(GenerateImplicitConvert(common_type, left));
assembler().Goto(done_block);
}
assembler().Bind(done_block);
CHECK_EQ(left, right);
return left;
}
VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
StackScope outer_scope(this);
VisitResult left_result = Visit(expr->left);
if (left_result.type()->IsConstexprBool()) {
VisitResult right_result = Visit(expr->right);
if (!right_result.type()->IsConstexprBool()) {
ReportError(
"expected type constexpr bool on right-hand side of operator "
"||");
}
return VisitResult(TypeOracle::GetConstexprBoolType(),
std::string("(") + left_result.constexpr_value() +
" || " + right_result.constexpr_value() + ")");
}
Block* true_block = assembler().NewBlock();
Block* false_block = assembler().NewBlock();
Block* done_block = assembler().NewBlock();
left_result = GenerateImplicitConvert(TypeOracle::GetBoolType(), left_result);
GenerateBranch(left_result, true_block, false_block);
assembler().Bind(true_block);
VisitResult true_result = GenerateBoolConstant(true);
assembler().Goto(done_block);
assembler().Bind(false_block);
VisitResult false_result;
{
StackScope false_block_scope(this);
false_result = false_block_scope.Yield(
GenerateImplicitConvert(TypeOracle::GetBoolType(), Visit(expr->right)));
}
assembler().Goto(done_block);
assembler().Bind(done_block);
DCHECK_EQ(true_result, false_result);
return outer_scope.Yield(true_result);
}
VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
StackScope outer_scope(this);
VisitResult left_result = Visit(expr->left);
if (left_result.type()->IsConstexprBool()) {
VisitResult right_result = Visit(expr->right);
if (!right_result.type()->IsConstexprBool()) {
ReportError(
"expected type constexpr bool on right-hand side of operator "
"&&");
}
return VisitResult(TypeOracle::GetConstexprBoolType(),
std::string("(") + left_result.constexpr_value() +
" && " + right_result.constexpr_value() + ")");
}
Block* true_block = assembler().NewBlock();
Block* false_block = assembler().NewBlock();
Block* done_block = assembler().NewBlock();
left_result = GenerateImplicitConvert(TypeOracle::GetBoolType(), left_result);
GenerateBranch(left_result, true_block, false_block);
assembler().Bind(true_block);
VisitResult true_result;
{
StackScope true_block_scope(this);
VisitResult right_result = Visit(expr->right);
if (TryGetSourceForBitfieldExpression(expr->left) != nullptr &&
TryGetSourceForBitfieldExpression(expr->right) != nullptr &&
TryGetSourceForBitfieldExpression(expr->left)->value ==
TryGetSourceForBitfieldExpression(expr->right)->value) {
Lint(
"Please use & rather than && when checking multiple bitfield "
"values, to avoid complexity in generated code.");
}
true_result = true_block_scope.Yield(
GenerateImplicitConvert(TypeOracle::GetBoolType(), right_result));
}
assembler().Goto(done_block);
assembler().Bind(false_block);
VisitResult false_result = GenerateBoolConstant(false);
assembler().Goto(done_block);
assembler().Bind(done_block);
DCHECK_EQ(true_result, false_result);
return outer_scope.Yield(true_result);
}
VisitResult ImplementationVisitor::Visit(IncrementDecrementExpression* expr) {
StackScope scope(this);
LocationReference location_ref = GetLocationReference(expr->location);
VisitResult current_value = GenerateFetchFromLocation(location_ref);
VisitResult one = {TypeOracle::GetConstInt31Type(), "1"};
Arguments args;
args.parameters = {current_value, one};
VisitResult assignment_value = GenerateCall(
expr->op == IncrementDecrementOperator::kIncrement ? "+" : "-", args);
GenerateAssignToLocation(location_ref, assignment_value);
return scope.Yield(expr->postfix ? current_value : assignment_value);
}
VisitResult ImplementationVisitor::Visit(AssignmentExpression* expr) {
StackScope scope(this);
LocationReference location_ref = GetLocationReference(expr->location);
VisitResult assignment_value;
if (expr->op) {
VisitResult location_value = GenerateFetchFromLocation(location_ref);
assignment_value = Visit(expr->value);
Arguments args;
args.parameters = {location_value, assignment_value};
assignment_value = GenerateCall(*expr->op, args);
GenerateAssignToLocation(location_ref, assignment_value);
} else {
assignment_value = Visit(expr->value);
GenerateAssignToLocation(location_ref, assignment_value);
}
return scope.Yield(assignment_value);
}
VisitResult ImplementationVisitor::Visit(NumberLiteralExpression* expr) {
const Type* result_type = TypeOracle::GetConstFloat64Type();
if (expr->number >= std::numeric_limits<int32_t>::min() &&
expr->number <= std::numeric_limits<int32_t>::max()) {
int32_t i = static_cast<int32_t>(expr->number);
if (i == expr->number) {
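      // The value fits in 31 signed bits iff the top two bits of its 32-bit
      // representation agree, i.e. sign-extension from bit 30 is lossless.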
if ((i >> 30) == (i >> 31)) {
result_type = TypeOracle::GetConstInt31Type();
} else {
result_type = TypeOracle::GetConstInt32Type();
}
}
}
std::stringstream str;
str << std::setprecision(std::numeric_limits<double>::digits10 + 1)
<< expr->number;
return VisitResult{result_type, str.str()};
}
VisitResult ImplementationVisitor::Visit(AssumeTypeImpossibleExpression* expr) {
VisitResult result = Visit(expr->expression);
const Type* result_type = SubtractType(
result.type(), TypeVisitor::ComputeType(expr->excluded_type));
if (result_type->IsNever()) {
ReportError("unreachable code");
}
CHECK_EQ(LowerType(result_type), TypeVector{result_type});
assembler().Emit(UnsafeCastInstruction{result_type});
result.SetType(result_type);
return result;
}
VisitResult ImplementationVisitor::Visit(StringLiteralExpression* expr) {
return VisitResult{
TypeOracle::GetConstStringType(),
"\"" + expr->literal.substr(1, expr->literal.size() - 2) + "\""};
}
VisitResult ImplementationVisitor::GetBuiltinCode(Builtin* builtin) {
if (builtin->IsExternal() || builtin->kind() != Builtin::kStub) {
ReportError(
"creating function pointers is only allowed for internal builtins with "
"stub linkage");
}
const Type* type = TypeOracle::GetBuiltinPointerType(
builtin->signature().parameter_types.types,
builtin->signature().return_type);
assembler().Emit(
PushBuiltinPointerInstruction{builtin->ExternalName(), type});
return VisitResult(type, assembler().TopRange(1));
}
VisitResult ImplementationVisitor::Visit(LocationExpression* expr) {
StackScope scope(this);
return scope.Yield(GenerateFetchFromLocation(GetLocationReference(expr)));
}
VisitResult ImplementationVisitor::Visit(FieldAccessExpression* expr) {
StackScope scope(this);
LocationReference location = GetLocationReference(expr);
if (location.IsBitFieldAccess()) {
if (auto* identifier = IdentifierExpression::DynamicCast(expr->object)) {
bitfield_expressions_[expr] = identifier->name;
}
}
return scope.Yield(GenerateFetchFromLocation(location));
}
const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
Binding<LocalLabel>* label = LookupLabel(stmt->label->value);
size_t parameter_count = label->parameter_types.size();
if (stmt->arguments.size() != parameter_count) {
ReportError("goto to label has incorrect number of parameters (expected ",
parameter_count, " found ", stmt->arguments.size(), ")");
}
if (GlobalContext::collect_language_server_data()) {
LanguageServerData::AddDefinition(stmt->label->pos,
label->declaration_position());
}
size_t i = 0;
StackRange arguments = assembler().TopRange(0);
for (Expression* e : stmt->arguments) {
StackScope scope(this);
VisitResult result = Visit(e);
const Type* parameter_type = label->parameter_types[i++];
result = GenerateImplicitConvert(parameter_type, result);
arguments.Extend(scope.Yield(result).stack_range());
}
assembler().Goto(label->block, arguments.Size());
return TypeOracle::GetNeverType();
}
const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
bool has_else = stmt->if_false.has_value();
if (stmt->is_constexpr) {
VisitResult expression_result = Visit(stmt->condition);
if (!(expression_result.type() == TypeOracle::GetConstexprBoolType())) {
std::stringstream stream;
stream << "expression should return type constexpr bool "
<< "but returns type " << *expression_result.type();
ReportError(stream.str());
}
Block* true_block = assembler().NewBlock();
Block* false_block = assembler().NewBlock();
Block* done_block = assembler().NewBlock();
assembler().Emit(ConstexprBranchInstruction{
expression_result.constexpr_value(), true_block, false_block});
assembler().Bind(true_block);
const Type* left_result = Visit(stmt->if_true);
if (left_result == TypeOracle::GetVoidType()) {
assembler().Goto(done_block);
}
assembler().Bind(false_block);
const Type* right_result = TypeOracle::GetVoidType();
if (has_else) {
right_result = Visit(*stmt->if_false);
}
if (right_result == TypeOracle::GetVoidType()) {
assembler().Goto(done_block);
}
if (left_result->IsNever() != right_result->IsNever()) {
std::stringstream stream;
stream << "either both or neither branches in a constexpr if statement "
"must reach their end at"
<< PositionAsString(stmt->pos);
ReportError(stream.str());
}
if (left_result != TypeOracle::GetNeverType()) {
assembler().Bind(done_block);
}
return left_result;
} else {
Block* true_block = assembler().NewBlock(assembler().CurrentStack(),
IsDeferred(stmt->if_true));
Block* false_block =
assembler().NewBlock(assembler().CurrentStack(),
stmt->if_false && IsDeferred(*stmt->if_false));
GenerateExpressionBranch(stmt->condition, true_block, false_block);
Block* done_block;
bool live = false;
if (has_else) {
done_block = assembler().NewBlock();
} else {
done_block = false_block;
live = true;
}
assembler().Bind(true_block);
{
const Type* result = Visit(stmt->if_true);
if (result == TypeOracle::GetVoidType()) {
live = true;
assembler().Goto(done_block);
}
}
if (has_else) {
assembler().Bind(false_block);
const Type* result = Visit(*stmt->if_false);
if (result == TypeOracle::GetVoidType()) {
live = true;
assembler().Goto(done_block);
}
}
if (live) {
assembler().Bind(done_block);
}
return live ? TypeOracle::GetVoidType() : TypeOracle::GetNeverType();
}
}
const Type* ImplementationVisitor::Visit(WhileStatement* stmt) {
Block* body_block = assembler().NewBlock(assembler().CurrentStack());
Block* exit_block = assembler().NewBlock(assembler().CurrentStack());
Block* header_block = assembler().NewBlock();
assembler().Goto(header_block);
assembler().Bind(header_block);
GenerateExpressionBranch(stmt->condition, body_block, exit_block);
assembler().Bind(body_block);
{
BreakContinueActivator activator{exit_block, header_block};
const Type* body_result = Visit(stmt->body);
if (body_result != TypeOracle::GetNeverType()) {
assembler().Goto(header_block);
}
}
assembler().Bind(exit_block);
return TypeOracle::GetVoidType();
}
const Type* ImplementationVisitor::Visit(BlockStatement* block) {
BlockBindings<LocalValue> block_bindings(&ValueBindingsManager::Get());
const Type* type = TypeOracle::GetVoidType();
for (Statement* s : block->statements) {
CurrentSourcePosition::Scope source_position(s->pos);
if (type->IsNever()) {
ReportError("statement after non-returning statement");
}
if (auto* var_declaration = VarDeclarationStatement::DynamicCast(s)) {
type = Visit(var_declaration, &block_bindings);
} else {
type = Visit(s);
}
}
return type;
}
const Type* ImplementationVisitor::Visit(DebugStatement* stmt) {
#if defined(DEBUG)
assembler().Emit(PrintConstantStringInstruction{"halting because of '" +
stmt->reason + "' at " +
PositionAsString(stmt->pos)});
#endif
assembler().Emit(AbortInstruction{stmt->never_continues
? AbortInstruction::Kind::kUnreachable
: AbortInstruction::Kind::kDebugBreak});
if (stmt->never_continues) {
return TypeOracle::GetNeverType();
} else {
return TypeOracle::GetVoidType();
}
}
namespace {
std::string FormatAssertSource(const std::string& str) {
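  // For example, "assert(foo &&\n          bar)" becomes
  // "assert(foo && bar)".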
// Replace all whitespace characters with a space character.
std::string str_no_newlines = str;
std::replace_if(
str_no_newlines.begin(), str_no_newlines.end(),
[](unsigned char c) { return isspace(c); }, ' ');
// str might include indentation, squash multiple space characters into one.
std::string result;
std::unique_copy(str_no_newlines.begin(), str_no_newlines.end(),
std::back_inserter(result),
[](char a, char b) { return a == ' ' && b == ' '; });
return result;
}
} // namespace
const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
if (stmt->kind == AssertStatement::AssertKind::kStaticAssert) {
std::string message =
"static_assert(" + stmt->source + ") at " + ToString(stmt->pos);
GenerateCall(QualifiedName({"", TORQUE_INTERNAL_NAMESPACE_STRING},
STATIC_ASSERT_MACRO_STRING),
Arguments{{Visit(stmt->expression),
VisitResult(TypeOracle::GetConstexprStringType(),
StringLiteralQuote(message))},
{}});
return TypeOracle::GetVoidType();
}
bool do_check = stmt->kind != AssertStatement::AssertKind::kAssert ||
GlobalContext::force_assert_statements();
#if defined(DEBUG)
do_check = true;
#endif
Block* resume_block;
if (!do_check) {
Block* unreachable_block = assembler().NewBlock(assembler().CurrentStack());
resume_block = assembler().NewBlock(assembler().CurrentStack());
assembler().Goto(resume_block);
assembler().Bind(unreachable_block);
}
// CSA_ASSERT & co. are not used here on purpose for two reasons. First,
// Torque allows and handles two types of expressions in the if protocol
// automagically, ones that return TNode<BoolT> and those that use the
// BranchIf(..., Label* true, Label* false) idiom. Because the machinery to
// handle this is embedded in the expression handling, the decision between
// CSA_ASSERT and CSA_ASSERT_BRANCH isn't trivial to make up-front.
// Secondly, on failure, the assert text should be the corresponding Torque
// code, not the -gen.cc code, which would be the case when using
// CSA_ASSERT_XXX.
Block* true_block = assembler().NewBlock(assembler().CurrentStack());
Block* false_block = assembler().NewBlock(assembler().CurrentStack(), true);
GenerateExpressionBranch(stmt->expression, true_block, false_block);
assembler().Bind(false_block);
assembler().Emit(AbortInstruction{
AbortInstruction::Kind::kAssertionFailure,
"Torque assert '" + FormatAssertSource(stmt->source) + "' failed"});
assembler().Bind(true_block);
if (!do_check) {
assembler().Bind(resume_block);
}
return TypeOracle::GetVoidType();
}
const Type* ImplementationVisitor::Visit(ExpressionStatement* stmt) {
const Type* type = Visit(stmt->expression).type();
return type->IsNever() ? type : TypeOracle::GetVoidType();
}
const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
Callable* current_callable = CurrentCallable::Get();
if (current_callable->signature().return_type->IsNever()) {
std::stringstream s;
s << "cannot return from a function with return type never";
ReportError(s.str());
}
LocalLabel* end =
current_callable->IsMacro() ? LookupLabel(kMacroEndLabelName) : nullptr;
if (current_callable->HasReturnValue()) {
if (!stmt->value) {
std::stringstream s;
s << "return expression needs to be specified for a return type of "
<< *current_callable->signature().return_type;
ReportError(s.str());
}
VisitResult expression_result = Visit(*stmt->value);
VisitResult return_result = GenerateImplicitConvert(
current_callable->signature().return_type, expression_result);
if (current_callable->IsMacro()) {
if (return_result.IsOnStack()) {
StackRange return_value_range =
GenerateLabelGoto(end, return_result.stack_range());
SetReturnValue(VisitResult(return_result.type(), return_value_range));
} else {
GenerateLabelGoto(end);
SetReturnValue(return_result);
}
} else if (current_callable->IsBuiltin()) {
assembler().Emit(ReturnInstruction{});
} else {
UNREACHABLE();
}
} else {
if (stmt->value) {
std::stringstream s;
s << "return expression can't be specified for a void or never return "
"type";
ReportError(s.str());
}
GenerateLabelGoto(end);
}
current_callable->IncrementReturns();
return TypeOracle::GetNeverType();
}
VisitResult ImplementationVisitor::Visit(TryLabelExpression* expr) {
size_t parameter_count = expr->label_block->parameters.names.size();
std::vector<VisitResult> parameters;
Block* label_block = nullptr;
Block* done_block = assembler().NewBlock();
VisitResult try_result;
{
CurrentSourcePosition::Scope source_position(expr->label_block->pos);
if (expr->label_block->parameters.has_varargs) {
ReportError("cannot use ... for label parameters");
}
Stack<const Type*> label_input_stack = assembler().CurrentStack();
TypeVector parameter_types;
for (size_t i = 0; i < parameter_count; ++i) {
const Type* type =
TypeVisitor::ComputeType(expr->label_block->parameters.types[i]);
parameter_types.push_back(type);
if (type->IsConstexpr()) {
ReportError("no constexpr type allowed for label arguments");
}
StackRange range = label_input_stack.PushMany(LowerType(type));
parameters.push_back(VisitResult(type, range));
}
label_block = assembler().NewBlock(label_input_stack,
IsDeferred(expr->label_block->body));
Binding<LocalLabel> label_binding{&LabelBindingsManager::Get(),
expr->label_block->label,
LocalLabel{label_block, parameter_types}};
// Visit try
StackScope stack_scope(this);
try_result = Visit(expr->try_expression);
if (try_result.type() != TypeOracle::GetNeverType()) {
try_result = stack_scope.Yield(try_result);
assembler().Goto(done_block);
}
}
// Visit and output the code for the label block. If the label block falls
// through, then the try must not return a value. Also, if the try doesn't
// fall through, but the label does, then overall the try-label block
// returns type void.
assembler().Bind(label_block);
const Type* label_result;
{
BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
for (size_t i = 0; i < parameter_count; ++i) {
Identifier* name = expr->label_block->parameters.names[i];
parameter_bindings.Add(name,
LocalValue{LocationReference::Temporary(
parameters[i], "parameter " + name->value)});
}
label_result = Visit(expr->label_block->body);
}
if (!try_result.type()->IsVoidOrNever() && label_result->IsVoid()) {
ReportError(
"otherwise clauses cannot fall through in a non-void expression");
}
if (label_result != TypeOracle::GetNeverType()) {
assembler().Goto(done_block);
}
if (label_result->IsVoid() && try_result.type()->IsNever()) {
try_result =
VisitResult(TypeOracle::GetVoidType(), try_result.stack_range());
}
if (!try_result.type()->IsNever()) {
assembler().Bind(done_block);
}
return try_result;
}
VisitResult ImplementationVisitor::Visit(StatementExpression* expr) {
return VisitResult{Visit(expr->statement), assembler().TopRange(0)};
}
InitializerResults ImplementationVisitor::VisitInitializerResults(
const ClassType* class_type,
const std::vector<NameAndExpression>& initializers) {
InitializerResults result;
for (const NameAndExpression& initializer : initializers) {
result.names.push_back(initializer.name);
Expression* e = initializer.expression;
const Field& field = class_type->LookupField(initializer.name->value);
bool has_index = field.index.has_value();
if (SpreadExpression* s = SpreadExpression::DynamicCast(e)) {
if (!has_index) {
ReportError(
"spread expressions can only be used to initialize indexed class "
"fields ('",
initializer.name->value, "' is not)");
}
e = s->spreadee;
} else if (has_index) {
ReportError("the indexed class field '", initializer.name->value,
"' must be initialized with a spread operator");
}
result.field_value_map[field.name_and_type.name] = Visit(e);
}
return result;
}
LocationReference ImplementationVisitor::GenerateFieldReference(
VisitResult object, const Field& field, const ClassType* class_type) {
if (field.index.has_value()) {
return LocationReference::HeapSlice(
GenerateCall(class_type->GetSliceMacroName(field), {{object}, {}}));
}
DCHECK(field.offset.has_value());
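  // A heap reference is represented on the stack as an (object, offset)
  // pair: push a copy of the object, followed by the intptr field offset.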
StackRange result_range = assembler().TopRange(0);
result_range.Extend(GenerateCopy(object).stack_range());
VisitResult offset =
VisitResult(TypeOracle::GetConstInt31Type(), ToString(*field.offset));
offset = GenerateImplicitConvert(TypeOracle::GetIntPtrType(), offset);
result_range.Extend(offset.stack_range());
const Type* type = TypeOracle::GetReferenceType(field.name_and_type.type,
field.const_qualified);
return LocationReference::HeapReference(VisitResult(type, result_range));
}
// This is used to generate field references during initialization, where we can
// re-use the offsets used for computing the allocation size.
LocationReference ImplementationVisitor::GenerateFieldReferenceForInit(
VisitResult object, const Field& field,
const LayoutForInitialization& layout) {
StackRange result_range = assembler().TopRange(0);
result_range.Extend(GenerateCopy(object).stack_range());
VisitResult offset = GenerateImplicitConvert(
TypeOracle::GetIntPtrType(), layout.offsets.at(field.name_and_type.name));
result_range.Extend(offset.stack_range());
if (field.index) {
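    // An indexed field yields a heap slice, represented on the stack as an
    // (object, offset, length) triple.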
VisitResult length =
GenerateCopy(layout.array_lengths.at(field.name_and_type.name));
result_range.Extend(length.stack_range());
const Type* slice_type = TypeOracle::GetSliceType(field.name_and_type.type);
return LocationReference::HeapSlice(VisitResult(slice_type, result_range));
} else {
// Const fields are writable during initialization.
VisitResult heap_reference(
TypeOracle::GetMutableReferenceType(field.name_and_type.type),
result_range);
return LocationReference::HeapReference(heap_reference);
}
}
void ImplementationVisitor::InitializeClass(
const ClassType* class_type, VisitResult allocate_result,
const InitializerResults& initializer_results,
const LayoutForInitialization& layout) {
if (const ClassType* super = class_type->GetSuperClass()) {
InitializeClass(super, allocate_result, initializer_results, layout);
}
for (Field f : class_type->fields()) {
VisitResult initializer_value =
initializer_results.field_value_map.at(f.name_and_type.name);
LocationReference field =
GenerateFieldReferenceForInit(allocate_result, f, layout);
if (f.index) {
DCHECK(field.IsHeapSlice());
VisitResult slice = field.GetVisitResult();
GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
"InitializeFieldsFromIterator"),
{{slice, initializer_value}, {}});
} else {
GenerateAssignToLocation(field, initializer_value);
}
}
}
VisitResult ImplementationVisitor::GenerateArrayLength(
Expression* array_length, Namespace* nspace,
const std::map<std::string, LocalValue>& bindings) {
StackScope stack_scope(this);
CurrentSourcePosition::Scope pos_scope(array_length->pos);
// Switch to the namespace where the class was declared.
CurrentScope::Scope current_scope_scope(nspace);
// Reset local bindings and install local bindings for the preceding fields.
BindingsManagersScope bindings_managers_scope;
BlockBindings<LocalValue> field_bindings(&ValueBindingsManager::Get());
for (auto& p : bindings) {
field_bindings.Add(p.first, LocalValue{p.second}, true);
}
VisitResult length = Visit(array_length);
VisitResult converted_length =
GenerateCall("Convert", Arguments{{length}, {}},
{TypeOracle::GetIntPtrType(), length.type()}, false);
return stack_scope.Yield(converted_length);
}
VisitResult ImplementationVisitor::GenerateArrayLength(VisitResult object,
const Field& field) {
DCHECK(field.index);
StackScope stack_scope(this);
const ClassType* class_type = *object.type()->ClassSupertype();
std::map<std::string, LocalValue> bindings;
bool before_current = true;
for (Field f : class_type->ComputeAllFields()) {
if (field.name_and_type.name == f.name_and_type.name) {
before_current = false;
}
bindings.insert(
{f.name_and_type.name,
f.const_qualified
? (before_current
? LocalValue{GenerateFieldReference(object, f, class_type)}
: LocalValue("Array lengths may only refer to fields "
"defined earlier"))
: LocalValue(
"Non-const fields cannot be used for array lengths.")});
}
return stack_scope.Yield(
GenerateArrayLength(*field.index, class_type->nspace(), bindings));
}
VisitResult ImplementationVisitor::GenerateArrayLength(
const ClassType* class_type, const InitializerResults& initializer_results,
const Field& field) {
DCHECK(field.index);
StackScope stack_scope(this);
std::map<std::string, LocalValue> bindings;
for (Field f : class_type->ComputeAllFields()) {
if (f.index) break;
const std::string& fieldname = f.name_and_type.name;
VisitResult value = initializer_results.field_value_map.at(fieldname);
bindings.insert(
{fieldname,
f.const_qualified
? LocalValue{LocationReference::Temporary(
value, "initial field " + fieldname)}
: LocalValue(
"Non-const fields cannot be used for array lengths.")});
}
return stack_scope.Yield(
GenerateArrayLength(*field.index, class_type->nspace(), bindings));
}
LayoutForInitialization ImplementationVisitor::GenerateLayoutForInitialization(
const ClassType* class_type,
const InitializerResults& initializer_results) {
LayoutForInitialization layout;
VisitResult offset;
for (Field f : class_type->ComputeAllFields()) {
if (f.offset.has_value()) {
offset =
VisitResult(TypeOracle::GetConstInt31Type(), ToString(*f.offset));
}
layout.offsets[f.name_and_type.name] = offset;
if (f.index) {
size_t element_size;
std::string element_size_string;
std::tie(element_size, element_size_string) =
*SizeOf(f.name_and_type.type);
VisitResult array_element_size =
VisitResult(TypeOracle::GetConstInt31Type(), element_size_string);
VisitResult array_length =
GenerateArrayLength(class_type, initializer_results, f);
layout.array_lengths[f.name_and_type.name] = array_length;
Arguments arguments;
arguments.parameters = {offset, array_length, array_element_size};
offset = GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
"AddIndexedFieldSizeToObjectSize"),
arguments);
} else {
DCHECK(f.offset.has_value());
}
}
if (class_type->size().SingleValue()) {
layout.size = VisitResult(TypeOracle::GetConstInt31Type(),
ToString(*class_type->size().SingleValue()));
} else {
layout.size = offset;
}
if ((size_t{1} << class_type->size().AlignmentLog2()) <
TargetArchitecture::TaggedSize()) {
Arguments arguments;
arguments.parameters = {layout.size};
layout.size = GenerateCall(
QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "AlignTagged"),
arguments);
}
return layout;
}
VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
StackScope stack_scope(this);
const Type* type = TypeVisitor::ComputeType(expr->type);
const ClassType* class_type = ClassType::DynamicCast(type);
if (class_type == nullptr) {
ReportError("type for new expression must be a class, \"", *type,
"\" is not");
}
if (!class_type->AllowInstantiation()) {
// Classes that are only used for testing should never be instantiated.
ReportError(*class_type,
" cannot be allocated with new (it's used for testing)");
}
InitializerResults initializer_results =
VisitInitializerResults(class_type, expr->initializers);
const Field& map_field = class_type->LookupField("map");
if (*map_field.offset != 0) {
ReportError("class initializers must have a map as first parameter");
}
const std::map<std::string, VisitResult>& initializer_fields =
initializer_results.field_value_map;
auto it_object_map = initializer_fields.find(map_field.name_and_type.name);
VisitResult object_map;
if (class_type->IsExtern()) {
if (it_object_map == initializer_fields.end()) {
ReportError("Constructor for ", class_type->name(),
" needs Map argument!");
}
object_map = it_object_map->second;
} else {
if (it_object_map != initializer_fields.end()) {
ReportError(
"Constructor for ", class_type->name(),
" must not specify Map argument; it is automatically inserted.");
}
Arguments get_struct_map_arguments;
get_struct_map_arguments.parameters.push_back(
VisitResult(TypeOracle::GetConstexprInstanceTypeType(),
CapifyStringWithUnderscores(class_type->name()) + "_TYPE"));
object_map = GenerateCall(
QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "GetInstanceTypeMap"),
get_struct_map_arguments, {}, false);
CurrentSourcePosition::Scope current_pos(expr->pos);
initializer_results.names.insert(initializer_results.names.begin(),
MakeNode<Identifier>("map"));
initializer_results.field_value_map[map_field.name_and_type.name] =
object_map;
}
CheckInitializersWellformed(class_type->name(),
class_type->ComputeAllFields(),
expr->initializers, !class_type->IsExtern());
LayoutForInitialization layout =
GenerateLayoutForInitialization(class_type, initializer_results);
Arguments allocate_arguments;
allocate_arguments.parameters.push_back(layout.size);
allocate_arguments.parameters.push_back(object_map);
allocate_arguments.parameters.push_back(
GenerateBoolConstant(expr->pretenured));
VisitResult allocate_result = GenerateCall(
QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "AllocateFromNew"),
allocate_arguments, {class_type}, false);
DCHECK(allocate_result.IsOnStack());
InitializeClass(class_type, allocate_result, initializer_results, layout);
return stack_scope.Yield(GenerateCall(
"%RawDownCast", Arguments{{allocate_result}, {}}, {class_type}));
}
const Type* ImplementationVisitor::Visit(BreakStatement* stmt) {
base::Optional<Binding<LocalLabel>*> break_label =
TryLookupLabel(kBreakLabelName);
if (!break_label) {
ReportError("break used outside of loop");
}
assembler().Goto((*break_label)->block);
return TypeOracle::GetNeverType();
}
const Type* ImplementationVisitor::Visit(ContinueStatement* stmt) {
base::Optional<Binding<LocalLabel>*> continue_label =
TryLookupLabel(kContinueLabelName);
if (!continue_label) {
ReportError("continue used outside of loop");
}
assembler().Goto((*continue_label)->block);
return TypeOracle::GetNeverType();
}
const Type* ImplementationVisitor::Visit(ForLoopStatement* stmt) {
BlockBindings<LocalValue> loop_bindings(&ValueBindingsManager::Get());
if (stmt->var_declaration) Visit(*stmt->var_declaration, &loop_bindings);
Block* body_block = assembler().NewBlock(assembler().CurrentStack());
Block* exit_block = assembler().NewBlock(assembler().CurrentStack());
Block* header_block = assembler().NewBlock();
assembler().Goto(header_block);
assembler().Bind(header_block);
// The continue label is where "continue" statements jump to. If no action
// expression is provided, we jump directly to the header.
Block* continue_block = header_block;
// The action label is only needed when an action expression was provided.
Block* action_block = nullptr;
if (stmt->action) {
action_block = assembler().NewBlock();
// The action expression needs to be executed on a continue.
continue_block = action_block;
}
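  // Resulting block graph, roughly:
  //   header --test--> body or exit   (no test: header jumps to body)
  //   body --fall-through/continue--> continue_block (action or header)
  //   action --> header;  break --> exit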
if (stmt->test) {
GenerateExpressionBranch(*stmt->test, body_block, exit_block);
} else {
assembler().Goto(body_block);
}
assembler().Bind(body_block);
{
BreakContinueActivator activator(exit_block, continue_block);
const Type* body_result = Visit(stmt->body);
if (body_result != TypeOracle::GetNeverType()) {
assembler().Goto(continue_block);
}
}
if (stmt->action) {
assembler().Bind(action_block);
const Type* action_result = Visit(*stmt->action);
if (action_result != TypeOracle::GetNeverType()) {
assembler().Goto(header_block);
}
}
assembler().Bind(exit_block);
return TypeOracle::GetVoidType();
}
VisitResult ImplementationVisitor::Visit(SpreadExpression* expr) {
ReportError(
"spread operators are currently only supported in indexed class field "
"initialization expressions");
}
void ImplementationVisitor::GenerateImplementation(const std::string& dir) {
for (SourceId file : SourceFileMap::AllSources()) {
std::string base_filename =
dir + "/" + SourceFileMap::PathFromV8RootWithoutExtension(file);
GlobalContext::PerFileStreams& streams =
GlobalContext::GeneratedPerFile(file);
WriteFile(base_filename + "-tq-csa.cc", streams.csa_ccfile.str());
WriteFile(base_filename + "-tq-csa.h", streams.csa_headerfile.str());
WriteFile(base_filename + "-tq.inc",
streams.class_definition_headerfile.str());
WriteFile(base_filename + "-tq-inl.inc",
streams.class_definition_inline_headerfile.str());
WriteFile(base_filename + "-tq.cc", streams.class_definition_ccfile.str());
}
WriteFile(dir + "/runtime-macros.h", runtime_macros_h_.str());
WriteFile(dir + "/runtime-macros.cc", runtime_macros_cc_.str());
}
void ImplementationVisitor::GenerateMacroFunctionDeclaration(std::ostream& o,
Macro* macro) {
GenerateFunctionDeclaration(
o, "",
output_type_ == OutputType::kCC ? macro->CCName() : macro->ExternalName(),
macro->signature(), macro->parameter_names());
}
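// Writes the C++ declaration for a Torque callable and returns the
// generated parameter names. Depending on |output_type_|, the leading
// parameter is an Isolate* (runtime C++ output) or a
// compiler::CodeAssemblerState* (CSA output), and each label becomes a
// CodeAssemblerLabel* followed by one TypedCodeAssemblerVariable<T>* per
// label parameter. As a sketch, a macro like
//   macro Foo(x: Smi): Smi labels Bail
// comes out roughly as
//   TNode<Smi> Foo(compiler::CodeAssemblerState* state_, TNode<Smi> p_x,
//                  compiler::CodeAssemblerLabel* label_Bail);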
std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
std::ostream& o, const std::string& macro_prefix, const std::string& name,
const Signature& signature, const NameVector& parameter_names,
bool pass_code_assembler_state) {
std::vector<std::string> generated_parameter_names;
if (signature.return_type->IsVoidOrNever()) {
o << "void";
} else {
o << (output_type_ == OutputType::kCC
? signature.return_type->GetRuntimeType()
: signature.return_type->GetGeneratedTypeName());
}
o << " " << macro_prefix << name << "(";
bool first = true;
if (output_type_ == OutputType::kCC) {
first = false;
o << "Isolate* isolate";
} else if (pass_code_assembler_state) {
first = false;
o << "compiler::CodeAssemblerState* state_";
}
DCHECK_GE(signature.types().size(), parameter_names.size());
for (size_t i = 0; i < signature.types().size(); ++i) {
if (!first) o << ", ";
first = false;
const Type* parameter_type = signature.types()[i];
const std::string& generated_type_name =
output_type_ == OutputType::kCC
? parameter_type->GetRuntimeType()
: parameter_type->GetGeneratedTypeName();
generated_parameter_names.push_back(ExternalParameterName(
i < parameter_names.size() ? parameter_names[i]->value
: std::to_string(i)));
o << generated_type_name << " " << generated_parameter_names.back();
}
for (const LabelDeclaration& label_info : signature.labels) {
if (output_type_ == OutputType::kCC) {
ReportError("Macros that generate runtime code can't have label exits");
}
if (!first) o << ", ";
first = false;
generated_parameter_names.push_back(
ExternalLabelName(label_info.name->value));
o << "compiler::CodeAssemblerLabel* " << generated_parameter_names.back();
size_t i = 0;
for (const Type* type : label_info.types) {
std::string generated_type_name;
if (type->StructSupertype()) {
generated_type_name = "\n#error no structs allowed in labels\n";
} else {
generated_type_name = "compiler::TypedCodeAssemblerVariable<";
generated_type_name += type->GetGeneratedTNodeTypeName();
generated_type_name += ">*";
}
o << ", ";
generated_parameter_names.push_back(
ExternalLabelParameterName(label_info.name->value, i));
o << generated_type_name << " " << generated_parameter_names.back();
++i;
}
}
o << ")";
return generated_parameter_names;
}
namespace {
void FailCallableLookup(
const std::string& reason, const QualifiedName& name,
const TypeVector& parameter_types,
const std::vector<Binding<LocalLabel>*>& labels,
const std::vector<Signature>& candidates,
const std::vector<std::pair<GenericCallable*, std::string>>&
inapplicable_generics) {
std::stringstream stream;
stream << "\n" << reason << ": \n " << name << "(" << parameter_types << ")";
if (!labels.empty()) {
stream << " labels ";
for (size_t i = 0; i < labels.size(); ++i) {
stream << labels[i]->name() << "(" << labels[i]->parameter_types << ")";
}
}
stream << "\ncandidates are:";
for (const Signature& signature : candidates) {
stream << "\n " << name;
PrintSignature(stream, signature, false);
}
if (!inapplicable_generics.empty()) {
stream << "\nfailed to instantiate all of these generic declarations:";
for (auto& failure : inapplicable_generics) {
GenericCallable* generic = failure.first;
const std::string& reason = failure.second;
stream << "\n " << generic->name() << " defined at "
<< generic->Position() << ":\n " << reason << "\n";
}
}
ReportError(stream.str());
}
Callable* GetOrCreateSpecialization(
const SpecializationKey<GenericCallable>& key) {
if (base::Optional<Callable*> specialization =
key.generic->GetSpecialization(key.specialized_types)) {
return *specialization;
}
return DeclarationVisitor::SpecializeImplicit(key);
}
} // namespace
base::Optional<Binding<LocalValue>*> ImplementationVisitor::TryLookupLocalValue(
const std::string& name) {
return ValueBindingsManager::Get().TryLookup(name);
}
base::Optional<Binding<LocalLabel>*> ImplementationVisitor::TryLookupLabel(
const std::string& name) {
return LabelBindingsManager::Get().TryLookup(name);
}
Binding<LocalLabel>* ImplementationVisitor::LookupLabel(
const std::string& name) {
base::Optional<Binding<LocalLabel>*> label = TryLookupLabel(name);
if (!label) ReportError("cannot find label ", name);
return *label;
}
Block* ImplementationVisitor::LookupSimpleLabel(const std::string& name) {
LocalLabel* label = LookupLabel(name);
if (!label->parameter_types.empty()) {
ReportError("label ", name,
"was expected to have no parameters, but has parameters (",
label->parameter_types, ")");
}
return label->block;
}
// Try to lookup a callable with the provided argument types. Do not report
// an error if no matching callable was found, but return false instead.
// This is used to test the presence of overloaded field accessors.
bool ImplementationVisitor::TestLookupCallable(
const QualifiedName& name, const TypeVector& parameter_types) {
return LookupCallable(name, Declarations::TryLookup(name), parameter_types,
{}, {}, true) != nullptr;
}
TypeArgumentInference ImplementationVisitor::InferSpecializationTypes(
GenericCallable* generic, const TypeVector& explicit_specialization_types,
const TypeVector& explicit_arguments) {
std::vector<base::Optional<const Type*>> all_arguments;
const ParameterList& parameters = generic->declaration()->parameters;
for (size_t i = 0; i < parameters.implicit_count; ++i) {
base::Optional<Binding<LocalValue>*> val =
TryLookupLocalValue(parameters.names[i]->value);
all_arguments.push_back(
val ? (*val)->GetLocationReference(*val).ReferencedType()
: base::nullopt);
}
for (const Type* explicit_argument : explicit_arguments) {
all_arguments.push_back(explicit_argument);
}
return generic->InferSpecializationTypes(explicit_specialization_types,
all_arguments);
}
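// Overload resolution: gather every matching non-generic callable, plus
// every generic whose specialization types can be inferred from the
// argument types; filter down to signatures compatible with the argument
// and label counts; then require a unique candidate that is strictly
// better than all others under ParameterDifference. Failures are reported
// as "no matching declaration", no suitable callable, or an ambiguity.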
template <class Container>
Callable* ImplementationVisitor::LookupCallable(
const QualifiedName& name, const Container& declaration_container,
const TypeVector& parameter_types,
const std::vector<Binding<LocalLabel>*>& labels,
const TypeVector& specialization_types, bool silence_errors) {
Callable* result = nullptr;
std::vector<Declarable*> overloads;
std::vector<Signature> overload_signatures;
std::vector<std::pair<GenericCallable*, std::string>> inapplicable_generics;
for (auto* declarable : declaration_container) {
if (GenericCallable* generic = GenericCallable::DynamicCast(declarable)) {
TypeArgumentInference inference = InferSpecializationTypes(
generic, specialization_types, parameter_types);
if (inference.HasFailed()) {
inapplicable_generics.push_back(
std::make_pair(generic, inference.GetFailureReason()));
continue;
}
overloads.push_back(generic);
overload_signatures.push_back(
DeclarationVisitor::MakeSpecializedSignature(
SpecializationKey<GenericCallable>{generic,
inference.GetResult()}));
} else if (Callable* callable = Callable::DynamicCast(declarable)) {
overloads.push_back(callable);
overload_signatures.push_back(callable->signature());
}
}
// Indices of candidates in overloads/overload_signatures.
std::vector<size_t> candidates;
for (size_t i = 0; i < overloads.size(); ++i) {
const Signature& signature = overload_signatures[i];
if (IsCompatibleSignature(signature, parameter_types, labels.size())) {
candidates.push_back(i);
}
}
if (overloads.empty() && inapplicable_generics.empty()) {
if (silence_errors) return nullptr;
std::stringstream stream;
stream << "no matching declaration found for " << name;
ReportError(stream.str());
} else if (candidates.empty()) {
if (silence_errors) return nullptr;
FailCallableLookup("cannot find suitable callable with name", name,
parameter_types, labels, overload_signatures,
inapplicable_generics);
}
auto is_better_candidate = [&](size_t a, size_t b) {
return ParameterDifference(overload_signatures[a].GetExplicitTypes(),
parameter_types)
.StrictlyBetterThan(ParameterDifference(
overload_signatures[b].GetExplicitTypes(), parameter_types));
};
size_t best = *std::min_element(candidates.begin(), candidates.end(),
is_better_candidate);
// This sanity check is also performed inside libstdc++'s std::min_element.
DCHECK(!is_better_candidate(best, best));
for (size_t candidate : candidates) {
if (candidate != best && !is_better_candidate(best, candidate)) {
std::vector<Signature> candidate_signatures;
for (size_t i : candidates) {
candidate_signatures.push_back(overload_signatures[i]);
}
FailCallableLookup("ambiguous callable ", name, parameter_types, labels,
candidate_signatures, inapplicable_generics);
}
}
if (GenericCallable* generic =
GenericCallable::DynamicCast(overloads[best])) {
TypeArgumentInference inference = InferSpecializationTypes(
generic, specialization_types, parameter_types);
result = GetOrCreateSpecialization(
SpecializationKey<GenericCallable>{generic, inference.GetResult()});
} else {
result = Callable::cast(overloads[best]);
}
size_t caller_size = parameter_types.size();
size_t callee_size =
result->signature().types().size() - result->signature().implicit_count;
if (caller_size != callee_size &&
!result->signature().parameter_types.var_args) {
std::stringstream stream;
stream << "parameter count mismatch calling " << *result << " - expected "
<< std::to_string(callee_size) << ", found "
<< std::to_string(caller_size);
ReportError(stream.str());
}
return result;
}
template <class Container>
Callable* ImplementationVisitor::LookupCallable(
const QualifiedName& name, const Container& declaration_container,
const Arguments& arguments, const TypeVector& specialization_types) {
return LookupCallable(name, declaration_container,
arguments.parameters.ComputeTypeVector(),
arguments.labels, specialization_types);
}
Method* ImplementationVisitor::LookupMethod(
const std::string& name, const AggregateType* receiver_type,
const Arguments& arguments, const TypeVector& specialization_types) {
TypeVector types(arguments.parameters.ComputeTypeVector());
types.insert(types.begin(), receiver_type);
return Method::cast(LookupCallable({{}, name}, receiver_type->Methods(name),
types, arguments.labels,
specialization_types));
}
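// Computes the result type when two control-flow paths merge, e.g. the two
// arms of a conditional: the more general side wins if one is assignable
// to the other, otherwise the union of both types is used. So, assuming
// the usual subtyping, Smi and Number combine to Number, while Smi and
// HeapNumber combine to the union Smi|HeapNumber. Constexpr-ness is
// stripped from the result.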
const Type* ImplementationVisitor::GetCommonType(const Type* left,
const Type* right) {
const Type* common_type;
if (IsAssignableFrom(left, right)) {
common_type = left;
} else if (IsAssignableFrom(right, left)) {
common_type = right;
} else {
common_type = TypeOracle::GetUnionType(left, right);
}
common_type = common_type->NonConstexprVersion();
return common_type;
}
VisitResult ImplementationVisitor::GenerateCopy(const VisitResult& to_copy) {
if (to_copy.IsOnStack()) {
return VisitResult(to_copy.type(),
assembler().Peek(to_copy.stack_range(), to_copy.type()));
}
return to_copy;
}
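// Evaluates a struct literal, e.g. (illustrative Torque)
//   MyStruct{a: x, b: y}
// For an ordinary struct, the initializers are implicitly converted to the
// field types and concatenated on the stack. For a bitfield struct, the
// result starts as a zero of the backing word and each field is stored
// into it in turn.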
VisitResult ImplementationVisitor::Visit(StructExpression* expr) {
StackScope stack_scope(this);
auto& initializers = expr->initializers;
std::vector<VisitResult> values;
std::vector<const Type*> term_argument_types;
values.reserve(initializers.size());
term_argument_types.reserve(initializers.size());
// Compute values and types of all initializer arguments.
for (const NameAndExpression& initializer : initializers) {
VisitResult value = Visit(initializer.expression);
values.push_back(value);
term_argument_types.push_back(value.type());
}
// Compute and check the struct type from the given struct name and argument
// types.
const Type* type = TypeVisitor::ComputeTypeForStructExpression(
expr->type, term_argument_types);
if (const auto* struct_type = StructType::DynamicCast(type)) {
CheckInitializersWellformed(struct_type->name(), struct_type->fields(),
initializers);
// Implicitly convert values and thereby build the struct on the stack.
StackRange struct_range = assembler().TopRange(0);
auto& fields = struct_type->fields();
for (size_t i = 0; i < values.size(); i++) {
values[i] =
GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
struct_range.Extend(values[i].stack_range());
}
return stack_scope.Yield(VisitResult(struct_type, struct_range));
} else {
const auto* bitfield_struct_type = BitFieldStructType::cast(type);
CheckInitializersWellformed(bitfield_struct_type->name(),
bitfield_struct_type->fields(), initializers);
// Create a zero and cast it to the desired bitfield struct type.
VisitResult result{TypeOracle::GetConstInt32Type(), "0"};
result = GenerateImplicitConvert(TypeOracle::GetInt32Type(), result);
result = GenerateCall("Unsigned", Arguments{{result}, {}}, {});
result = GenerateCall("%RawDownCast", Arguments{{result}, {}},
{bitfield_struct_type});
// Set each field in the result. If these fields are constexpr, then all of
// this initialization will end up reduced to a single value during TurboFan
// optimization.
auto& fields = bitfield_struct_type->fields();
for (size_t i = 0; i < values.size(); i++) {
values[i] =
GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
result = GenerateSetBitField(bitfield_struct_type, fields[i], result,
values[i], /*starts_as_zero=*/true);
}
return stack_scope.Yield(result);
}
}
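// Leaves an updated copy of |bitfield_struct| on top of the stack with
// |bitfield| set to |value|. |starts_as_zero| records that all other bits
// are known to be zero, which lets the backends emit a plain shift-and-or
// instead of a masked read-modify-write (assumption about the lowering;
// the instruction is expanded in the CSA/CC generators).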
VisitResult ImplementationVisitor::GenerateSetBitField(
const Type* bitfield_struct_type, const BitField& bitfield,
VisitResult bitfield_struct, VisitResult value, bool starts_as_zero) {
GenerateCopy(bitfield_struct);
GenerateCopy(value);
assembler().Emit(
StoreBitFieldInstruction{bitfield_struct_type, bitfield, starts_as_zero});
return VisitResult(bitfield_struct_type, assembler().TopRange(1));
}
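// Turns an expression into a LocationReference, i.e. something that can be
// read and possibly assigned. Identifier, field access, element access,
// and dereference expressions get dedicated handling below; any other
// expression is evaluated and wrapped as a read-only temporary.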
LocationReference ImplementationVisitor::GetLocationReference(
Expression* location) {
switch (location->kind) {
case AstNode::Kind::kIdentifierExpression:
return GetLocationReference(static_cast<IdentifierExpression*>(location));
case AstNode::Kind::kFieldAccessExpression:
return GetLocationReference(
static_cast<FieldAccessExpression*>(location));
case AstNode::Kind::kElementAccessExpression:
return GetLocationReference(
static_cast<ElementAccessExpression*>(location));
case AstNode::Kind::kDereferenceExpression:
return GetLocationReference(
static_cast<DereferenceExpression*>(location));
default:
return LocationReference::Temporary(Visit(location), "expression");
}
}
LocationReference ImplementationVisitor::GetLocationReference(
FieldAccessExpression* expr) {
return GenerateFieldAccess(GetLocationReference(expr->object),
expr->field->value, false, expr->field->pos);
}
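// Field access resolution, tried in order:
//   1. a struct held in a variable or temporary: project the field
//      (const-qualified fields of variables become read-only temporaries);
//   2. a (possibly SmiTagged) bitfield struct: a BitFieldAccess reference;
//   3. a Reference to a struct: copy the reference and bump its offset by
//      the field's offset within the struct;
//   4. a class field with no explicit ".field" accessor overload: a heap
//      reference into the object;
//   5. otherwise: a call-style FieldAccess through ".field" accessors.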
LocationReference ImplementationVisitor::GenerateFieldAccess(
LocationReference reference, const std::string& fieldname,
bool ignore_struct_field_constness, base::Optional<SourcePosition> pos) {
if (reference.IsVariableAccess() &&
reference.variable().type()->StructSupertype()) {
const StructType* type = *reference.variable().type()->StructSupertype();
const Field& field = type->LookupField(fieldname);
if (GlobalContext::collect_language_server_data() && pos.has_value()) {
LanguageServerData::AddDefinition(*pos, field.pos);
}
if (field.const_qualified) {
VisitResult t_value = ProjectStructField(reference.variable(), fieldname);
return LocationReference::Temporary(
t_value, "for constant field '" + field.name_and_type.name + "'");
} else {
return LocationReference::VariableAccess(
ProjectStructField(reference.variable(), fieldname));
}
}
if (reference.IsTemporary() &&
reference.temporary().type()->StructSupertype()) {
if (GlobalContext::collect_language_server_data() && pos.has_value()) {
const StructType* type = *reference.temporary().type()->StructSupertype();
const Field& field = type->LookupField(fieldname);
LanguageServerData::AddDefinition(*pos, field.pos);
}
return LocationReference::Temporary(
ProjectStructField(reference.temporary(), fieldname),
reference.temporary_description());
}
if (base::Optional<const Type*> referenced_type =
reference.ReferencedType()) {
if ((*referenced_type)->IsBitFieldStructType()) {
const BitFieldStructType* bitfield_struct =
BitFieldStructType::cast(*referenced_type);
const BitField& field = bitfield_struct->LookupField(fieldname);
return LocationReference::BitFieldAccess(reference, field);
}
if (const auto type_wrapped_in_smi = Type::MatchUnaryGeneric(
(*referenced_type), TypeOracle::GetSmiTaggedGeneric())) {
const BitFieldStructType* bitfield_struct =
BitFieldStructType::DynamicCast(*type_wrapped_in_smi);
if (bitfield_struct == nullptr) {
ReportError(
"When a value of type SmiTagged<T> is used in a field access "
"expression, T is expected to be a bitfield struct type. "
"Instead, T is ",
**type_wrapped_in_smi);
}
const BitField& field = bitfield_struct->LookupField(fieldname);
return LocationReference::BitFieldAccess(reference, field);
}
}
if (reference.IsHeapReference()) {
VisitResult ref = reference.heap_reference();
bool is_const;
auto generic_type =
TypeOracle::MatchReferenceGeneric(ref.type(), &is_const);
if (!generic_type) {
ReportError(
"Left-hand side of field access expression is marked as a reference "
"but is not of type Reference<...>. Found type: ",
ref.type()->ToString());
}
if (auto struct_type = (*generic_type)->StructSupertype()) {
const Field& field = (*struct_type)->LookupField(fieldname);
// Update the Reference's type to refer to the field type within the
// struct.
ref.SetType(TypeOracle::GetReferenceType(
field.name_and_type.type,
is_const ||
(field.const_qualified && !ignore_struct_field_constness)));
if (!field.offset.has_value()) {
Error("accessing field with unknown offset").Throw();
}
if (*field.offset != 0) {
// Copy the Reference struct up the stack and update the new copy's
// |offset| value to point to the struct field.
StackScope scope(this);
ref = GenerateCopy(ref);
VisitResult ref_offset = ProjectStructField(ref, "offset");
VisitResult struct_offset{
TypeOracle::GetIntPtrType()->ConstexprVersion(),
std::to_string(*field.offset)};
VisitResult updated_offset =
GenerateCall("+", Arguments{{ref_offset, struct_offset}, {}});
assembler().Poke(ref_offset.stack_range(), updated_offset.stack_range(),
ref_offset.type());
ref = scope.Yield(ref);
}
return LocationReference::HeapReference(ref);
}
}
VisitResult object_result = GenerateFetchFromLocation(reference);
if (base::Optional<const ClassType*> class_type =
object_result.type()->ClassSupertype()) {
// This is a hack to distinguish the situation where we want to use
// overloaded field accessors from when we want to create a reference.
bool has_explicit_overloads = TestLookupCallable(
QualifiedName{"." + fieldname}, {object_result.type()});
if ((*class_type)->HasField(fieldname) && !has_explicit_overloads) {
const Field& field = (*class_type)->LookupField(fieldname);
if (GlobalContext::collect_language_server_data() && pos.has_value()) {
LanguageServerData::AddDefinition(*pos, field.pos);
}
return GenerateFieldReference(object_result, field, *class_type);
}
}
return LocationReference::FieldAccess(object_result, fieldname);
}
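// Element access: on a heap slice, delegate to the slice type's AtIndex
// method and treat the result as a heap reference; otherwise produce an
// ArrayAccess reference, whose reads and writes later resolve to the "[]"
// and "[]=" operator macros.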
LocationReference ImplementationVisitor::GetLocationReference(
ElementAccessExpression* expr) {
LocationReference reference = GetLocationReference(expr->array);
VisitResult index = Visit(expr->index);
if (reference.IsHeapSlice()) {
Arguments arguments{{index}, {}};
const AggregateType* slice_type =
AggregateType::cast(reference.heap_slice().type());
Method* method = LookupMethod("AtIndex", slice_type, arguments, {});
// The reference has to be treated like a normal value when calling methods
// on the underlying slice implementation.
LocationReference slice_value = LocationReference::Temporary(
reference.GetVisitResult(), "slice as value");
return LocationReference::HeapReference(
GenerateCall(method, std::move(slice_value), arguments, {}, false));
} else {
return LocationReference::ArrayAccess(GenerateFetchFromLocation(reference),
index);
}
}
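// Identifier resolution order: an unqualified name is first tried as a
// local value binding; after that, the (possibly qualified) name can be a
// builtin (yielding its code object), a generic instantiated with the
// given type arguments (only builtins may be turned into function
// pointers), a namespace constant, or, finally, an extern constant.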
LocationReference ImplementationVisitor::GetLocationReference(
IdentifierExpression* expr) {
if (expr->namespace_qualification.empty()) {
if (base::Optional<Binding<LocalValue>*> value =
TryLookupLocalValue(expr->name->value)) {
if (GlobalContext::collect_language_server_data()) {
LanguageServerData::AddDefinition(expr->name->pos,
(*value)->declaration_position());
}
if (!expr->generic_arguments.empty()) {
ReportError("cannot have generic parameters on local name ",
expr->name);
}
return (*value)->GetLocationReference(*value);
}
}
if (expr->IsThis()) {
ReportError("\"this\" cannot be qualified");
}
QualifiedName name =
QualifiedName(expr->namespace_qualification, expr->name->value);
if (base::Optional<Builtin*> builtin = Declarations::TryLookupBuiltin(name)) {
if (GlobalContext::collect_language_server_data()) {
LanguageServerData::AddDefinition(expr->name->pos,
(*builtin)->Position());
}
return LocationReference::Temporary(GetBuiltinCode(*builtin),
"builtin " + expr->name->value);
}
if (!expr->generic_arguments.empty()) {
GenericCallable* generic = Declarations::LookupUniqueGeneric(name);
Callable* specialization =
GetOrCreateSpecialization(SpecializationKey<GenericCallable>{
generic, TypeVisitor::ComputeTypeVector(expr->generic_arguments)});
if (Builtin* builtin = Builtin::DynamicCast(specialization)) {
DCHECK(!builtin->IsExternal());
return LocationReference::Temporary(GetBuiltinCode(builtin),
"builtin " + expr->name->value);
} else {
ReportError("cannot create function pointer for non-builtin ",
generic->name());
}
}
Value* value = Declarations::LookupValue(name);
if (GlobalContext::collect_language_server_data()) {
LanguageServerData::AddDefinition(expr->name->pos, value->name()->pos);
}
if (auto* constant = NamespaceConstant::DynamicCast(value)) {
if (constant->type()->IsConstexpr()) {
return LocationReference::Temporary(
VisitResult(constant->type(), constant->external_name() + "(state_)"),
"namespace constant " + expr->name->value);
}
assembler().Emit(NamespaceConstantInstruction{constant});
StackRange stack_range =
assembler().TopRange(LoweredSlotCount(constant->type()));
return LocationReference::Temporary(
VisitResult(constant->type(), stack_range),
"namespace constant " + expr->name->value);
}
ExternConstant* constant = ExternConstant::cast(value);
return LocationReference::Temporary(constant->value(),
"extern value " + expr->name->value);
}
LocationReference ImplementationVisitor::GetLocationReference(
DereferenceExpression* expr) {
VisitResult ref = Visit(expr->reference);
if (!TypeOracle::MatchReferenceGeneric(ref.type())) {
Error("Operator * expects a reference type but found a value of type ",
*ref.type())
.Throw();
}
return LocationReference::HeapReference(ref);
}
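// Reads a value out of a LocationReference. Temporaries and variables are
// copied from the stack; heap references are loaded via
// LoadReferenceInstruction, with struct references loaded field by field
// and Float64OrHole going through the LoadFloat64OrHole helper; bitfield
// accesses load the containing struct and extract the bits; call accesses
// invoke the reference's eval function. Heap slices cannot be fetched as
// a whole.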
VisitResult ImplementationVisitor::GenerateFetchFromLocation(
const LocationReference& reference) {
if (reference.IsTemporary()) {
return GenerateCopy(reference.temporary());
} else if (reference.IsVariableAccess()) {
return GenerateCopy(reference.variable());
} else if (reference.IsHeapReference()) {
const Type* referenced_type = *reference.ReferencedType();
if (referenced_type == TypeOracle::GetFloat64OrHoleType()) {
return GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
"LoadFloat64OrHole"),
Arguments{{reference.heap_reference()}, {}});
} else if (auto struct_type = referenced_type->StructSupertype()) {
StackRange result_range = assembler().TopRange(0);
for (const Field& field : (*struct_type)->fields()) {
StackScope scope(this);
const std::string& fieldname = field.name_and_type.name;
VisitResult field_value = scope.Yield(GenerateFetchFromLocation(
GenerateFieldAccess(reference, fieldname)));
result_range.Extend(field_value.stack_range());
}
return VisitResult(referenced_type, result_range);
} else {
GenerateCopy(reference.heap_reference());
assembler().Emit(LoadReferenceInstruction{referenced_type});
DCHECK_EQ(1, LoweredSlotCount(referenced_type));
return VisitResult(referenced_type, assembler().TopRange(1));
}
} else if (reference.IsBitFieldAccess()) {
// First fetch the bitfield struct, then get the bits out of it.
VisitResult bit_field_struct =
GenerateFetchFromLocation(reference.bit_field_struct_location());
assembler().Emit(LoadBitFieldInstruction{bit_field_struct.type(),
reference.bit_field()});
return VisitResult(*reference.ReferencedType(), assembler().TopRange(1));
} else {
if (reference.IsHeapSlice()) {
ReportError(
"fetching a value directly from an indexed field isn't allowed");
}
DCHECK(reference.IsCallAccess());
return GenerateCall(reference.eval_function(),
Arguments{reference.call_arguments(), {}});
}
}
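// The assignment counterpart of GenerateFetchFromLocation: call accesses
// append the value to the assign function's arguments, variables are
// poked in place on the stack, heap references are written via
// StoreReferenceInstruction (structs field by field, float64 values
// NaN-silenced first), and bitfield accesses read-modify-write the
// containing struct. Const references, heap slices, and temporaries are
// rejected.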
void ImplementationVisitor::GenerateAssignToLocation(
const LocationReference& reference, const VisitResult& assignment_value) {
if (reference.IsCallAccess()) {
Arguments arguments{reference.call_arguments(), {}};
arguments.parameters.push_back(assignment_value);
GenerateCall(reference.assign_function(), arguments);
} else if (reference.IsVariableAccess()) {
VisitResult variable = reference.variable();
VisitResult converted_value =
GenerateImplicitConvert(variable.type(), assignment_value);
assembler().Poke(variable.stack_range(), converted_value.stack_range(),
variable.type());
// Local variables are detected by the existence of a binding. Assignment
// to local variables is recorded to support lint errors.
if (reference.binding()) {
(*reference.binding())->SetWritten();
}
} else if (reference.IsHeapSlice()) {
ReportError("assigning a value directly to an indexed field isn't allowed");
} else if (reference.IsHeapReference()) {
const Type* referenced_type = *reference.ReferencedType();
if (reference.IsConst()) {
Error("cannot assign to const value of type ", *referenced_type).Throw();
}
if (referenced_type == TypeOracle::GetFloat64OrHoleType()) {
GenerateCall(
QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
"StoreFloat64OrHole"),
Arguments{{reference.heap_reference(), assignment_value}, {}});
} else if (auto struct_type = referenced_type->StructSupertype()) {
if (!assignment_value.type()->IsSubtypeOf(referenced_type)) {
ReportError("Cannot assign to ", *referenced_type,
" with value of type ", *assignment_value.type());
}
for (const Field& field : (*struct_type)->fields()) {
const std::string& fieldname = field.name_and_type.name;
// Allow assignment of structs even if they contain const fields.
// Const on struct fields just disallows direct writes to them.
bool ignore_struct_field_constness = true;
GenerateAssignToLocation(
GenerateFieldAccess(reference, fieldname,
ignore_struct_field_constness),
ProjectStructField(assignment_value, fieldname));
}
} else {
GenerateCopy(reference.heap_reference());
VisitResult converted_assignment_value =
GenerateImplicitConvert(referenced_type, assignment_value);
if (referenced_type == TypeOracle::GetFloat64Type()) {
VisitResult silenced_float_value = GenerateCall(
"Float64SilenceNaN", Arguments{{assignment_value}, {}});
assembler().Poke(converted_assignment_value.stack_range(),
silenced_float_value.stack_range(), referenced_type);
}
assembler().Emit(StoreReferenceInstruction{referenced_type});
}
} else if (reference.IsBitFieldAccess()) {
// First fetch the bitfield struct, then set the updated bits, then store
// it back to where we found it.
VisitResult bit_field_struct =
GenerateFetchFromLocation(reference.bit_field_struct_location());
VisitResult converted_value =
GenerateImplicitConvert(*reference.ReferencedType(), assignment_value);
VisitResult updated_bit_field_struct =
GenerateSetBitField(bit_field_struct.type(), reference.bit_field(),
bit_field_struct, converted_value);
GenerateAssignToLocation(reference.bit_field_struct_location(),
updated_bit_field_struct);
} else {
DCHECK(reference.IsTemporary());
ReportError("cannot assign to const-bound or temporary ",
reference.temporary_description());
}
}
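// Generates an indirect call through a Torque builtin pointer, e.g. a
// value of type (illustrative)
//   builtin(Context, Smi) => Smi
// After checking arity and signature compatibility, the callee and the
// implicitly-converted arguments are pushed and a
// CallBuiltinPointerInstruction is emitted; tail calls produce no value.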
VisitResult ImplementationVisitor::GeneratePointerCall(
Expression* callee, const Arguments& arguments, bool is_tailcall) {
StackScope scope(this);
TypeVector parameter_types(arguments.parameters.ComputeTypeVector());
VisitResult callee_result = Visit(callee);
if (!callee_result.type()->IsBuiltinPointerType()) {
std::stringstream stream;
stream << "Expected a function pointer type but found "
<< *callee_result.type();
ReportError(stream.str());
}
const BuiltinPointerType* type =
BuiltinPointerType::cast(callee_result.type());
if (type->parameter_types().size() != parameter_types.size()) {
std::stringstream stream;
stream << "parameter count mismatch calling function pointer with Type: "
<< *type << " - expected "
<< std::to_string(type->parameter_types().size()) << ", found "
<< std::to_string(parameter_types.size());
ReportError(stream.str());
}
ParameterTypes types{type->parameter_types(), false};
Signature sig;
sig.parameter_types = types;
if (!IsCompatibleSignature(sig, parameter_types, 0)) {
std::stringstream stream;
stream << "parameters do not match function pointer signature. Expected: ("
<< type->parameter_types() << ") but got: (" << parameter_types
<< ")";
ReportError(stream.str());
}
callee_result = GenerateCopy(callee_result);
StackRange arg_range = assembler().TopRange(0);
for (size_t current = 0; current < arguments.parameters.size(); ++current) {
const Type* to_type = type->parameter_types()[current];
arg_range.Extend(
GenerateImplicitConvert(to_type, arguments.parameters[current])
.stack_range());
}
assembler().Emit(
CallBuiltinPointerInstruction{is_tailcall, type, arg_range.Size()});
if (is_tailcall) {
return VisitResult::NeverResult();
}
DCHECK_EQ(1, LoweredSlotCount(type->return_type()));
return scope.Yield(VisitResult(type->return_type(), assembler().TopRange(1)));
}
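// Appends one argument to a call under construction. Unbound implicit
// parameters arrive as top-typed placeholders and are copied through
// unconverted; everything else is implicitly converted to the parameter
// type. For non-inlined macros, stack values extend |argument_range|
// while constexpr values go into the separate |constexpr_arguments| list.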
void ImplementationVisitor::AddCallParameter(
Callable* callable, VisitResult parameter, const Type* parameter_type,
std::vector<VisitResult>* converted_arguments, StackRange* argument_range,
std::vector<std::string>* constexpr_arguments, bool inline_macro) {
VisitResult converted;
if ((converted_arguments->size() < callable->signature().implicit_count) &&
parameter.type()->IsTopType()) {
converted = GenerateCopy(parameter);
} else {
converted = GenerateImplicitConvert(parameter_type, parameter);
}
converted_arguments->push_back(converted);
if (!inline_macro) {
if (converted.IsOnStack()) {
argument_range->Extend(converted.stack_range());
} else {
constexpr_arguments->push_back(converted.constexpr_value());
}
}
}
namespace {
std::pair<std::string, std::string> GetClassInstanceTypeRange(
const ClassType* class_type) {
std::pair<std::string, std::string> result;
if (class_type->InstanceTypeRange()) {
auto instance_type_range = *class_type->InstanceTypeRange();
std::string instance_type_string_first =
"static_cast<InstanceType>(" +
std::to_string(instance_type_range.first) + ")";
std::string instance_type_string_second =
"static_cast<InstanceType>(" +
std::to_string(instance_type_range.second) + ")";
result =
std::make_pair(instance_type_string_first, instance_type_string_second);
} else {
ReportError(
"%Min/MaxInstanceType must take a class type that is either a string "
"or has a generated instance type range");
}
return result;
}
} // namespace
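// The central call generator. Implicit arguments are captured by name from
// the enclosing scope; an implicit that is not in scope becomes a
// top-typed placeholder, which is only permitted when the callee is a
// Torque-defined macro, and forces that macro to be inlined (presumably so
// the placeholder only produces an error if it is actually used).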
VisitResult ImplementationVisitor::GenerateCall(
Callable* callable, base::Optional<LocationReference> this_reference,
Arguments arguments, const TypeVector& specialization_types,
bool is_tailcall) {
const Type* return_type = callable->signature().return_type;
if (is_tailcall) {
if (Builtin* builtin = Builtin::DynamicCast(CurrentCallable::Get())) {
const Type* outer_return_type = builtin->signature().return_type;
if (!return_type->IsSubtypeOf(outer_return_type)) {
Error("Cannot tailcall, type of result is ", *return_type,
" but should be a subtype of ", *outer_return_type, ".");
}
} else {
Error("Tail calls are only allowed from builtins");
}
}
bool inline_macro = callable->ShouldBeInlined(output_type_);
std::vector<VisitResult> implicit_arguments;
for (size_t i = 0; i < callable->signature().implicit_count; ++i) {
std::string implicit_name = callable->signature().parameter_names[i]->value;
base::Optional<Binding<LocalValue>*> val =
TryLookupLocalValue(implicit_name);
if (val) {
implicit_arguments.push_back(
GenerateFetchFromLocation((*val)->GetLocationReference(*val)));
} else {
VisitResult uninitialized = VisitResult::TopTypeResult(
"implicit parameter '" + implicit_name +
"' is not defined when invoking " + callable->ReadableName() +
" at " + PositionAsString(CurrentSourcePosition::Get()),
callable->signature().parameter_types.types[i]);
implicit_arguments.push_back(uninitialized);
}
const Type* type = implicit_arguments.back().type();
if (const TopType* top_type = TopType::DynamicCast(type)) {
if (!callable->IsMacro() || callable->IsExternal()) {
ReportError(
"unititialized implicit parameters can only be passed to "
"Torque-defined macros: the ",
top_type->reason());
}
inline_macro = true;
}
}
std::vector<VisitResult> converted_arguments;
StackRange argument_range = assembler().TopRange(0);
std::vector<std::string> constexpr_arguments;
size_t current = 0;
for (; current < callable->signature().implicit_count; ++current) {
AddCallParameter(callable, implicit_arguments[current],
callable->signature().parameter_types.types[current],
&converted_arguments, &argument_range,
&constexpr_arguments, inline_macro);
}