// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

namespace internal {

namespace runtime {
extern runtime GetTemplateObject(implicit context: Context)(
    TemplateObjectDescription, SharedFunctionInfo, Smi): JSAny;

extern runtime BytecodeBudgetInterruptFromCode(implicit context: Context)(
    FeedbackCell): JSAny;
}
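
// Returns the template object for a tagged template call site. The fast
// path loads a previously created template object from the feedback vector;
// on a cache miss (or when no feedback vector is available) the object is
// created by the runtime and, where possible, cached for later calls.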
builtin GetTemplateObject(
    context: Context, shared: SharedFunctionInfo,
    description: TemplateObjectDescription, slot: uintptr,
    maybeFeedbackVector: Undefined|FeedbackVector): JSArray {
  // TODO(jgruber): Consider merging with the GetTemplateObject bytecode
  // handler; the current advantage of the split implementation is that the
  // bytecode can skip most work if feedback exists.

  // TODO(v8:9891): Remove this assert once all callers are ported to Torque.
  // This assert ensures correctness of maybeFeedbackVector's type which can
  // be easily broken for calls from CSA.
  assert(
      IsUndefined(maybeFeedbackVector) ||
      Is<FeedbackVector>(maybeFeedbackVector));
  try {
    const vector =
        Cast<FeedbackVector>(maybeFeedbackVector) otherwise CallRuntime;
    return Cast<JSArray>(ic::LoadFeedbackVectorSlot(vector, slot))
        otherwise CallRuntime;
  } label CallRuntime deferred {
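    // No cached template object was found (or no feedback vector exists):
    // create the object in the runtime and cache it when possible.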
    const result = UnsafeCast<JSArray>(runtime::GetTemplateObject(
        description, shared, Convert<Smi>(Signed(slot))));
    const vector =
        Cast<FeedbackVector>(maybeFeedbackVector) otherwise return result;
    ic::StoreFeedbackVectorSlot(vector, slot, result);
    return result;
  }
}

builtin BytecodeBudgetInterruptFromCode(implicit context: Context)(
    feedbackCell: FeedbackCell): Object {
  // The runtime call is wrapped by a builtin since the calling sequence in
  // generated code is shorter for builtins than for runtime calls.
  tail runtime::BytecodeBudgetInterruptFromCode(feedbackCell);
}
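
// ForInPrepare initializes the enum cache for a for-in loop over
// {enumerator} (either a receiver map with a usable enum cache, or a
// FixedArray of keys) and records feedback in {maybeFeedbackVector} at
// {slot}. The heavy lifting happens in the CSA macro ForInPrepareForTorque.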
extern transitioning macro ForInPrepareForTorque(
    Map | FixedArray, uintptr, Undefined | FeedbackVector): FixedArray;

transitioning builtin ForInPrepare(implicit _context: Context)(
    enumerator: Map|FixedArray, slot: uintptr,
    maybeFeedbackVector: Undefined|FeedbackVector): FixedArray {
  return ForInPrepareForTorque(enumerator, slot, maybeFeedbackVector);
}

extern transitioning builtin ForInFilter(implicit context: Context)(
    JSAny, HeapObject): JSAny;
extern enum ForInFeedback extends uint31 { kAny, ...}
extern macro UpdateFeedback(
    SmiTagged<ForInFeedback>, Undefined | FeedbackVector, uintptr);
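
// Slow path for ForInNext: the receiver's map no longer matches the cached
// enum cache type, so the key may have been deleted or shadowed. Feedback is
// widened to kAny, and ForInFilter checks whether {key} is still a valid
// enumerable property of {receiver} (returning undefined otherwise).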
@export
transitioning macro ForInNextSlow(
    context: Context, slot: uintptr, receiver: JSAnyNotSmi, key: JSAny,
    cacheType: Object, maybeFeedbackVector: Undefined|FeedbackVector): JSAny {
  assert(receiver.map != cacheType);  // Handled on the fast path.
  UpdateFeedback(
      SmiTag<ForInFeedback>(ForInFeedback::kAny), maybeFeedbackVector, slot);
  return ForInFilter(key, receiver);
}

// Note: the untagged {slot} parameter must be in the first couple of args to
// guarantee it's allocated in a register.
transitioning builtin ForInNext(
    context: Context, slot: uintptr, receiver: JSAnyNotSmi,
    cacheArray: FixedArray, cacheType: Object, cacheIndex: Smi,
    maybeFeedbackVector: Undefined|FeedbackVector): JSAny {
  // Load the next key from the enumeration array.
  const key = UnsafeCast<JSAny>(cacheArray.objects[cacheIndex]);

  if (receiver.map == cacheType) {
    // The enum cache is in use for {receiver}, the {key} is definitely valid.
    return key;
  }

  return ForInNextSlow(
      context, slot, receiver, key, cacheType, maybeFeedbackVector);
}

}  // namespace internal