Unreviewed changes
Patch set 2 is the latest approved patch set.
The change was submitted with unreviewed changes in the following files:
```
The name of the file: src/maglev/maglev-graph-builder.cc
Insertions: 2, Deletions: 1.
@@ -4300,15 +4300,6 @@
// TODO(verwaest): Support other objects with possible known stable maps as
// well.
RETURN_IF_DONE(reducer_.TryFoldCheckMaps(object, maps));
- if (!object->is_tagged() && !object->is_holey_float64()) {
- // TODO(victorgomes): Implement the holey float64 case.
- auto heap_number_map =
- MakeRef(broker(), local_isolate()->factory()->heap_number_map());
- if (std::find(maps.begin(), maps.end(), heap_number_map) != maps.end()) {
- return ReduceResult::Done();
- }
- return EmitUnconditionalDeopt(DeoptimizeReason::kWrongMap);
- }
NodeInfo* known_info = GetOrCreateInfoFor(object);
@@ -4544,7 +4535,8 @@
!NodeTypeIs(GetType(value), NodeType::kSmi)) {
// Note that NodeType::kSmi MUST go through GetTaggedValue for proper
// canonicalization. If we see a Float64Constant with type kSmi, it has
- // passed BuildCheckSmi, i.e. it is guaranteed to be convertible to smi.
+ // passed BuildCheckSmi, i.e. the runtime value is guaranteed to be
+ // convertible to smi (we would have deoptimized otherwise).
//
// TODO(jgruber): We have to allocate a new object since the
// object field could contain a mutable HeapNumber and thus cannot
@@ -4756,6 +4748,7 @@
if (value->Is<InlinedAllocation>()) return;
InlinedAllocation* allocation = object->Cast<InlinedAllocation>();
VirtualObject* vobject = GetModifiableObjectFromAllocation(allocation);
CHECK_NOT_NULL(vobject);
+ CHECK_EQ(vobject->type(), VirtualObject::kDefault);
vobject->set(offset, value);
AddNonEscapingUses(allocation, 1);
@@ -4857,6 +4850,7 @@
if (CanTrackObjectChanges(elements, TrackObjectMode::kLoad)) {
VirtualObject* vobject =
GetObjectFromAllocation(elements->Cast<InlinedAllocation>());
+ CHECK_EQ(vobject->type(), VirtualObject::kDefault);
DCHECK(vobject->map().IsFixedArrayMap());
ValueNode* length_node = vobject->get(offsetof(FixedArray, length_));
if (auto length = TryGetInt32Constant(length_node)) {
@@ -4899,33 +4893,23 @@
ReduceResult MaglevGraphBuilder::BuildLoadFixedDoubleArrayElement(
ValueNode* elements, int index) {
- if (index < 0 || index >= FixedArray::kMaxLength) {
- return BuildAbort(AbortReason::kUnreachable);
- }
-
// We won't try to reason about the type of the elements array and thus also
// cannot end up with an empty type for it.
DCHECK(!IsEmptyNodeType(GetType(elements)));
-
if (CanTrackObjectChanges(elements, TrackObjectMode::kLoad)) {
VirtualObject* vobject =
GetObjectFromAllocation(elements->Cast<InlinedAllocation>());
- std::optional<uint32_t> length =
- TryGetUint32Constant(vobject->get(FixedArrayBase::kLengthOffset));
- if (length.has_value()) {
- if (static_cast<uint32_t>(index) < length.value()) {
- ValueNode* value =
- vobject->get(FixedDoubleArray::OffsetOfElementAt(index));
- static_assert(
- VirtualFixedDoubleArrayShape::kElementsAreFloat64Constant);
- DCHECK(value->Is<Float64Constant>());
- return value;
- } else {
- return BuildAbort(AbortReason::kUnreachable);
- }
+ compiler::FixedDoubleArrayRef elements_array = vobject->double_elements();
+ if (index >= 0 && static_cast<uint32_t>(index) < elements_array.length()) {
+ Float64 value = elements_array.GetFromImmutableFixedDoubleArray(index);
+ return GetFloat64Constant(value.get_scalar());
+ } else {
+ return BuildAbort(AbortReason::kUnreachable);
}
}
-
+ if (index < 0 || index >= FixedArray::kMaxLength) {
+ return BuildAbort(AbortReason::kUnreachable);
+ }
return AddNewNodeNoInputConversion<LoadFixedDoubleArrayElement>(
{elements, GetInt32Constant(index)});
}
@@ -13423,7 +13407,8 @@
if (size > kMaxRegularHeapObjectSize) return {};
fast_literal->set(
JSObject::kElementsOffset,
- CreateFixedDoubleArray(boilerplate_elements.AsFixedDoubleArray()));
+ CreateFixedDoubleArray(elements_length,
+ boilerplate_elements.AsFixedDoubleArray()));
} else {
int const size = FixedArray::SizeFor(elements_length);
if (size > kMaxRegularHeapObjectSize) return {};
@@ -13461,6 +13446,7 @@
}
VirtualObject* MaglevGraphBuilder::DeepCopyVirtualObject(VirtualObject* old) {
+ CHECK_EQ(old->type(), VirtualObject::kDefault);
VirtualObject* vobject = old->Clone(NewObjectId(), zone());
current_interpreter_frame_.add_object(vobject);
old->allocation()->UpdateObject(vobject);
@@ -13480,23 +13466,12 @@
}
VirtualObject* MaglevGraphBuilder::CreateFixedDoubleArray(
- const compiler::FixedDoubleArrayRef& elements) {
- using Shape = VirtualFixedDoubleArrayShape;
- uint32_t length = elements.length();
- SBXCHECK_GT(length, 0);
- int slot_count = Shape::header_slot_count + length;
- compiler::MapRef map = broker()->fixed_double_array_map();
- VirtualObject* vobj = NodeBase::New<VirtualObject>(
- zone(), 0, NewObjectId(), this, &Shape::kObjectLayout, map, slot_count);
- DCHECK_EQ(Shape::header_slot_count, 2);
- vobj->set(HeapObject::kMapOffset, GetConstant(map));
- vobj->set(FixedArrayBase::kLengthOffset, GetInt32Constant(length));
- for (uint32_t i = 0; i < length; i++) {
- static_assert(Shape::kElementsAreFloat64Constant);
- vobj->set(FixedDoubleArray::OffsetOfElementAt(i),
- GetFloat64Constant(elements.GetFromImmutableFixedDoubleArray(i)));
- }
- return vobj;
+ uint32_t elements_length, compiler::FixedDoubleArrayRef elements) {
+ // VirtualObjects are not added to the Maglev graph.
+ VirtualObject* vobject = NodeBase::New<VirtualObject>(
+ zone(), 0, broker()->fixed_double_array_map(), NewObjectId(),
+ elements_length, elements);
+ return vobject;
}
VirtualObject* MaglevGraphBuilder::CreateConsString(ValueNode* map,
@@ -13868,34 +13843,70 @@
});
}
+InlinedAllocation*
+MaglevGraphBuilder::BuildInlinedAllocationForDoubleFixedArray(
+ VirtualObject* vobject, AllocationType allocation_type) {
+ DCHECK(vobject->map().IsFixedDoubleArrayMap());
+ InlinedAllocation* allocation =
+ ExtendOrReallocateCurrentAllocationBlock(allocation_type, vobject);
+ int length = vobject->double_elements_length();
+ AddNonEscapingUses(allocation, length + 2);
+ ReduceResult result =
+ BuildStoreMap(allocation, broker()->fixed_double_array_map(),
+ StoreMap::Kind::kInlinedAllocation);
+ CHECK(!result.IsDoneWithAbort());
+ AddNewNodeNoInputConversion<StoreTaggedFieldNoWriteBarrier>(
+ {allocation, GetSmiConstant(length)},
+ static_cast<int>(offsetof(FixedDoubleArray, length_)),
+ StoreTaggedMode::kDefault);
+ for (int i = 0; i < length; ++i) {
+ AddNewNodeNoInputConversion<StoreFloat64>(
+ {allocation,
+ GetFloat64Constant(
+ vobject->double_elements().GetFromImmutableFixedDoubleArray(i))},
+ FixedDoubleArray::OffsetOfElementAt(i));
+ }
+ return allocation;
+}
+
InlinedAllocation* MaglevGraphBuilder::BuildInlinedAllocation(
VirtualObject* vobject, AllocationType allocation_type) {
current_interpreter_frame_.add_object(vobject);
InlinedAllocation* allocation;
-
- using ValueAndDesc = std::pair<ValueNode*, vobj::Field>;
- SmallZoneVector<ValueAndDesc, 8> values(zone());
- vobject->ForEachSlot([&](ValueNode* node, vobj::Field desc) {
- if (node->Is<VirtualObject>()) {
- VirtualObject* nested = node->Cast<VirtualObject>();
- node = BuildInlinedAllocation(nested, allocation_type);
- // Update the vobject's slot value.
- vobject->set(desc.offset, node);
+ switch (vobject->type()) {
+ case VirtualObject::kHeapNumber:
+ case VirtualObject::kConsString:
+ UNREACHABLE();
+ case VirtualObject::kFixedDoubleArray:
+ allocation =
+ BuildInlinedAllocationForDoubleFixedArray(vobject, allocation_type);
+ break;
+ case VirtualObject::kDefault: {
+ using ValueAndDesc = std::pair<ValueNode*, vobj::Field>;
+ SmallZoneVector<ValueAndDesc, 8> values(zone());
+ vobject->ForEachSlot([&](ValueNode* node, vobj::Field desc) {
+ if (node->Is<VirtualObject>()) {
+ VirtualObject* nested = node->Cast<VirtualObject>();
+ node = BuildInlinedAllocation(nested, allocation_type);
+ // Update the vobject's slot value.
+ vobject->set(desc.offset, node);
+ }
+ node = ConvertForField(node, desc, allocation_type);
+ values.push_back({node, desc});
+ });
+ allocation =
+ ExtendOrReallocateCurrentAllocationBlock(allocation_type, vobject);
+ AddNonEscapingUses(allocation, static_cast<int>(values.size()));
+ for (uint32_t i = 0; i < values.size(); i++) {
+ const auto [value, desc] = values[i];
+ BuildInitializeStore(desc, allocation, allocation_type, value);
+ }
+ if (is_loop_effect_tracking()) {
+ loop_effects_->allocations.insert(allocation);
+ }
+ break;
}
- node = ConvertForField(node, desc, allocation_type);
- values.push_back({node, desc});
- });
- allocation =
- ExtendOrReallocateCurrentAllocationBlock(allocation_type, vobject);
- AddNonEscapingUses(allocation, static_cast<int>(values.size()));
- for (uint32_t i = 0; i < values.size(); i++) {
- const auto [value, desc] = values[i];
- BuildInitializeStore(desc, allocation, allocation_type, value);
}
- if (is_loop_effect_tracking()) {
- loop_effects_->allocations.insert(allocation);
- }
-
if (v8_flags.maglev_allocation_folding < 2) {
ClearCurrentAllocationBlock();
}
@@ -16007,8 +16018,6 @@
ReduceResult MaglevGraphBuilder::EmitUnconditionalDeopt(
DeoptimizeReason reason) {
current_block()->set_deferred(true);
- // Create a block rather than calling finish, since we don't yet know the
- // next block's offset before the loop skipping the rest of the bytecodes.
FinishBlock<Deopt>({}, reason);
return ReduceResult::DoneWithAbort();
}
@@ -16468,8 +16477,6 @@
}
ReduceResult MaglevGraphBuilder::BuildAbort(AbortReason reason) {
- // Create a block rather than calling finish, since we don't yet know the
- // next block's offset before the loop skipping the rest of the bytecodes.
FinishBlock<Abort>({}, reason);
return ReduceResult::DoneWithAbort();
}
@@ -16724,22 +16731,32 @@
uint32_t offset,
LoadType type,
Args&&... args) {
- if (!CanTrackObjectChanges(object, TrackObjectMode::kLoad)) {
- return AddNewNode<Instruction>({object}, offset,
- std::forward<Args>(args)..., type);
+ if (offset != HeapObject::kMapOffset &&
+ CanTrackObjectChanges(object, TrackObjectMode::kLoad)) {
+ VirtualObject* vobject =
+ GetObjectFromAllocation(object->Cast<InlinedAllocation>());
+ ValueNode* value;
+ CHECK_NE(vobject->type(), VirtualObject::kHeapNumber);
+ if (vobject->type() == VirtualObject::kDefault) {
+ // TODO(jgruber): The VirtualObject knows whether the field at the
+ // given offset is tagged. Add a DCHECK.
+ value = vobject->get(offset);
+ } else {
+ DCHECK_EQ(vobject->type(), VirtualObject::kFixedDoubleArray);
+      // The only offset we're allowed to read from a FixedDoubleArray as a
+      // tagged field is the length.
+ CHECK_EQ(offset, offsetof(FixedDoubleArray, length_));
+ value = GetInt32Constant(vobject->double_elements_length());
+ }
+ if (v8_flags.trace_maglev_object_tracking) {
+ std::cout << " * Reusing value in virtual object "
+ << PrintNodeLabel(vobject) << "[" << offset
+ << "]: " << PrintNode(value) << std::endl;
+ }
+ return value;
}
-
- VirtualObject* vobject =
- GetObjectFromAllocation(object->Cast<InlinedAllocation>());
- ValueNode* value;
- CHECK_EQ(vobject->FieldForOffset(offset).type, vobj::FieldType::kTagged);
- value = vobject->get(offset);
- if (v8_flags.trace_maglev_object_tracking) {
- std::cout << " * Reusing value in virtual object "
- << PrintNodeLabel(vobject) << "[" << offset
- << "]: " << PrintNode(value) << std::endl;
- }
- return value;
+ return AddNewNode<Instruction>({object}, offset, std::forward<Args>(args)...,
+ type);
}
void MaglevGraphBuilder::AddDeoptUse(ValueNode* node) {
```
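
Two of the hunks above change how Maglev's escape analysis models boilerplate double arrays: a fixed-double-array virtual object now records its length plus a `compiler::FixedDoubleArrayRef` into the immutable boilerplate array, instead of one `Float64Constant` slot per element, and `BuildLoadFixedDoubleArrayElement` folds in-bounds constant-index loads straight out of that ref. The standalone sketch below illustrates that shape; `Kind`, `VirtualObject`, and `TryFoldElementLoad` are simplified hypothetical stand-ins, not the real Maglev classes:

```
#include <iostream>
#include <optional>
#include <vector>

// Kinds mirroring the new VirtualObject type dispatch in the CL.
enum class Kind { kDefault, kHeapNumber, kConsString, kFixedDoubleArray };

struct VirtualObject {
  Kind kind;
  // Stand-in for compiler::FixedDoubleArrayRef: an immutable snapshot of the
  // boilerplate's doubles, captured when the virtual object is created.
  std::vector<double> double_elements;

  int double_elements_length() const {
    return static_cast<int>(double_elements.size());
  }
};

// Mirrors the folding in BuildLoadFixedDoubleArrayElement: when the backing
// store is a tracked fixed-double-array virtual object, an in-bounds
// constant-index load folds to a constant; an out-of-bounds constant index
// is dead code in the real builder (BuildAbort(kUnreachable)).
std::optional<double> TryFoldElementLoad(const VirtualObject& vobject,
                                         int index) {
  if (vobject.kind != Kind::kFixedDoubleArray) return std::nullopt;
  if (index < 0 || index >= vobject.double_elements_length()) {
    return std::nullopt;  // the real code aborts instead of falling through
  }
  // Stands in for elements_array.GetFromImmutableFixedDoubleArray(index).
  return vobject.double_elements[index];
}

int main() {
  VirtualObject vobj{Kind::kFixedDoubleArray, {1.5, 2.5, 3.5}};
  if (auto v = TryFoldElementLoad(vobj, 1)) {
    std::cout << "folded to constant " << *v << "\n";  // prints 2.5
  }
  if (!TryFoldElementLoad(vobj, 7)) {
    std::cout << "constant index out of bounds: dead code\n";
  }
}
```

In the real builder both outcomes produce graph nodes rather than values: a `Float64Constant` via `GetFloat64Constant(...)` on success, and `BuildAbort(AbortReason::kUnreachable)` otherwise; the sketch models that split with `std::optional`.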
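The last hunk tightens the templated tagged-field load helper in the same spirit: `kDefault` virtual objects can serve any tracked slot, `kHeapNumber` must never reach this path, and the only tagged field that may be read from a fixed-double-array virtual object is its length. A rough sketch of that guard, again with simplified stand-in types (`VKind`, `VObj`, `kLengthOffset` are hypothetical, not V8 declarations):

```
#include <cassert>
#include <cstdint>
#include <optional>

enum class VKind { kDefault, kHeapNumber, kFixedDoubleArray };

struct VObj {
  VKind kind;
  int32_t length;  // meaningful for kFixedDoubleArray
  // A kDefault object would carry a slot table here (elided).
};

// Hypothetical stand-in for offsetof(FixedDoubleArray, length_).
constexpr uint32_t kLengthOffset = 8;

// Sketch of the guard: heap-number virtual objects must never reach this
// path (CHECK_NE in the real code), and a fixed-double-array virtual object
// only serves a tagged read of its length field (CHECK_EQ on the offset).
std::optional<int32_t> TryFoldTaggedLoad(const VObj& vobj, uint32_t offset) {
  assert(vobj.kind != VKind::kHeapNumber);
  if (vobj.kind == VKind::kFixedDoubleArray) {
    assert(offset == kLengthOffset);
    return vobj.length;  // the real code returns GetInt32Constant(length)
  }
  // kDefault: the real code returns the tracked slot, vobject->get(offset).
  return std::nullopt;
}

int main() {
  VObj arr{VKind::kFixedDoubleArray, 3};
  assert(TryFoldTaggedLoad(arr, kLengthOffset) == 3);
}
```

Note that, as in the hunk itself, map loads (`offset == HeapObject::kMapOffset`) never take this path at all; they fall through to the regular `AddNewNode<Instruction>` load.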