src/share/vm/opto/valuetypenode.cpp

@@ -56,26 +56,30 @@
 Node* ValueTypeNode::make(PhaseGVN& gvn, Node* mem, Node* oop) {
   // Create and initialize a ValueTypeNode by loading all field
   // values from a heap-allocated version and also save the oop.
   const TypeValueType* type = gvn.type(oop)->is_valuetypeptr()->value_type();
   ValueTypeNode* vt = new ValueTypeNode(type, oop);
-  vt->load_values(gvn, mem, oop, oop, type->value_klass());
+  vt->load(gvn, mem, oop, oop, type->value_klass());
+  assert(vt->is_allocated(&gvn), "value type should be allocated");
+  assert(oop->is_Con() || oop->is_CheckCastPP() || vt->is_loaded(&gvn, type) != NULL, "value type should be loaded");
   return gvn.transform(vt);
 }
 
 Node* ValueTypeNode::make(PhaseGVN& gvn, ciValueKlass* vk, Node* mem, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset) {
   // Create and initialize a ValueTypeNode by loading all field values from
   // a flattened value type field at 'holder_offset' or from a value type array.
   ValueTypeNode* vt = make(gvn, vk);
   // The value type is flattened into the object without an oop header. Subtract the
   // offset of the first field to account for the missing header when loading the values.
   holder_offset -= vk->first_field_offset();
-  vt->load_values(gvn, mem, obj, ptr, holder, holder_offset);
-  return gvn.transform(vt);
+  vt->load(gvn, mem, obj, ptr, holder, holder_offset);
+  vt = gvn.transform(vt)->as_ValueType();
+  assert(!vt->is_allocated(&gvn), "value type should not be allocated");
+  return vt;
 }
 
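In the second make() above, the field values come from a flattened value type embedded at holder_offset inside another object or array. Because the flattened copy has no object header, holder_offset is first reduced by first_field_offset() and each field is then read at its standalone offset. A minimal standalone sketch of that address arithmetic, with hypothetical offsets standing in for the values a ciValueKlass would report:

#include <cstdio>

int main() {
  // Hypothetical layout numbers, only for illustration.
  const int first_field_offset = 16; // offset of the first field in the standalone (headered) layout
  const int holder_offset      = 24; // where the flattened value starts inside the holder
  const int field_offset       = 20; // a field's offset in the standalone layout

  // The flattened copy has no header: subtract first_field_offset once,
  // then add each field's standalone offset, as make()/load() do.
  int adjusted = holder_offset - first_field_offset;
  int load_at  = adjusted + field_offset;
  std::printf("field value is read from holder offset %d\n", load_at); // prints 28
  return 0;
}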
-void ValueTypeNode::load_values(PhaseGVN& gvn, Node* mem, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset) {
+void ValueTypeNode::load(PhaseGVN& gvn, Node* mem, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset) {
   // Initialize the value type by loading its field values from
   // memory and adding the values as input edges to the node.
   for (uint i = 0; i < field_count(); ++i) {
     int offset = holder_offset + field_offset(i);
     ciType* ftype = field_type(i);

@@ -116,25 +120,63 @@
     }
     set_field_value(i, gvn.transform(value));
   }
 }
 
-void ValueTypeNode::store(GraphKit* kit, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset) const {
+Node* ValueTypeNode::is_loaded(PhaseGVN* phase, const TypeValueType* t, Node* base, int holder_offset) {
+  for (uint i = 0; i < field_count(); ++i) {
+    int offset = holder_offset + field_offset(i);
+    Node* value = field_value(i);
+    if (value->isa_DecodeN()) {
+      // Skip DecodeN
+      value = value->in(1);
+    }
+    if (value->isa_Load()) {
+      AddPNode* load_addr = value->in(MemNode::Address)->as_AddP();
+      if (base == NULL) {
+        // Set base and check if pointer type matches
+        base = load_addr->base_node();
+        const TypeValueTypePtr* vtptr = phase->type(base)->isa_valuetypeptr();
+        if (vtptr == NULL || !vtptr->value_type()->eq(t)) {
+          return NULL;
+        }
+      }
+      // Check if the base and offset of the field load match
+      Node* off = load_addr->in(AddPNode::Offset);
+      int load_offset = LP64_ONLY(off->get_long()) NOT_LP64(off->get_int());
+      if (base != load_addr->base_node() || offset != load_offset) {
+        return NULL;
+      }
+    } else if (value->isa_ValueType()) {
+      // Check value type field load recursively
+      ValueTypeNode* vt = value->as_ValueType();
+      base = vt->is_loaded(phase, t, base, offset - vt->value_klass()->first_field_offset());
+      if (base == NULL) {
+        return NULL;
+      }
+    } else {
+      return NULL;
+    }
+  }
+  return base;
+}
+
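The new is_loaded() helper checks whether this value type was materialized purely by loading from a single heap object: every field value must be a Load (possibly behind a DecodeN) from one common base at exactly the expected offset, and nested value type fields are checked recursively. If all fields match, the common base can be reused as the node's oop. Below is a toy sketch of that matching idea over simplified field records; the struct and helper are hypothetical stand-ins, not HotSpot types:

#include <cstddef>
#include <vector>

// Hypothetical stand-in for a field value: records where it was loaded from, if anywhere.
struct FieldValue {
  const void* load_base;   // NULL if the value is not a plain memory load
  int         load_offset; // offset the load reads from
};

// Returns the common base if every field was loaded from it at its expected
// offset, NULL otherwise -- the same yes/no question is_loaded() answers.
const void* common_base(const std::vector<FieldValue>& values,
                        const std::vector<int>& expected_offsets,
                        const void* base = NULL) {
  for (std::size_t i = 0; i < values.size(); ++i) {
    if (values[i].load_base == NULL)                  return NULL; // not a load
    if (base == NULL)                                 base = values[i].load_base;
    if (values[i].load_base != base)                  return NULL; // loaded from a different object
    if (values[i].load_offset != expected_offsets[i]) return NULL; // loaded from the wrong offset
  }
  return base;
}

int main() {
  int obj;  // pretend heap object
  std::vector<FieldValue> values = { { &obj, 16 }, { &obj, 20 } };
  std::vector<int> offsets       = { 16, 20 };
  return common_base(values, offsets) == &obj ? 0 : 1; // all fields match, so the base is reusable
}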
+void ValueTypeNode::store_flattened(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset) const {
   // The value type is embedded into the object without an oop header. Subtract the
   // offset of the first field to account for the missing header when storing the values.
   holder_offset -= value_klass()->first_field_offset();
-  store_values(kit, obj, ptr, holder, holder_offset);
+  store(kit, base, ptr, holder, holder_offset);
 }
 
-void ValueTypeNode::store_values(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset) const {
+void ValueTypeNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset) const {
   // Write field values to memory
   for (uint i = 0; i < field_count(); ++i) {
     int offset = holder_offset + field_offset(i);
     Node* value = field_value(i);
     if (value->is_ValueType()) {
       // Recursively store the flattened value type field
-      value->isa_ValueType()->store(kit, base, ptr, holder, offset);
+      value->isa_ValueType()->store_flattened(kit, base, ptr, holder, offset);
     } else {
       const Type* base_type = kit->gvn().type(base);
       const TypePtr* adr_type = NULL;
       if (base_type->isa_aryptr()) {
         // In the case of a flattened value type array, each field has its own slice

@@ -151,29 +193,26 @@
         const TypeOopPtr* ft = TypeOopPtr::make_from_klass(field_type(i)->as_klass());
         assert(adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
         bool is_array = base_type->isa_aryptr() != NULL;
         kit->store_oop(kit->control(), base, adr, adr_type, value, ft, bt, is_array, MemNode::unordered);
       }
-
     }
   }
 }
 
-Node* ValueTypeNode::store_to_memory(GraphKit* kit) {
+Node* ValueTypeNode::allocate(GraphKit* kit) {
   Node* in_oop = get_oop();
   Node* null_ctl = kit->top();
   // Check if value type is already allocated
   Node* not_null_oop = kit->null_check_oop(in_oop, &null_ctl);
   if (null_ctl->is_top()) {
     // Value type is allocated
     return not_null_oop;
   }
   // Not able to prove that value type is allocated.
   // Emit runtime check that may be folded later.
-  const Type* oop_type = kit->gvn().type(in_oop);
-  assert(TypePtr::NULL_PTR->higher_equal(oop_type), "should not be allocated");
-
+  assert(!is_allocated(&kit->gvn()), "should not be allocated");
   const TypeValueTypePtr* vtptr_type = TypeValueTypePtr::make(bottom_type()->isa_valuetype(), TypePtr::NotNull);
   RegionNode* region = new RegionNode(3);
   PhiNode* oop = new PhiNode(region, vtptr_type);
   PhiNode* io  = new PhiNode(region, Type::ABIO);
   PhiNode* mem = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);

@@ -187,13 +226,13 @@
   // Oop is NULL, allocate value type
   kit->set_control(null_ctl);
   kit->kill_dead_locals();
   ciValueKlass* vk = value_klass();
   Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
-  Node* alloc_oop  = kit->new_instance(klass_node);
+  Node* alloc_oop  = kit->new_instance(klass_node, NULL, NULL, false, this);
   // Write field values to memory
-  store_values(kit, alloc_oop, alloc_oop, vk);
+  store(kit, alloc_oop, alloc_oop, vk);
   region->init_req(2, kit->control());
   oop   ->init_req(2, alloc_oop);
   io    ->init_req(2, kit->i_o());
   mem   ->init_req(2, kit->merged_memory());
 

@@ -212,10 +251,15 @@
   vt->set_oop(res_oop);
   kit->replace_in_map(this, kit->gvn().transform(vt));
   return res_oop;
 }
 
+bool ValueTypeNode::is_allocated(PhaseGVN* phase) const {
+  const Type* oop_type = phase->type(get_oop());
+  return oop_type->meet(TypePtr::NULL_PTR) != oop_type;
+}
+
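is_allocated() replaces the open-coded type test from the old store_to_memory(): if meeting the oop's type with TypePtr::NULL_PTR changes the type, the type must have excluded NULL, so the oop is known to reference an allocated instance. A toy model of that lattice argument, with a "type" represented as the set of values it allows and meet as set union (purely illustrative, not the C2 type system):

#include <cstdio>
#include <set>

typedef std::set<long> ToyType;  // a "type" is the set of values it allows

ToyType meet(const ToyType& a, const ToyType& b) {
  ToyType r = a;
  r.insert(b.begin(), b.end());  // meet widens: union of both value sets
  return r;
}

int main() {
  ToyType null_ptr;   null_ptr.insert(0);                        // NULL_PTR = {NULL}
  ToyType not_null;   not_null.insert(0x1000);                   // oop known to be non-NULL
  ToyType maybe_null; maybe_null.insert(0); maybe_null.insert(0x1000);

  // If adding NULL changes the type, the type excluded NULL, so the oop must be allocated.
  std::printf("not_null allocated:   %d\n", (int)(meet(not_null, null_ptr) != not_null));     // 1
  std::printf("maybe_null allocated: %d\n", (int)(meet(maybe_null, null_ptr) != maybe_null)); // 0
  return 0;
}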
 // Clones the value type to handle control flow merges involving multiple value types.
 // The inputs are replaced by PhiNodes to represent the merged values for the given region.
 ValueTypeNode* ValueTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region) {
   assert(!has_phi_inputs(region), "already cloned with phis");
   ValueTypeNode* vt = clone()->as_ValueType();

@@ -413,14 +457,23 @@
   }
   return edges;
 }
 
 Node* ValueTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  if (!is_allocated(phase)) {
+    // Check if this value type is loaded from memory
+    Node* base = is_loaded(phase, type()->is_valuetype());
+    if (base != NULL) {
+      // Save the oop
+      set_oop(base);
+      assert(is_allocated(phase), "should now be allocated");
+    }
+  }
+
   if (can_reshape) {
     PhaseIterGVN* igvn = phase->is_IterGVN();
-    const Type* oop_type = igvn->type(get_oop());
-    if (oop_type->meet(TypePtr::NULL_PTR) != oop_type) {
+    if (is_allocated(igvn)) {
       // Value type is heap allocated, search for safepoint uses
       for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
         Node* out = fast_out(i);
         if (out->is_SafePoint()) {
           // Let SafePointNode::Ideal() take care of re-wiring the

@@ -428,14 +481,80 @@
           igvn->rehash_node_delayed(out);
         }
       }
     }
   }
-
   return NULL;
 }
 
+// Search for multiple allocations of this value type
+// and try to replace them with dominating allocations.
+void ValueTypeNode::remove_redundant_allocations(PhaseIterGVN* igvn, PhaseIdealLoop* phase) {
+  assert(EliminateAllocations, "allocation elimination should be enabled");
+  Node_List dead_allocations;
+  // Search for allocations of this value type
+  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+    Node* out1 = fast_out(i);
+    if (out1->is_Allocate() && out1->in(AllocateNode::ValueNode) == this) {
+      AllocateNode* alloc = out1->as_Allocate();
+      Node* res_dom = NULL;
+      if (is_allocated(igvn)) {
+        // The value type is already allocated but still connected to an AllocateNode.
+        // This can happen with late inlining when we first allocate a value type argument
+        // but later decide to inline the call, and the callee code allocates it again.
+        res_dom = get_oop();
+      } else {
+        // Search for a dominating allocation of the same value type
+        for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
+          Node* out2 = fast_out(j);
+          if (alloc != out2 && out2->is_Allocate() && out2->in(AllocateNode::ValueNode) == this &&
+              phase->is_dominator(out2, alloc)) {
+            AllocateNode* alloc_dom = out2->as_Allocate();
+            assert(alloc->in(AllocateNode::KlassNode) == alloc_dom->in(AllocateNode::KlassNode), "klasses should match");
+            res_dom = alloc_dom->result_cast();
+            break;
+          }
+        }
+      }
+      if (res_dom != NULL) {
+        // Found a dominating allocation
+        Node* res = alloc->result_cast();
+        assert(res != NULL, "value type allocation should not be dead");
+        // Move users to dominating allocation
+        igvn->replace_node(res, res_dom);
+        // The dominated allocation is now dead; remove the
+        // value type node connection and adjust the iterator.
+        dead_allocations.push(alloc);
+        igvn->replace_input_of(alloc, AllocateNode::ValueNode, NULL);
+        --i; --imax;
+#ifdef ASSERT
+        if (PrintEliminateAllocations) {
+          tty->print("++++ Eliminated: %d Allocate ", alloc->_idx);
+          dump_spec(tty);
+          tty->cr();
+        }
+#endif
+      }
+    }
+  }
+
+  // Remove dead value type allocations by replacing the projection nodes
+  for (uint i = 0; i < dead_allocations.size(); ++i) {
+    CallProjections projs;
+    AllocateNode* alloc = dead_allocations.at(i)->as_Allocate();
+    alloc->extract_projections(&projs, true);
+    // Use lazy_replace to avoid corrupting the dominator tree of PhaseIdealLoop
+    phase->lazy_replace(projs.fallthrough_catchproj, alloc->in(TypeFunc::Control));
+    phase->lazy_replace(projs.fallthrough_memproj, alloc->in(TypeFunc::Memory));
+    phase->lazy_replace(projs.catchall_memproj, phase->C->top());
+    phase->lazy_replace(projs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
+    phase->lazy_replace(projs.catchall_ioproj, phase->C->top());
+    phase->lazy_replace(projs.catchall_catchproj, phase->C->top());
+    phase->lazy_replace(projs.resproj, phase->C->top());
+  }
+}
+
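remove_redundant_allocations() keeps at most one allocation per value type: a dominated AllocateNode is replaced either by the already-known oop or by the result of a dominating allocation of the same node, and the dead allocation's projections are rewired to its inputs. A toy version of the redundancy check for straight-line code, where an earlier allocation trivially dominates every later one (the struct and ids are made up for illustration):

#include <cstddef>
#include <cstdio>
#include <vector>

// Made-up record of one allocation site for the value identified by 'value_id'.
struct AllocSite { int id; int value_id; bool dead; };

int main() {
  // Straight-line code: program order equals dominance order.
  std::vector<AllocSite> sites = { {1, 7, false}, {2, 7, false}, {3, 9, false} };
  for (std::size_t i = 0; i < sites.size(); ++i) {
    for (std::size_t j = 0; j < i; ++j) {                   // earlier sites dominate site i
      if (!sites[j].dead && sites[j].value_id == sites[i].value_id) {
        sites[i].dead = true;                               // its users would be moved to site j's result
        std::printf("allocation %d replaced by dominating allocation %d\n", sites[i].id, sites[j].id);
        break;
      }
    }
  }
  return 0;
}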
 // When a call returns multiple values, it has several result
 // projections, one per field. Replacing the result of the call by a
 // value type node (after late inlining) requires that for each result
 // projection, we find the corresponding value type field.
 void ValueTypeNode::replace_call_results(Node* call, Compile* C) {