1 /*
   2  * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciValueKlass.hpp"
  27 #include "opto/addnode.hpp"
  28 #include "opto/graphKit.hpp"
  29 #include "opto/rootnode.hpp"
  30 #include "opto/valuetypenode.hpp"
  31 #include "opto/phaseX.hpp"
  32 
  33 ValueTypeNode* ValueTypeNode::make(PhaseGVN& gvn, ciValueKlass* klass) {
  34   // Create a new ValueTypeNode with uninitialized values and NULL oop
  35   const TypeValueType* type = TypeValueType::make(klass);
  36   return new ValueTypeNode(type, gvn.zerocon(T_VALUETYPE));
  37 }
  38 
  39 Node* ValueTypeNode::make_default(PhaseGVN& gvn, ciValueKlass* vk) {
  40   // TODO re-use constant oop of pre-allocated default value type here?
  41   // Create a new ValueTypeNode with default values
  42   ValueTypeNode* vt = ValueTypeNode::make(gvn, vk);
  43   for (uint i = 0; i < vt->field_count(); ++i) {
  44     ciType* field_type = vt->field_type(i);
  45     Node* value = NULL;
  46     if (field_type->is_valuetype()) {
  47       value = ValueTypeNode::make_default(gvn, field_type->as_value_klass());
  48     } else {
  49       value = gvn.zerocon(field_type->basic_type());
  50     }
  51     vt->set_field_value(i, value);
  52   }
  53   return gvn.transform(vt);
  54 }
  55 
Node* ValueTypeNode::make(PhaseGVN& gvn, Node* mem, Node* oop) {
  // Create and initialize a ValueTypeNode by loading all field
  // values from a heap-allocated version and also save the oop.
  // 'oop' must be typed as a value type pointer; its klass drives the loads.
  const TypeValueType* type = gvn.type(oop)->is_valuetypeptr()->value_type();
  ValueTypeNode* vt = new ValueTypeNode(type, oop);
  // Base and pointer are the same here: fields live directly in the object.
  vt->load(gvn, mem, oop, oop, type->value_klass());
  assert(vt->is_allocated(&gvn), "value type should be allocated");
  // Sanity check: the loads we just emitted should fold back to 'oop'
  // (constants and CheckCastPP oops are exempt from this round-trip check).
  assert(oop->is_Con() || oop->is_CheckCastPP() || vt->is_loaded(&gvn, type) == oop, "value type should be loaded");
  return gvn.transform(vt);
}
  66 
Node* ValueTypeNode::make(PhaseGVN& gvn, ciValueKlass* vk, Node* mem, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset) {
  // Create and initialize a ValueTypeNode by loading all field values from
  // a flattened value type field at 'holder_offset' or from a value type array.
  ValueTypeNode* vt = make(gvn, vk);
  // The value type is flattened into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when loading the values.
  holder_offset -= vk->first_field_offset();
  vt->load(gvn, mem, obj, ptr, holder, holder_offset);
  // A flattened field has no oop of its own, so the holder's oop must not
  // have leaked into this node as if it were the value type's oop.
  assert(vt->is_loaded(&gvn, vt->type()->isa_valuetype()) != obj, "holder oop should not be used as flattened value type oop");
  return gvn.transform(vt)->as_ValueType();
}
  78 
void ValueTypeNode::load(PhaseGVN& gvn, Node* mem, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset) {
  // Initialize the value type by loading its field values from
  // memory and adding the values as input edges to the node.
  // 'base' is the containing object (or array), 'ptr' the address to load
  // relative to, and 'holder_offset' the flattening-adjusted base offset.
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    ciType* ftype = field_type(i);
    Node* value = NULL;
    if (ftype->is_valuetype()) {
      // Recursively load the flattened value type field
      value = ValueTypeNode::make(gvn, ftype->as_value_klass(), mem, base, ptr, holder, offset);
    } else {
      const Type* con_type = NULL;
      if (base->is_Con()) {
        // If the oop to the value type is constant (static final field), we can
        // also treat the fields as constants because the value type is immutable.
        const TypeOopPtr* oop_ptr = base->bottom_type()->isa_oopptr();
        ciObject* constant_oop = oop_ptr->const_oop();
        ciField* field = holder->get_field_by_offset(offset, false);
        ciConstant constant = constant_oop->as_instance()->field_value(field);
        con_type = Type::make_from_constant(constant, /*require_const=*/ true);
      }
      if (con_type != NULL) {
        // Found a constant field value
        value = gvn.makecon(con_type);
      } else {
        // Load field value from memory
        const Type* base_type = gvn.type(base);
        const TypePtr* adr_type = NULL;
        if (base_type->isa_aryptr()) {
          // In the case of a flattened value type array, each field
          // has its own slice
          adr_type = base_type->is_aryptr()->with_field_offset(offset)->add_offset(Type::OffsetBot);
        } else {
          // Instance field: use the compiler's alias type for precise slicing.
          ciField* field = holder->get_field_by_offset(offset, false);
          adr_type = gvn.C->alias_type(field)->adr_type();
        }
        // Compute the field address and emit an unordered load of the value.
        Node* adr = gvn.transform(new AddPNode(base, ptr, gvn.MakeConX(offset)));
        BasicType bt = type2field[ftype->basic_type()];
        value = LoadNode::make(gvn, NULL, mem, adr, adr_type, Type::get_const_type(ftype), bt, MemNode::unordered);
      }
    }
    set_field_value(i, gvn.transform(value));
  }
}
 123 
// Check if all field values of this value type come from loads off the same
// base oop at the expected offsets. If so, return that base oop (the value
// type is "loaded" from it); otherwise return NULL.
Node* ValueTypeNode::is_loaded(PhaseGVN* phase, const TypeValueType* t, Node* base, int holder_offset) {
  if (field_count() == 0) {
    // Field-less value type: only the special __Value klass qualifies and it
    // must already carry a non-null oop.
    assert(t->value_klass() == phase->C->env()->___Value_klass(), "unexpected value type klass");
    assert(is_allocated(phase), "must be allocated");
    return get_oop();
  }
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    if (value->isa_DecodeN()) {
      // Skip DecodeN
      value = value->in(1);
    }
    if (value->isa_Load()) {
      AddPNode* load_addr = value->in(MemNode::Address)->as_AddP();
      if (base == NULL) {
        // Set base and check if pointer type matches
        base = load_addr->base_node();
        const TypeValueTypePtr* vtptr = phase->type(base)->isa_valuetypeptr();
        if (vtptr == NULL || !vtptr->value_type()->eq(t)) {
          return NULL;
        }
      }
      // Check if base and offset of field load matches
      Node* off = load_addr->in(AddPNode::Offset);
      int load_offset = LP64_ONLY(off->get_long()) NOT_LP64(off->get_int());
      if (base != load_addr->base_node() || offset != load_offset) {
        return NULL;
      }
    } else if (value->isa_ValueType()) {
      // Check value type field load recursively; compensate for the missing
      // oop header of the flattened field (see make/store_flattened).
      ValueTypeNode* vt = value->as_ValueType();
      base = vt->is_loaded(phase, t, base, offset - vt->value_klass()->first_field_offset());
      if (base == NULL) {
        return NULL;
      }
    } else {
      // Field value is neither a load nor a nested value type: not loaded.
      return NULL;
    }
  }
  return base;
}
 166 
 167 void ValueTypeNode::store_flattened(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset) const {
 168   // The value type is embedded into the object without an oop header. Subtract the
 169   // offset of the first field to account for the missing header when storing the values.
 170   holder_offset -= value_klass()->first_field_offset();
 171   store(kit, base, ptr, holder, holder_offset);
 172 }
 173 
 174 void ValueTypeNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset) const {
 175   // Write field values to memory
 176   for (uint i = 0; i < field_count(); ++i) {
 177     int offset = holder_offset + field_offset(i);
 178     Node* value = field_value(i);
 179     if (value->is_ValueType()) {
 180       // Recursively store the flattened value type field
 181       value->isa_ValueType()->store_flattened(kit, base, ptr, holder, offset);
 182     } else {
 183       const Type* base_type = kit->gvn().type(base);
 184       const TypePtr* adr_type = NULL;
 185       if (base_type->isa_aryptr()) {
 186         // In the case of a flattened value type array, each field has its own slice
 187         adr_type = base_type->is_aryptr()->with_field_offset(offset)->add_offset(Type::OffsetBot);
 188       } else {
 189         ciField* field = holder->get_field_by_offset(offset, false);
 190         adr_type = kit->C->alias_type(field)->adr_type();
 191       }
 192       Node* adr = kit->basic_plus_adr(base, ptr, offset);
 193       BasicType bt = type2field[field_type(i)->basic_type()];
 194       if (is_java_primitive(bt)) {
 195         kit->store_to_memory(kit->control(), adr, value, bt, adr_type, MemNode::unordered);
 196       } else {
 197         const TypeOopPtr* ft = TypeOopPtr::make_from_klass(field_type(i)->as_klass());
 198         assert(adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
 199         bool is_array = base_type->isa_aryptr() != NULL;
 200         kit->store_oop(kit->control(), base, adr, adr_type, value, ft, bt, is_array, MemNode::unordered);
 201       }
 202     }
 203   }
 204 }
 205 
// Return a non-null oop for this value type, allocating it on the heap if
// necessary. May emit a runtime null check that merges the "already
// allocated" and "freshly allocated" paths through a region/phi.
Node* ValueTypeNode::allocate(GraphKit* kit) {
  Node* in_oop = get_oop();
  Node* null_ctl = kit->top();
  // Check if value type is already allocated
  Node* not_null_oop = kit->null_check_oop(in_oop, &null_ctl);
  if (null_ctl->is_top()) {
    // Value type is allocated
    return not_null_oop;
  }
  // Not able to prove that value type is allocated.
  // Emit runtime check that may be folded later.
  assert(!is_allocated(&kit->gvn()), "should not be allocated");
  const TypeValueTypePtr* vtptr_type = TypeValueTypePtr::make(bottom_type()->isa_valuetype(), TypePtr::NotNull);
  // Region merging the two paths, plus phis for oop, I/O and memory state.
  RegionNode* region = new RegionNode(3);
  PhiNode* oop = new PhiNode(region, vtptr_type);
  PhiNode* io  = new PhiNode(region, Type::ABIO);
  PhiNode* mem = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);

  // Oop is non-NULL, use it
  region->init_req(1, kit->control());
  oop   ->init_req(1, not_null_oop);
  io    ->init_req(1, kit->i_o());
  mem   ->init_req(1, kit->merged_memory());

  // Oop is NULL, allocate value type
  kit->set_control(null_ctl);
  kit->kill_dead_locals();
  ciValueKlass* vk = value_klass();
  Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
  Node* alloc_oop  = kit->new_instance(klass_node, NULL, NULL, false, this);
  // Write field values to memory
  store(kit, alloc_oop, alloc_oop, vk);
  region->init_req(2, kit->control());
  oop   ->init_req(2, alloc_oop);
  io    ->init_req(2, kit->i_o());
  mem   ->init_req(2, kit->merged_memory());

  // Update GraphKit
  kit->set_control(kit->gvn().transform(region));
  kit->set_i_o(kit->gvn().transform(io));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->record_for_igvn(region);
  kit->record_for_igvn(oop);
  kit->record_for_igvn(io);
  kit->record_for_igvn(mem);

  // Use cloned ValueTypeNode to propagate oop from now on
  Node* res_oop = kit->gvn().transform(oop);
  ValueTypeNode* vt = clone()->as_ValueType();
  vt->set_oop(res_oop);
  kit->replace_in_map(this, kit->gvn().transform(vt));
  return res_oop;
}
 259 
 260 bool ValueTypeNode::is_allocated(PhaseGVN* phase) const {
 261   const Type* oop_type = phase->type(get_oop());
 262   return oop_type->meet(TypePtr::NULL_PTR) != oop_type;
 263 }
 264 
// Clones the values type to handle control flow merges involving multiple value types.
// The inputs are replaced by PhiNodes to represent the merged values for the given region.
ValueTypeNode* ValueTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region) {
  assert(!has_phi_inputs(region), "already cloned with phis");
  ValueTypeNode* vt = clone()->as_ValueType();

  // Create a PhiNode for merging the oop values
  const TypeValueTypePtr* vtptr = TypeValueTypePtr::make(vt->bottom_type()->isa_valuetype());
  PhiNode* oop = PhiNode::make(region, vt->get_oop(), vtptr);
  gvn->set_type(oop, vtptr);
  vt->set_oop(oop);

  // Create a PhiNode each for merging the field values
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* type = vt->field_type(i);
    Node*  value = vt->field_value(i);
    if (type->is_valuetype()) {
      // Handle flattened value type fields recursively
      value = value->as_ValueType()->clone_with_phis(gvn, region);
    } else {
      // Scalar/oop field: a plain phi of the field's constant type suffices.
      const Type* phi_type = Type::get_const_type(type);
      value = PhiNode::make(region, value, phi_type);
      gvn->set_type(value, phi_type);
    }
    vt->set_field_value(i, value);
  }
  gvn->set_type(vt, vt->bottom_type());
  return vt;
}
 294 
 295 // Checks if the inputs of the ValueTypeNode were replaced by PhiNodes
 296 // for the given region (see ValueTypeNode::clone_with_phis).
 297 bool ValueTypeNode::has_phi_inputs(Node* region) {
 298   // Check oop input
 299   bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
 300 #ifdef ASSERT
 301   if (result) {
 302     // Check all field value inputs for consistency
 303     for (uint i = Oop; i < field_count(); ++i) {
 304       Node* n = in(i);
 305       if (n->is_ValueType()) {
 306         assert(n->as_ValueType()->has_phi_inputs(region), "inconsistent phi inputs");
 307       } else {
 308         assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
 309       }
 310     }
 311   }
 312 #endif
 313   return result;
 314 }
 315 
// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'.
// 'pnum' is the phi input slot corresponding to the incoming control path;
// when 'transform' is set the updated phis are GVN-transformed immediately.
ValueTypeNode* ValueTypeNode::merge_with(PhaseGVN* gvn, const ValueTypeNode* other, int pnum, bool transform) {
  // Merge oop inputs
  PhiNode* phi = get_oop()->as_Phi();
  phi->set_req(pnum, other->get_oop());
  if (transform) {
    set_oop(gvn->transform(phi));
    gvn->record_for_igvn(phi);
  }
  // Merge field values
  for (uint i = 0; i < field_count(); ++i) {
    Node* val1 =        field_value(i);
    Node* val2 = other->field_value(i);
    if (val1->isa_ValueType()) {
      // Flattened value type field: merge the nested nodes recursively.
      val1->as_ValueType()->merge_with(gvn, val2->as_ValueType(), pnum, transform);
    } else {
      assert(val1->is_Phi(), "must be a phi node");
      assert(!val2->is_ValueType(), "inconsistent merge values");
      val1->set_req(pnum, val2);
    }
    if (transform) {
      set_field_value(i, gvn->transform(val1));
      gvn->record_for_igvn(val1);
    }
  }
  return this;
}
 343 
 344 Node* ValueTypeNode::field_value(uint index) const {
 345   assert(index < field_count(), "index out of bounds");
 346   return in(Values + index);
 347 }
 348 
// Get the value of the field at the given offset.
// If 'recursive' is true, flattened value type fields will be resolved recursively.
Node* ValueTypeNode::field_value_by_offset(int offset, bool recursive) const {
  // If the field at 'offset' belongs to a flattened value type field, 'index' refers to the
  // corresponding ValueTypeNode input and 'sub_offset' is the offset in flattened value type.
  int index = value_klass()->field_index_by_offset(offset);
  int sub_offset = offset - field_offset(index);
  Node* value = field_value(index);
  if (recursive && value->is_ValueType()) {
    // Flattened value type field
    ValueTypeNode* vt = value->as_ValueType();
    sub_offset += vt->value_klass()->first_field_offset(); // Add header size
    return vt->field_value_by_offset(sub_offset);
  }
  // NOTE: the first assert is trivially true here (the recursive+ValueType
  // case returned above); it documents the invariant for the fall-through.
  assert(!(recursive && value->is_ValueType()), "should not be a value type");
  assert(sub_offset == 0, "offset mismatch");
  return value;
}
 367 
 368 void ValueTypeNode::set_field_value(uint index, Node* value) {
 369   assert(index < field_count(), "index out of bounds");
 370   set_req(Values + index, value);
 371 }
 372 
 373 int ValueTypeNode::field_offset(uint index) const {
 374   assert(index < field_count(), "index out of bounds");
 375   return value_klass()->field_offset_by_index(index);
 376 }
 377 
 378 ciType* ValueTypeNode::field_type(uint index) const {
 379   assert(index < field_count(), "index out of bounds");
 380   return value_klass()->field_type_by_index(index);
 381 }
 382 
// Replace debug uses of this node at safepoints by a SafePointScalarObjectNode
// plus the individual (flattened) field values, so deoptimization can
// rematerialize the value type without a heap allocation.
void ValueTypeNode::make_scalar_in_safepoints(Compile* C) {
  const TypeValueTypePtr* res_type = TypeValueTypePtr::make(bottom_type()->isa_valuetype(), TypePtr::NotNull);
  ciValueKlass* vk = value_klass();
  uint nfields = vk->flattened_field_count();
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* u = fast_out(i);
    // Only process safepoints that actually use this node for debug info.
    if (u->is_SafePoint() && (!u->is_Call() || u->as_Call()->has_debug_use(this))) {
      Node* in_oop = get_oop();
      const Type* oop_type = in_oop->bottom_type();
      SafePointNode* sfpt = u->as_SafePoint();
      JVMState* jvms = sfpt->jvms();
      int start = jvms->debug_start();
      int end   = jvms->debug_end();
      assert(TypePtr::NULL_PTR->higher_equal(oop_type), "already heap allocated value type should be linked directly");
      // Replace safepoint edge by SafePointScalarObjectNode and add field values
      assert(jvms != NULL, "missing JVMS");
      // Index of the first field value relative to the scalarized-object area.
      uint first_ind = (sfpt->req() - jvms->scloff());
      SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(res_type,
#ifdef ASSERT
                                                                      NULL,
#endif
                                                                      first_ind, nfields);
      sobj->init_req(0, C->root());
      // Iterate over the value type fields in order of increasing
      // offset and add the field values to the safepoint.
      for (uint j = 0; j < nfields; ++j) {
        int offset = vk->nonstatic_field_at(j)->offset();
        Node* value = field_value_by_offset(offset, true /* include flattened value type fields */);
        sfpt->add_req(value);
      }
      jvms->set_endoff(sfpt->req());
      int nb = sfpt->replace_edges_in_range(this, sobj, start, end);
      // Edges were removed from this node's out-list; adjust the iterator.
      --i; imax -= nb;
    }
  }
}
 419 
 420 void ValueTypeNode::pass_klass(Node* n, uint pos, const GraphKit& kit) {
 421   ciValueKlass* vk = value_klass();
 422   const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
 423   Node* arg = kit.makecon(tk);
 424   n->init_req(pos, arg);
 425 }
 426 
 427 uint ValueTypeNode::pass_fields(Node* n, int base_input, const GraphKit& kit, ciValueKlass* base_vk, int base_offset) {
 428   ciValueKlass* vk = value_klass();
 429   if (base_vk == NULL) {
 430     base_vk = vk;
 431   }
 432   uint edges = 0;
 433   for (uint i = 0; i < field_count(); i++) {
 434     ciType* f_type = field_type(i);
 435     int offset = base_offset + field_offset(i) - (base_offset > 0 ? vk->first_field_offset() : 0);
 436     Node* arg = field_value(i);
 437     if (f_type->is_valuetype()) {
 438       ciValueKlass* embedded_vk = f_type->as_value_klass();
 439       edges += arg->as_ValueType()->pass_fields(n, base_input, kit, base_vk, offset);
 440     } else {
 441       int j = 0; int extra = 0;
 442       for (; j < base_vk->nof_nonstatic_fields(); j++) {
 443         ciField* f = base_vk->nonstatic_field_at(j);
 444         if (offset == f->offset()) {
 445           assert(f->type() == f_type, "inconsistent field type");
 446           break;
 447         }
 448         BasicType bt = f->type()->basic_type();
 449         if (bt == T_LONG || bt == T_DOUBLE) {
 450           extra++;
 451         }
 452       }
 453       n->init_req(base_input + j + extra, arg);
 454       edges++;
 455       BasicType bt = f_type->basic_type();
 456       if (bt == T_LONG || bt == T_DOUBLE) {
 457         n->init_req(base_input + j + extra + 1, kit.top());
 458         edges++;
 459       }
 460     }
 461   }
 462   return edges;
 463 }
 464 
// Idealization: try to recover the backing oop when all fields are loads from
// the same object, and during IGVN nudge safepoint users to re-wire to the
// oop once the value type is known to be allocated.
Node* ValueTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  if (!is_allocated(phase)) {
    // Check if this value type is loaded from memory
    Node* base = is_loaded(phase, type()->is_valuetype());
    if (base != NULL) {
      // Save the oop
      set_oop(base);
      assert(is_allocated(phase), "should now be allocated");
      return this;
    }
  }

  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (is_allocated(igvn)) {
      // Value type is heap allocated, search for safepoint uses
      for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
        Node* out = fast_out(i);
        if (out->is_SafePoint()) {
          // Let SafePointNode::Ideal() take care of re-wiring the
          // safepoint to the oop input instead of the value type node.
          igvn->rehash_node_delayed(out);
        }
      }
    }
  }
  // No direct replacement of this node.
  return NULL;
}
 493 
// Search for multiple allocations of this value type
// and try to replace them by dominating allocations.
void ValueTypeNode::remove_redundant_allocations(PhaseIterGVN* igvn, PhaseIdealLoop* phase) {
  assert(EliminateAllocations, "allocation elimination should be enabled");
  Node_List dead_allocations;
  // Search for allocations of this value type
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* out1 = fast_out(i);
    if (out1->is_Allocate() && out1->in(AllocateNode::ValueNode) == this) {
      AllocateNode* alloc = out1->as_Allocate();
      Node* res_dom = NULL;
      if (is_allocated(igvn)) {
        // The value type is already allocated but still connected to an AllocateNode.
        // This can happen with late inlining when we first allocate a value type argument
        // but later decide to inline the call with the callee code also allocating.
        res_dom = get_oop();
      } else {
        // Search for a dominating allocation of the same value type
        for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
          Node* out2 = fast_out(j);
          if (alloc != out2 && out2->is_Allocate() && out2->in(AllocateNode::ValueNode) == this &&
              phase->is_dominator(out2, alloc)) {
            AllocateNode* alloc_dom =  out2->as_Allocate();
            assert(alloc->in(AllocateNode::KlassNode) == alloc_dom->in(AllocateNode::KlassNode), "klasses should match");
            res_dom = alloc_dom->result_cast();
            break;
          }
        }
      }
      if (res_dom != NULL) {
        // Found a dominating allocation
        Node* res = alloc->result_cast();
        assert(res != NULL, "value type allocation should not be dead");
        // Move users to dominating allocation
        igvn->replace_node(res, res_dom);
        // The dominated allocation is now dead, remove the
        // value type node connection and adjust the iterator.
        dead_allocations.push(alloc);
        igvn->replace_input_of(alloc, AllocateNode::ValueNode, NULL);
        --i; --imax;
#ifdef ASSERT
        if (PrintEliminateAllocations) {
          tty->print("++++ Eliminated: %d Allocate ", alloc->_idx);
          dump_spec(tty);
          tty->cr();
        }
#endif
      }
    }
  }

  // Remove dead value type allocations by replacing the projection nodes
  for (uint i = 0; i < dead_allocations.size(); ++i) {
    CallProjections projs;
    AllocateNode* alloc = dead_allocations.at(i)->as_Allocate();
    alloc->extract_projections(&projs, true);
    // Use lazy_replace to avoid corrupting the dominator tree of PhaseIdealLoop.
    // Fall-through projections are wired back to the allocation's inputs;
    // exceptional (catchall) projections and the result are killed with top.
    phase->lazy_replace(projs.fallthrough_catchproj, alloc->in(TypeFunc::Control));
    phase->lazy_replace(projs.fallthrough_memproj, alloc->in(TypeFunc::Memory));
    phase->lazy_replace(projs.catchall_memproj, phase->C->top());
    phase->lazy_replace(projs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
    phase->lazy_replace(projs.catchall_ioproj, phase->C->top());
    phase->lazy_replace(projs.catchall_catchproj, phase->C->top());
    phase->lazy_replace(projs.resproj, phase->C->top());
  }
}
 560 
// When a call returns multiple values, it has several result
// projections, one per field. Replacing the result of the call by a
// value type node (after late inlining) requires that for each result
// projection, we find the corresponding value type field.
void ValueTypeNode::replace_call_results(Node* call, Compile* C) {
  ciValueKlass* vk = value_klass();
  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
    ProjNode *pn = call->fast_out(i)->as_Proj();
    uint con = pn->_con;
    if (con >= TypeFunc::Parms+1) {
      // Projection numbers count long/double fields as two slots. Convert
      // the projection number to a field index by counting the extra slots
      // of preceding two-slot fields. Note the loop bound 'field_nb - extra'
      // shrinks as 'extra' grows, so the scan stops at the right field.
      uint field_nb = con - (TypeFunc::Parms+1);
      int extra = 0;
      for (uint j = 0; j < field_nb - extra; j++) {
        ciField* f = vk->nonstatic_field_at(j);
        BasicType bt = f->type()->basic_type();
        if (bt == T_LONG || bt == T_DOUBLE) {
          extra++;
        }
      }
      ciField* f = vk->nonstatic_field_at(field_nb - extra);
      Node* field = field_value_by_offset(f->offset(), true);

      // Replace the projection by the field value and disconnect it.
      C->gvn_replace_by(pn, field);
      C->initial_gvn()->hash_delete(pn);
      pn->set_req(0, C->top());
      // An out-edge of 'call' was removed; adjust the iterator.
      --i; --imax;
    }
  }
}
 590 
 591 
 592 #ifndef PRODUCT
 593 
// Debug printing: delegate to TypeNode, which prints the node's type;
// this node has no additional state worth dumping.
void ValueTypeNode::dump_spec(outputStream* st) const {
  TypeNode::dump_spec(st);
}
 597 
 598 #endif