
src/hotspot/share/opto/parse1.cpp

*** 35,44 ****
--- 35,45 ----
  #include "opto/memnode.hpp"
  #include "opto/opaquenode.hpp"
  #include "opto/parse.hpp"
  #include "opto/rootnode.hpp"
  #include "opto/runtime.hpp"
+ #include "opto/valuetypenode.hpp"
  #include "runtime/arguments.hpp"
  #include "runtime/handles.inline.hpp"
  #include "runtime/safepointMechanism.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "utilities/copy.hpp"

*** 99,112 ****
  //------------------------------ON STACK REPLACEMENT---------------------------
  // Construct a node which can be used to get incoming state for
  // on stack replacement.
! Node *Parse::fetch_interpreter_state(int index,
!                                      BasicType bt,
!                                      Node *local_addrs,
!                                      Node *local_addrs_base) {
    Node *mem = memory(Compile::AliasIdxRaw);
    Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
    Node *ctl = control();
  
    // Very similar to LoadNode::make, except we handle un-aligned longs and
--- 100,119 ----
  //------------------------------ON STACK REPLACEMENT---------------------------
  // Construct a node which can be used to get incoming state for
  // on stack replacement.
! Node* Parse::fetch_interpreter_state(int index,
!                                      const Type* type,
!                                      Node* local_addrs,
!                                      Node* local_addrs_base) {
!   BasicType bt = type->basic_type();
!   if (type == TypePtr::NULL_PTR) {
!     // Ptr types are mixed together with T_ADDRESS but NULL is
!     // really for T_OBJECT types so correct it.
!     bt = T_OBJECT;
!   }
    Node *mem = memory(Compile::AliasIdxRaw);
    Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
    Node *ctl = control();
  
    // Very similar to LoadNode::make, except we handle un-aligned longs and

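
A minimal standalone sketch of the correction made above (not HotSpot code; the enums and names below are hypothetical): the class-less NULL_PTR typeflow type still denotes an object reference, so it must be loaded as an oop rather than as a raw address.

    #include <cstdio>

    enum class StaticType { Int, Address, Object, Null };  // hypothetical
    enum class LoadKind   { LoadI, LoadRawP, LoadOopP };   // hypothetical

    static LoadKind load_for(StaticType t) {
      if (t == StaticType::Null) t = StaticType::Object;   // NULL is an oop type
      switch (t) {
        case StaticType::Int:     return LoadKind::LoadI;
        case StaticType::Address: return LoadKind::LoadRawP;
        default:                  return LoadKind::LoadOopP;
      }
    }

    int main() {
      // A null constant must come back as an object load, not a raw-address load.
      std::printf("%d\n", load_for(StaticType::Null) == LoadKind::LoadOopP);
      return 0;
    }
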
*** 114,123 ****
--- 121,131 ----
    Node *l = NULL;
    switch (bt) {                // Signature is flattened
    case T_INT:     l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
    case T_FLOAT:   l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
    case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
+   case T_VALUETYPE:
    case T_OBJECT:  l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
    case T_LONG:
    case T_DOUBLE: {
      // Since arguments are in reverse order, the argument address 'adr'
      // refers to the back half of the long/double.  Recompute adr.

*** 144,155 ****
  // The type is the type predicted by ciTypeFlow.  Note that it is
  // not a general type, but can only come from Type::get_typeflow_type.
  // The safepoint is a map which will feed an uncommon trap.
  Node* Parse::check_interpreter_type(Node* l, const Type* type,
                                      SafePointNode* &bad_type_exit) {
- 
    const TypeOopPtr* tp = type->isa_oopptr();
  
    // TypeFlow may assert null-ness if a type appears unloaded.
    if (type == TypePtr::NULL_PTR ||
        (tp != NULL && !tp->klass()->is_loaded())) {
      // Value must be null, not a real oop.
--- 152,166 ----
  // The type is the type predicted by ciTypeFlow.  Note that it is
  // not a general type, but can only come from Type::get_typeflow_type.
  // The safepoint is a map which will feed an uncommon trap.
  Node* Parse::check_interpreter_type(Node* l, const Type* type,
                                      SafePointNode* &bad_type_exit) {
    const TypeOopPtr* tp = type->isa_oopptr();
+   if (type->isa_valuetype() != NULL) {
+     // The interpreter passes value types as oops
+     tp = TypeOopPtr::make_from_klass(type->isa_valuetype()->value_klass());
+   }
  
    // TypeFlow may assert null-ness if a type appears unloaded.
    if (type == TypePtr::NULL_PTR ||
        (tp != NULL && !tp->klass()->is_loaded())) {
      // Value must be null, not a real oop.

*** 168,177 ****
--- 179,194 ----
    // toward more specific classes.  Make sure these specific classes
    // are still in effect.
    if (tp != NULL && tp->klass() != C->env()->Object_klass()) {
      // TypeFlow asserted a specific object type.  Value must have that type.
      Node* bad_type_ctrl = NULL;
+     if (tp->is_valuetypeptr()) {
+       // Check value types for null here to prevent checkcast from adding an
+       // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
+       l = null_check_oop(l, &bad_type_ctrl);
+       bad_type_exit->control()->add_req(bad_type_ctrl);
+     }
      l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
      bad_type_exit->control()->add_req(bad_type_ctrl);
    }
  
    BasicType bt_l = _gvn.type(l)->basic_type();

*** 186,196 ****
  void Parse::load_interpreter_state(Node* osr_buf) {
    int index;
    int max_locals = jvms()->loc_size();
    int max_stack  = jvms()->stk_size();
  
- 
    // Mismatch between method and jvms can occur since map briefly held
    // an OSR entry state (which takes up one RawPtr word).
    assert(max_locals == method()->max_locals(), "sanity");
    assert(max_stack  >= method()->max_stack(),  "sanity");
    assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
--- 203,212 ----

*** 224,241 ****
    Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
    for (index = 0; index < mcnt; index++) {
      // Make a BoxLockNode for the monitor.
      Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
  
- 
      // Displaced headers and locked objects are interleaved in the
      // temp OSR buffer.  We only copy the locked objects out here.
      // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
!     Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
      // Try and copy the displaced header to the BoxNode
!     Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
! 
      store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
  
      // Build a bogus FastLockNode (no code will be generated) and push the
      // monitor into our debug info.
--- 240,255 ----
    Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
    for (index = 0; index < mcnt; index++) {
      // Make a BoxLockNode for the monitor.
      Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
  
      // Displaced headers and locked objects are interleaved in the
      // temp OSR buffer.  We only copy the locked objects out here.
      // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
!     Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
      // Try and copy the displaced header to the BoxNode
!     Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
      store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
  
      // Build a bogus FastLockNode (no code will be generated) and push the
      // monitor into our debug info.

*** 298,314 ****
      // makes it go dead.
      if (type == Type::BOTTOM) {
        continue;
      }
      // Construct code to access the appropriate local.
!     BasicType bt = type->basic_type();
!     if (type == TypePtr::NULL_PTR) {
!       // Ptr types are mixed together with T_ADDRESS but NULL is
!       // really for T_OBJECT types so correct it.
!       bt = T_OBJECT;
!     }
!     Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
      set_local(index, value);
    }
  
    // Extract the needed stack entries from the interpreter frame.
    for (index = 0; index < sp(); index++) {
--- 312,322 ----
      // makes it go dead.
      if (type == Type::BOTTOM) {
        continue;
      }
      // Construct code to access the appropriate local.
!     Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);
      set_local(index, value);
    }
  
    // Extract the needed stack entries from the interpreter frame.
    for (index = 0; index < sp(); index++) {

*** 593,602 ****
--- 601,622 ----
      if (log)  log->done("parse");
      C->set_default_node_notes(caller_nn);
      return;
    }
  
+   // Handle value type arguments
+   int arg_size_sig = tf()->domain_sig()->cnt();
+   for (uint i = 0; i < (uint)arg_size_sig; i++) {
+     Node* parm = map()->in(i);
+     const Type* t = _gvn.type(parm);
+     if (t->is_valuetypeptr() && t->value_klass()->is_scalarizable() && !t->maybe_null()) {
+       // Create ValueTypeNode from the oop and replace the parameter
+       Node* vt = ValueTypeNode::make_from_oop(this, parm, t->value_klass());
+       map()->replace_edge(parm, vt);
+     }
+   }
+ 
    entry_map = map();  // capture any changes performed by method setup code
    assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
  
    // We begin parsing as if we have just encountered a jump to the
    // method entry.

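
What "scalarizable" buys is sketched below in standalone C++ (hypothetical Point/area names, not HotSpot code): a scalarized value type travels as one argument per field, so the callee needs no heap pointer and no field loads.

    #include <cstdio>

    struct Point { int x; int y; };            // stand-in for a value type

    // Reference-based convention: one pointer argument, field loads required.
    static int area_by_ref(const Point* p) { return p->x * p->y; }

    // Scalarized convention: one argument per field, no memory access at all.
    static int area_by_fields(int x, int y) { return x * y; }

    int main() {
      Point p = {3, 4};
      std::printf("%d %d\n", area_by_ref(&p), area_by_fields(p.x, p.y));
      return 0;
    }
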
*** 775,786 ****
    gvn().set_type_bottom(memphi);
    _exits.set_i_o(iophi);
    _exits.set_all_memory(memphi);
  
    // Add a return value to the exit state.  (Do not push it yet.)
!   if (tf()->range()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
      if (ret_type->isa_int()) {
        BasicType ret_bt = method()->return_type()->basic_type();
        if (ret_bt == T_BOOLEAN ||
            ret_bt == T_CHAR ||
            ret_bt == T_BYTE ||
--- 795,806 ----
    gvn().set_type_bottom(memphi);
    _exits.set_i_o(iophi);
    _exits.set_all_memory(memphi);
  
    // Add a return value to the exit state.  (Do not push it yet.)
!   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
      if (ret_type->isa_int()) {
        BasicType ret_bt = method()->return_type()->basic_type();
        if (ret_bt == T_BOOLEAN ||
            ret_bt == T_CHAR ||
            ret_bt == T_BYTE ||

*** 794,823 ****
      // types will not join when we transform and push in do_exits().
      const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
      if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
        ret_type = TypeOopPtr::BOTTOM;
      }
      int         ret_size = type2size[ret_type->basic_type()];
      Node*       ret_phi  = new PhiNode(region, ret_type);
      gvn().set_type_bottom(ret_phi);
      _exits.ensure_stack(ret_size);
!     assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
      assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
      _exits.set_argument(0, ret_phi); // here is where the parser finds it
      // Note:  ret_phi is not yet pushed, until do_exits.
    }
  }
  
- 
  //----------------------------build_start_state-------------------------------
  // Construct a state which contains only the incoming arguments from an
  // unknown caller.  The method & bci will be NULL & InvocationEntryBci.
  JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
!   int        arg_size = tf->domain()->cnt();
!   int        max_size = MAX2(arg_size, (int)tf->range()->cnt());
    JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
    SafePointNode* map  = new SafePointNode(max_size, NULL);
    record_for_igvn(map);
    assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
    Node_Notes* old_nn = default_node_notes();
    if (old_nn != NULL && has_method()) {
      Node_Notes* entry_nn = old_nn->clone(this);
--- 814,849 ----
      // types will not join when we transform and push in do_exits().
      const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
      if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
        ret_type = TypeOopPtr::BOTTOM;
      }
+     if ((_caller->has_method() || tf()->returns_value_type_as_fields()) &&
+         ret_type->is_valuetypeptr() && ret_type->value_klass()->is_scalarizable() && !ret_type->maybe_null()) {
+       // Scalarize value type return when inlining or with multiple return values
+       ret_type = TypeValueType::make(ret_type->value_klass());
+     }
      int         ret_size = type2size[ret_type->basic_type()];
      Node*       ret_phi  = new PhiNode(region, ret_type);
      gvn().set_type_bottom(ret_phi);
      _exits.ensure_stack(ret_size);
!     assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
      assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
      _exits.set_argument(0, ret_phi); // here is where the parser finds it
      // Note:  ret_phi is not yet pushed, until do_exits.
    }
  }
  
  //----------------------------build_start_state-------------------------------
  // Construct a state which contains only the incoming arguments from an
  // unknown caller.  The method & bci will be NULL & InvocationEntryBci.
  JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
!   int        arg_size = tf->domain_sig()->cnt();
!   int        max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
    JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
    SafePointNode* map  = new SafePointNode(max_size, NULL);
+   map->set_jvms(jvms);
+   jvms->set_map(map);
    record_for_igvn(map);
    assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
    Node_Notes* old_nn = default_node_notes();
    if (old_nn != NULL && has_method()) {
      Node_Notes* entry_nn = old_nn->clone(this);

*** 825,848 ****
      entry_jvms->set_offsets(0);
      entry_jvms->set_bci(entry_bci());
      entry_nn->set_jvms(entry_jvms);
      set_default_node_notes(entry_nn);
    }
!   uint i;
!   for (i = 0; i < (uint)arg_size; i++) {
!     Node* parm = initial_gvn()->transform(new ParmNode(start, i));
      map->init_req(i, parm);
      // Record all these guys for later GVN.
      record_for_igvn(parm);
    }
!   for (; i < map->req(); i++) {
!     map->init_req(i, top());
    }
    assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
    set_default_node_notes(old_nn);
-   map->set_jvms(jvms);
-   jvms->set_map(map);
    return jvms;
  }
  
  //-----------------------------make_node_notes---------------------------------
  Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
--- 851,894 ----
      entry_jvms->set_offsets(0);
      entry_jvms->set_bci(entry_bci());
      entry_nn->set_jvms(entry_jvms);
      set_default_node_notes(entry_nn);
    }
!   PhaseGVN& gvn = *initial_gvn();
!   uint j = 0;
!   ExtendedSignature sig_cc = ExtendedSignature(method()->get_sig_cc(), SigEntryFilter());
!   for (uint i = 0; i < (uint)arg_size; i++) {
!     const Type* t = tf->domain_sig()->field_at(i);
!     Node* parm = NULL;
!     if (has_scalarized_args() && t->is_valuetypeptr() && !t->maybe_null()) {
!       // Value type arguments are not passed by reference: we get an argument per
!       // field of the value type. Build ValueTypeNodes from the value type arguments.
!       GraphKit kit(jvms, &gvn);
!       kit.set_control(map->control());
!       Node* old_mem = map->memory();
!       // Use immutable memory for value type loads and restore it below
!       // TODO make sure value types are always loaded from immutable memory
!       kit.set_all_memory(C->immutable_memory());
!       parm = ValueTypeNode::make_from_multi(&kit, start, sig_cc, t->value_klass(), j, true);
!       map->set_control(kit.control());
!       map->set_memory(old_mem);
!     } else {
!       parm = gvn.transform(new ParmNode(start, j++));
!       BasicType bt = t->basic_type();
!       while (i >= TypeFunc::Parms && SigEntry::next_is_reserved(sig_cc, bt, true)) {
!         j += type2size[bt]; // Skip reserved arguments
!       }
!     }
      map->init_req(i, parm);
      // Record all these guys for later GVN.
      record_for_igvn(parm);
    }
!   for (; j < map->req(); j++) {
!     map->init_req(j, top());
    }
    assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
    set_default_node_notes(old_nn);
    return jvms;
  }
  
  //-----------------------------make_node_notes---------------------------------
  Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {

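
The two-cursor bookkeeping above (i walks the declared signature while j walks the expanded, scalarized argument list) can be sketched standalone as follows; the Param struct and the counts are hypothetical, and reserved-slot skipping is elided.

    #include <cstdio>
    #include <vector>

    struct Param { bool is_value_type; int field_count; };  // hypothetical

    int main() {
      // Declared signature: (int, ValueType{3 fields}, long)
      std::vector<Param> declared = {{false, 1}, {true, 3}, {false, 1}};
      int j = 0;  // cursor into the expanded (scalarized) argument list
      for (size_t i = 0; i < declared.size(); i++) {
        int slots = declared[i].is_value_type ? declared[i].field_count : 1;
        std::printf("declared arg %zu -> incoming slots [%d, %d)\n", i, j, j + slots);
        j += slots;  // a value type consumes one incoming slot per field
      }
      return 0;
    }
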
*** 865,881 ****
                         kit.i_o(),
                         kit.reset_memory(),
                         kit.frameptr(),
                         kit.returnadr());
    // Add zero or 1 return values
!   int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
    if (ret_size > 0) {
      kit.inc_sp(-ret_size);  // pop the return value(s)
      kit.sync_jvms();
!     ret->add_req(kit.argument(0));
      // Note:  The second dummy edge is not needed by a ReturnNode.
    }
    // bind it to root
    root()->add_req(ret);
    record_for_igvn(ret);
    initial_gvn()->transform_no_reclaim(ret);
  }
--- 911,947 ----
                         kit.i_o(),
                         kit.reset_memory(),
                         kit.frameptr(),
                         kit.returnadr());
    // Add zero or 1 return values
!   int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
    if (ret_size > 0) {
      kit.inc_sp(-ret_size);  // pop the return value(s)
      kit.sync_jvms();
!     Node* res = kit.argument(0);
!     if (tf()->returns_value_type_as_fields()) {
!       // Multiple return values (value type fields): add as many edges
!       // to the Return node as returned values.
!       assert(res->is_ValueType(), "what else supports multi value return?");
!       ValueTypeNode* vt = res->as_ValueType();
!       ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms);
!       if (vt->is_allocated(&kit.gvn()) && !StressValueTypeReturnedAsFields) {
!         ret->init_req(TypeFunc::Parms, vt->get_oop());
!       } else {
!         ret->init_req(TypeFunc::Parms, vt->tagged_klass(kit.gvn()));
!       }
!       const Array<SigEntry>* sig_array = vt->type()->is_valuetype()->value_klass()->extended_sig();
!       GrowableArray<SigEntry> sig = GrowableArray<SigEntry>(sig_array->length());
!       sig.appendAll(sig_array);
!       ExtendedSignature sig_cc = ExtendedSignature(&sig, SigEntryFilter());
!       uint idx = TypeFunc::Parms+1;
!       vt->pass_fields(&kit, ret, sig_cc, idx);
!     } else {
!       ret->add_req(res);
        // Note:  The second dummy edge is not needed by a ReturnNode.
      }
+   }
    // bind it to root
    root()->add_req(ret);
    record_for_igvn(ret);
    initial_gvn()->transform_no_reclaim(ret);
  }

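
When a value type is returned as fields, the first return slot carries either the heap oop or the klass pointer tagged in its low bit (vt->tagged_klass(...) above), so the caller can tell which case it got. A standalone sketch of that tagging trick, assuming only that pointers are at least 2-byte aligned (all names below are hypothetical, not HotSpot code):

    #include <cstdint>
    #include <cstdio>

    // The low bit of an aligned pointer is free for tagging.
    static uintptr_t tag_klass(const void* klass) {
      return reinterpret_cast<uintptr_t>(klass) | 1u;        // set low bit
    }
    static bool is_tagged_klass(uintptr_t word) { return (word & 1u) != 0; }
    static const void* untag(uintptr_t word) {
      return reinterpret_cast<const void*>(word & ~uintptr_t(1));
    }

    int main() {
      static int dummy_klass;                                // stand-in for a klass
      uintptr_t word = tag_klass(&dummy_klass);
      std::printf("tagged=%d roundtrip=%d\n", is_tagged_klass(word),
                  untag(word) == &dummy_klass);
      return 0;
    }
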
*** 1027,1038 ****
    for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
      // transform each slice of the original memphi:
      mms.set_memory(_gvn.transform(mms.memory()));
    }
  
!   if (tf()->range()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
      Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
      if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
        // In case of concurrent class loading, the type we set for the
        // ret_phi in build_exits() may have been too optimistic and the
        // ret_phi may be top now.
--- 1093,1104 ----
    for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
      // transform each slice of the original memphi:
      mms.set_memory(_gvn.transform(mms.memory()));
    }
  
!   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
      Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
      if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
        // In case of concurrent class loading, the type we set for the
        // ret_phi in build_exits() may have been too optimistic and the
        // ret_phi may be top now.

*** 1168,1178 ****
      set_all_memory(reset_memory());
    }
    assert(merged_memory(), "");
  
    // Now add the locals which are initially bound to arguments:
!   uint arg_size = tf()->domain()->cnt();
    ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
    for (i = TypeFunc::Parms; i < arg_size; i++) {
      map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
    }
--- 1234,1244 ----
      set_all_memory(reset_memory());
    }
    assert(merged_memory(), "");
  
    // Now add the locals which are initially bound to arguments:
!   uint arg_size = tf()->domain_sig()->cnt();
    ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
    for (i = TypeFunc::Parms; i < arg_size; i++) {
      map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
    }

*** 1627,1636 ****
--- 1693,1735 ----
    // Zap extra stack slots to top
    assert(sp() == target->start_sp(), "");
    clean_stack(sp());
  
+   // Check for merge conflicts involving value types
+   JVMState* old_jvms = map()->jvms();
+   int old_bci = bci();
+   JVMState* tmp_jvms = old_jvms->clone_shallow(C);
+   tmp_jvms->set_should_reexecute(true);
+   map()->set_jvms(tmp_jvms);
+   // Execution needs to restart at the next bytecode (entry of next
+   // block)
+   if (target->is_merged() ||
+       pnum > PhiNode::Input ||
+       target->is_handler() ||
+       target->is_loop_head()) {
+     set_parse_bci(target->start());
+     for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
+       Node* n = map()->in(j);    // Incoming change to target state.
+       const Type* t = NULL;
+       if (tmp_jvms->is_loc(j)) {
+         t = target->local_type_at(j - tmp_jvms->locoff());
+       } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
+         t = target->stack_type_at(j - tmp_jvms->stkoff());
+       }
+       if (t != NULL && t != Type::BOTTOM) {
+         if (n->is_ValueType() && !t->isa_valuetype()) {
+           // Allocate value type in src block to be able to merge it with oop in target block
+           map()->set_req(j, ValueTypePtrNode::make_from_value_type(this, n->as_ValueType(), true));
+         }
+         assert(!t->isa_valuetype() || n->is_ValueType(), "inconsistent typeflow info");
+       }
+     }
+   }
+   map()->set_jvms(old_jvms);
+   set_parse_bci(old_bci);
+ 
    if (!target->is_merged()) {   // No prior mapping at this bci
      if (TraceOptoParse) { tty->print(" with empty state");  }
  
      // If this path is dead, do not bother capturing it as a merge.
      // It is "as if" we had 1 fewer predecessors from the beginning.

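
The conflict fix above boxes (allocates) a scalarized value type in the source block whenever the target block expects an oop, so both predecessors contribute a pointer to the merge. A standalone C++ analogy, with a hypothetical Point and unique_ptr standing in for a heap oop:

    #include <cstdio>
    #include <memory>

    struct Point { int x; int y; };                          // hypothetical value type

    // "Allocate" a scalarized copy so it looks like a heap reference.
    static std::unique_ptr<Point> box(const Point& fields) {
      return std::make_unique<Point>(fields);
    }

    int main() {
      Point scalarized = {1, 2};                 // path A: fields only, no object
      std::unique_ptr<Point> oop = box({3, 4});  // path B: already a reference
      bool take_a = true;
      // Box the scalarized copy first; now both paths merge a pointer.
      std::unique_ptr<Point> merged = take_a ? box(scalarized) : std::move(oop);
      std::printf("%d %d\n", merged->x, merged->y);
      return 0;
    }
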
*** 1680,1689 ****
--- 1779,1789 ----
  #ifdef ASSERT
    if (target->is_SEL_head()) {
      target->mark_merged_backedge(block());
    }
  #endif
  
+   // We must not manufacture more phis if the target is already parsed.
    bool nophi = target->is_parsed();
  
    SafePointNode* newin = map();// Hang on to incoming mapping
    Block* save_block = block(); // Hang on to incoming block;

*** 1715,1732 ****
    }
  
    // Update all the non-control inputs to map:
    assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
    bool check_elide_phi = target->is_SEL_backedge(save_block);
    for (uint j = 1; j < newin->req(); j++) {
      Node* m = map()->in(j);   // Current state of target.
      Node* n = newin->in(j);   // Incoming change to target state.
      PhiNode* phi;
!     if (m->is_Phi() && m->as_Phi()->region() == r)
!       phi = m->as_Phi();
!     else
!       phi = NULL;
      if (m != n) {             // Different; must merge
        switch (j) {
        // Frame pointer and Return Address never changes
        case TypeFunc::FramePtr:// Drop m, use the original value
        case TypeFunc::ReturnAdr:
--- 1815,1836 ----
    }
  
    // Update all the non-control inputs to map:
    assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
    bool check_elide_phi = target->is_SEL_backedge(save_block);
+   bool last_merge = (pnum == PhiNode::Input);
    for (uint j = 1; j < newin->req(); j++) {
      Node* m = map()->in(j);   // Current state of target.
      Node* n = newin->in(j);   // Incoming change to target state.
      PhiNode* phi;
!     if (m->is_Phi() && m->as_Phi()->region() == r) {
!       phi = m->as_Phi();
!     } else if (m->is_ValueType() && m->as_ValueType()->has_phi_inputs(r)) {
!       phi = m->as_ValueType()->get_oop()->as_Phi();
!     } else {
!       phi = NULL;
!     }
      if (m != n) {             // Different; must merge
        switch (j) {
        // Frame pointer and Return Address never changes
        case TypeFunc::FramePtr:// Drop m, use the original value
        case TypeFunc::ReturnAdr:

*** 1756,1770 ****
        // At this point, n might be top if:
        //  - there is no phi (because TypeFlow detected a conflict), or
        //  - the corresponding control edges is top (a dead incoming path)
        // It is a bug if we create a phi which sees a garbage value on a live path.
  
!       if (phi != NULL) {
          assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
          assert(phi->region() == r, "");
          phi->set_req(pnum, n);  // Then add 'n' to the merge
!         if (pnum == PhiNode::Input) {
            // Last merge for this Phi.
            // So far, Phis have had a reasonable type from ciTypeFlow.
            // Now _gvn will join that with the meet of current inputs.
            // BOTTOM is never permissible here, 'cause pessimistically
            // Phis of pointers cannot lose the basic pointer type.
--- 1860,1897 ----
        // At this point, n might be top if:
        //  - there is no phi (because TypeFlow detected a conflict), or
        //  - the corresponding control edges is top (a dead incoming path)
        // It is a bug if we create a phi which sees a garbage value on a live path.
  
!       // Merging two value types?
!       if (phi != NULL && n->is_ValueType()) {
!         // Reload current state because it may have been updated by ensure_phi
!         m = map()->in(j);
!         ValueTypeNode* vtm = m->as_ValueType(); // Current value type
!         ValueTypeNode* vtn = n->as_ValueType(); // Incoming value type
!         assert(vtm->get_oop() == phi, "Value type should have Phi input");
!         if (TraceOptoParse) {
! #ifdef ASSERT
!           tty->print_cr("\nMerging value types");
!           tty->print_cr("Current:");
!           vtm->dump(2);
!           tty->print_cr("Incoming:");
!           vtn->dump(2);
!           tty->cr();
! #endif
!         }
!         // Do the merge
!         vtm->merge_with(&_gvn, vtn, pnum, last_merge);
!         if (last_merge) {
!           map()->set_req(j, _gvn.transform_no_reclaim(vtm));
!           record_for_igvn(vtm);
!         }
!       } else if (phi != NULL) {
          assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
          assert(phi->region() == r, "");
          phi->set_req(pnum, n);  // Then add 'n' to the merge
!         if (last_merge) {
            // Last merge for this Phi.
            // So far, Phis have had a reasonable type from ciTypeFlow.
            // Now _gvn will join that with the meet of current inputs.
            // BOTTOM is never permissible here, 'cause pessimistically
            // Phis of pointers cannot lose the basic pointer type.

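
merge_with above merges two scalarized value types field by field, the moral equivalent of one Phi per field instead of a single Phi over an oop. A standalone sketch (hypothetical ScalarizedPoint; a ternary stands in for a Phi, not HotSpot code):

    #include <cstdio>

    struct ScalarizedPoint { int x; int y; };                // hypothetical

    // Selects each field individually, like one Phi per field.
    static ScalarizedPoint merge(bool from_path1, const ScalarizedPoint& a,
                                 const ScalarizedPoint& b) {
      ScalarizedPoint r;
      r.x = from_path1 ? b.x : a.x;   // per-field "phi"
      r.y = from_path1 ? b.y : a.y;
      return r;
    }

    int main() {
      ScalarizedPoint a = {1, 2}, b = {3, 4};
      ScalarizedPoint m = merge(true, a, b);
      std::printf("%d %d\n", m.x, m.y);
      return 0;
    }
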
*** 1776,1787 ****
            record_for_igvn(phi);
          }
        }
      } // End of for all values to be merged
  
!     if (pnum == PhiNode::Input &&
!         !r->in(0)) {         // The occasional useless Region
        assert(control() == r, "");
        set_control(r->nonnull_req());
      }
  
      map()->merge_replaced_nodes_with(newin);
--- 1903,1913 ----
            record_for_igvn(phi);
          }
        }
      } // End of for all values to be merged
  
!     if (last_merge && !r->in(0)) { // The occasional useless Region
        assert(control() == r, "");
        set_control(r->nonnull_req());
      }
  
      map()->merge_replaced_nodes_with(newin);

*** 1929,1938 ****
--- 2055,2066 ----
        }
      } else {
        if (n->is_Phi() && n->as_Phi()->region() == r) {
          assert(n->req() == pnum, "must be same size as region");
          n->add_req(NULL);
+       } else if (n->is_ValueType() && n->as_ValueType()->has_phi_inputs(r)) {
+         n->as_ValueType()->add_new_path(r);
        }
      }
    }
    return pnum;
  }

*** 1951,1960 ****
--- 2079,2092 ----
    if (o == top())  return NULL; // TOP always merges into TOP
  
    if (o->is_Phi() && o->as_Phi()->region() == region) {
      return o->as_Phi();
    }
+   ValueTypeBaseNode* vt = o->isa_ValueType();
+   if (vt != NULL && vt->has_phi_inputs(region)) {
+     return vt->get_oop()->as_Phi();
+   }
  
    // Now use a Phi here for merging
    assert(!nocreate, "Cannot build a phi for a block already parsed.");
    const JVMState* jvms = map->jvms();
    const Type* t = NULL;

*** 1970,1981 ****
    } else {
      assert(false, "no type information for this phi");
    }
  
    // If the type falls to bottom, then this must be a local that
!   // is mixing ints and oops or some such.  Forcing it to top
!   // makes it go dead.
    if (t == Type::BOTTOM) {
      map->set_req(idx, top());
      return NULL;
    }
--- 2102,2113 ----
    } else {
      assert(false, "no type information for this phi");
    }
  
    // If the type falls to bottom, then this must be a local that
!   // is already dead or is mixing ints and oops or some such.
!   // Forcing it to top makes it go dead.
    if (t == Type::BOTTOM) {
      map->set_req(idx, top());
      return NULL;
    }

*** 1984,1998 ****
    if (t == Type::TOP || t == Type::HALF) {
      map->set_req(idx, top());
      return NULL;
    }
  
    PhiNode* phi = PhiNode::make(region, o, t);
    gvn().set_type(phi, t);
    if (C->do_escape_analysis()) record_for_igvn(phi);
    map->set_req(idx, phi);
    return phi;
  }
  
  //--------------------------ensure_memory_phi----------------------------------
  // Turn the idx'th slice of the current memory into a Phi
  PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
--- 2116,2139 ----
    if (t == Type::TOP || t == Type::HALF) {
      map->set_req(idx, top());
      return NULL;
    }
  
+   if (vt != NULL) {
+     // Value types are merged by merging their field values.
+     // Create a cloned ValueTypeNode with phi inputs that
+     // represents the merged value type and update the map.
+     vt = vt->clone_with_phis(&_gvn, region);
+     map->set_req(idx, vt);
+     return vt->get_oop()->as_Phi();
+   } else {
      PhiNode* phi = PhiNode::make(region, o, t);
      gvn().set_type(phi, t);
      if (C->do_escape_analysis()) record_for_igvn(phi);
      map->set_req(idx, phi);
      return phi;
+   }
  }
  
  //--------------------------ensure_memory_phi----------------------------------
  // Turn the idx'th slice of the current memory into a Phi
  PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {

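
clone_with_phis above turns each field of the incoming value type into a phi over the region's predecessors; later paths then fill in their inputs (compare add_new_path earlier). A standalone sketch with a hypothetical FieldPhi selector (not HotSpot code):

    #include <cstdio>
    #include <vector>

    struct FieldPhi {                          // hypothetical per-field selector
      std::vector<int> in;                     // one input per predecessor path
      explicit FieldPhi(int npreds) : in(npreds, 0) {}
      int select(int path) const { return in[path]; }
    };

    int main() {
      const int npreds = 2;
      FieldPhi x(npreds), y(npreds);           // merged Point = {phi(x), phi(y)}
      x.in[0] = 1; y.in[0] = 2;                // path 0 contributes {1, 2}
      x.in[1] = 3; y.in[1] = 4;                // path 1 contributes {3, 4}
      std::printf("path 1 -> {%d, %d}\n", x.select(1), y.select(1));
      return 0;
    }
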
*** 2168,2231 ****
        method()->intrinsic_id() == vmIntrinsics::_Object_init) {
      call_register_finalizer();
    }
  
    // Do not set_parse_bci, so that return goo is credited to the return insn.
!   set_bci(InvocationEntryBci);
    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
    }
    if (C->env()->dtrace_method_probes()) {
      make_dtrace_method_exit(method());
    }
-   SafePointNode* exit_return = _exits.map();
-   exit_return->in( TypeFunc::Control  )->add_req( control() );
-   exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
-   Node *mem = exit_return->in( TypeFunc::Memory   );
-   for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
-     if (mms.is_empty()) {
-       // get a copy of the base memory, and patch just this one input
-       const TypePtr* adr_type = mms.adr_type(C);
-       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
-       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
-       gvn().set_type_bottom(phi);
-       phi->del_req(phi->req()-1);  // prepare to re-patch
-       mms.set_memory(phi);
-     }
-     mms.memory()->add_req(mms.memory2());
-   }
- 
    // frame pointer is always same, already captured
    if (value != NULL) {
-     // If returning oops to an interface-return, there is a silent free
-     // cast from oop to interface allowed by the Verifier.  Make it explicit
-     // here.
      Node* phi = _exits.argument(0);
!     const TypeInstPtr *tr = phi->bottom_type()->isa_instptr();
!     if (tr && tr->klass()->is_loaded() &&
!         tr->klass()->is_interface()) {
!       const TypeInstPtr *tp = value->bottom_type()->isa_instptr();
!       if (tp && tp->klass()->is_loaded() &&
!           !tp->klass()->is_interface()) {
          // sharpen the type eagerly; this eases certain assert checking
!         if (tp->higher_equal(TypeInstPtr::NOTNULL))
            tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
          value = _gvn.transform(new CheckCastPPNode(0, value, tr));
        }
      } else {
!       // Also handle returns of oop-arrays to an arrays-of-interface return
        const TypeInstPtr* phi_tip;
        const TypeInstPtr* val_tip;
!       Type::get_arrays_base_elements(phi->bottom_type(), value->bottom_type(), &phi_tip, &val_tip);
        if (phi_tip != NULL && phi_tip->is_loaded() && phi_tip->klass()->is_interface() &&
            val_tip != NULL && val_tip->is_loaded() && !val_tip->klass()->is_interface()) {
!         value = _gvn.transform(new CheckCastPPNode(0, value, phi->bottom_type()));
        }
      }
      phi->add_req(value);
    }
  
    if (_first_return) {
      _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
      _first_return = false;
    } else {
      _exits.map()->merge_replaced_nodes_with(map());
--- 2309,2389 ----
        method()->intrinsic_id() == vmIntrinsics::_Object_init) {
      call_register_finalizer();
    }
  
    // Do not set_parse_bci, so that return goo is credited to the return insn.
!   // vreturn can trigger an allocation so vreturn can throw. Setting
!   // the bci here breaks exception handling. Commenting this out
!   // doesn't seem to break anything.
!   // set_bci(InvocationEntryBci);
    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
    }
    if (C->env()->dtrace_method_probes()) {
      make_dtrace_method_exit(method());
    }
  
    // frame pointer is always same, already captured
    if (value != NULL) {
      Node* phi = _exits.argument(0);
!     const Type* return_type = phi->bottom_type();
!     const TypeOopPtr* tr = return_type->isa_oopptr();
!     if (return_type->isa_valuetype()) {
!       // Value type is returned as fields, make sure it is scalarized
!       if (!value->is_ValueType()) {
!         value = ValueTypeNode::make_from_oop(this, value, return_type->is_valuetype()->value_klass());
!       }
!       if (!_caller->has_method()) {
!         // Value type is returned as fields from root method, make
!         // sure all non-flattened value type fields are allocated.
!         assert(tf()->returns_value_type_as_fields(), "must be returned as fields");
!         value = value->as_ValueType()->allocate_fields(this);
!       }
!     } else if (value->is_ValueType()) {
!       // Value type is returned as oop, make sure it is allocated
!       assert(tr && tr->can_be_value_type(), "must return a value type pointer");
!       value = ValueTypePtrNode::make_from_value_type(this, value->as_ValueType());
!     } else if (tr && tr->isa_instptr() && tr->klass()->is_loaded() && tr->klass()->is_interface()) {
!       // If returning oops to an interface-return, there is a silent free
!       // cast from oop to interface allowed by the Verifier. Make it explicit here.
!       const TypeInstPtr* tp = value->bottom_type()->isa_instptr();
!       if (tp && tp->klass()->is_loaded() && !tp->klass()->is_interface()) {
          // sharpen the type eagerly; this eases certain assert checking
!         if (tp->higher_equal(TypeInstPtr::NOTNULL)) {
            tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
+         }
          value = _gvn.transform(new CheckCastPPNode(0, value, tr));
        }
      } else {
!       // Handle returns of oop-arrays to an arrays-of-interface return
        const TypeInstPtr* phi_tip;
        const TypeInstPtr* val_tip;
!       Type::get_arrays_base_elements(return_type, value->bottom_type(), &phi_tip, &val_tip);
        if (phi_tip != NULL && phi_tip->is_loaded() && phi_tip->klass()->is_interface() &&
            val_tip != NULL && val_tip->is_loaded() && !val_tip->klass()->is_interface()) {
!         value = _gvn.transform(new CheckCastPPNode(0, value, return_type));
        }
      }
      phi->add_req(value);
    }
  
+   SafePointNode* exit_return = _exits.map();
+   exit_return->in( TypeFunc::Control  )->add_req( control() );
+   exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
+   Node *mem = exit_return->in( TypeFunc::Memory   );
+   for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
+     if (mms.is_empty()) {
+       // get a copy of the base memory, and patch just this one input
+       const TypePtr* adr_type = mms.adr_type(C);
+       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
+       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
+       gvn().set_type_bottom(phi);
+       phi->del_req(phi->req()-1);  // prepare to re-patch
+       mms.set_memory(phi);
+     }
+     mms.memory()->add_req(mms.memory2());
+   }
+ 
    if (_first_return) {
      _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
      _first_return = false;
    } else {
      _exits.map()->merge_replaced_nodes_with(map());