--- old/src/hotspot/share/opto/parse1.cpp	2019-03-11 14:26:54.150354492 +0100
+++ new/src/hotspot/share/opto/parse1.cpp	2019-03-11 14:26:53.938354494 +0100
@@ -37,6 +37,7 @@
 #include "opto/parse.hpp"
 #include "opto/rootnode.hpp"
 #include "opto/runtime.hpp"
+#include "opto/valuetypenode.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/safepointMechanism.hpp"
@@ -101,10 +102,16 @@
 
 // Construct a node which can be used to get incoming state for
 // on stack replacement.
-Node *Parse::fetch_interpreter_state(int index,
-                                     BasicType bt,
-                                     Node *local_addrs,
-                                     Node *local_addrs_base) {
+Node* Parse::fetch_interpreter_state(int index,
+                                     const Type* type,
+                                     Node* local_addrs,
+                                     Node* local_addrs_base) {
+  BasicType bt = type->basic_type();
+  if (type == TypePtr::NULL_PTR) {
+    // Ptr types are mixed together with T_ADDRESS but NULL is
+    // really for T_OBJECT types so correct it.
+    bt = T_OBJECT;
+  }
   Node *mem = memory(Compile::AliasIdxRaw);
   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
   Node *ctl = control();
@@ -116,6 +123,7 @@
   case T_INT:     l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
   case T_FLOAT:   l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
   case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
+  case T_VALUETYPE:
   case T_OBJECT:  l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
   case T_LONG:
   case T_DOUBLE: {
@@ -146,8 +154,11 @@
 // The safepoint is a map which will feed an uncommon trap.
 Node* Parse::check_interpreter_type(Node* l, const Type* type,
                                     SafePointNode* &bad_type_exit) {
-
   const TypeOopPtr* tp = type->isa_oopptr();
+  if (type->isa_valuetype() != NULL) {
+    // The interpreter passes value types as oops
+    tp = TypeOopPtr::make_from_klass(type->isa_valuetype()->value_klass());
+  }
 
   // TypeFlow may assert null-ness if a type appears unloaded.
   if (type == TypePtr::NULL_PTR ||
@@ -170,6 +181,12 @@
   if (tp != NULL && tp->klass() != C->env()->Object_klass()) {
     // TypeFlow asserted a specific object type.  Value must have that type.
     Node* bad_type_ctrl = NULL;
+    if (tp->is_valuetypeptr()) {
+      // Check value types for null here to prevent checkcast from adding an
+      // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
+      l = null_check_oop(l, &bad_type_ctrl);
+      bad_type_exit->control()->add_req(bad_type_ctrl);
+    }
     l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
     bad_type_exit->control()->add_req(bad_type_ctrl);
   }
@@ -188,7 +205,6 @@
 
   int max_locals = jvms()->loc_size();
   int max_stack  = jvms()->stk_size();
-
   // Mismatch between method and jvms can occur since map briefly held
   // an OSR entry state (which takes up one RawPtr word).
   assert(max_locals == method()->max_locals(), "sanity");
@@ -226,14 +242,12 @@
 
     // Make a BoxLockNode for the monitor.
     Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
-
     // Displaced headers and locked objects are interleaved in the
     // temp OSR buffer.  We only copy the locked objects out here.
     // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
-    Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
+    Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
     // Try and copy the displaced header to the BoxNode
-    Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
-
+    Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
 
     store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
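
The two fetches above rely on the interleaved layout of the OSR temp buffer: slot 2*index holds the locked oop and slot 2*index + 1 its displaced header, and fetch_interpreter_state addresses slot k at a negative word offset from the monitors address (see the basic_plus_adr call with -index*wordSize). A minimal standalone sketch of that addressing, with hypothetical names and assuming a word is pointer-sized; this is not HotSpot code:

    #include <cstdint>

    static const int kWordSize = sizeof(intptr_t);

    // Slot k of the buffer lives k machine words below monitors_addr.
    static intptr_t* monitor_slot(char* monitors_addr, int index, bool displaced_hdr) {
      int k = 2 * index + (displaced_hdr ? 1 : 0);
      return reinterpret_cast<intptr_t*>(monitors_addr - k * kWordSize);
    }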
@@ -300,13 +314,7 @@
       continue;
     }
     // Construct code to access the appropriate local.
-    BasicType bt = type->basic_type();
-    if (type == TypePtr::NULL_PTR) {
-      // Ptr types are mixed together with T_ADDRESS but NULL is
-      // really for T_OBJECT types so correct it.
-      bt = T_OBJECT;
-    }
-    Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
+    Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);
     set_local(index, value);
   }
@@ -595,6 +603,18 @@
     return;
   }
 
+  // Handle value type arguments
+  int arg_size_sig = tf()->domain_sig()->cnt();
+  for (uint i = 0; i < (uint)arg_size_sig; i++) {
+    Node* parm = map()->in(i);
+    const Type* t = _gvn.type(parm);
+    if (t->is_valuetypeptr() && t->value_klass()->is_scalarizable() && !t->maybe_null()) {
+      // Create ValueTypeNode from the oop and replace the parameter
+      Node* vt = ValueTypeNode::make_from_oop(this, parm, t->value_klass());
+      map()->replace_edge(parm, vt);
+    }
+  }
+
   entry_map = map();  // capture any changes performed by method setup code
   assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
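
What make_from_oop buys here: a scalarizable, never-null value type argument arrives as a single oop, and the parser immediately replaces that oop in the map with a node carrying the individual field values, so later bytecodes read fields directly and the heap buffer can become dead. A rough standalone analogy (hypothetical types, not the C2 IR classes):

    #include <cstdio>

    struct PointBuffer { int x; int y; };  // stand-in for a buffered value type

    // Analogue of ValueTypeNode::make_from_oop: capture the field values so
    // subsequent uses no longer depend on the heap object.
    struct ScalarizedPoint {
      int x, y;
      static ScalarizedPoint make_from_oop(const PointBuffer* oop) {
        return ScalarizedPoint{oop->x, oop->y};
      }
    };

    int main() {
      PointBuffer buf = {1, 2};
      ScalarizedPoint p = ScalarizedPoint::make_from_oop(&buf);
      std::printf("%d\n", p.x + p.y);  // only the copied fields are used now
      return 0;
    }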
@@ -777,8 +797,8 @@
   _exits.set_all_memory(memphi);
 
   // Add a return value to the exit state.  (Do not push it yet.)
-  if (tf()->range()->cnt() > TypeFunc::Parms) {
-    const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
+  if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
+    const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
     if (ret_type->isa_int()) {
       BasicType ret_bt = method()->return_type()->basic_type();
       if (ret_bt == T_BOOLEAN ||
@@ -796,26 +816,32 @@
     if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
       ret_type = TypeOopPtr::BOTTOM;
     }
+    if ((_caller->has_method() || tf()->returns_value_type_as_fields()) &&
+        ret_type->is_valuetypeptr() && ret_type->value_klass()->is_scalarizable() && !ret_type->maybe_null()) {
+      // Scalarize value type return when inlining or with multiple return values
+      ret_type = TypeValueType::make(ret_type->value_klass());
+    }
     int         ret_size = type2size[ret_type->basic_type()];
     Node*       ret_phi  = new PhiNode(region, ret_type);
     gvn().set_type_bottom(ret_phi);
     _exits.ensure_stack(ret_size);
-    assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
+    assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
     assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
     _exits.set_argument(0, ret_phi);  // here is where the parser finds it
     // Note:  ret_phi is not yet pushed, until do_exits.
   }
 }
-
 //----------------------------build_start_state-------------------------------
 // Construct a state which contains only the incoming arguments from an
 // unknown caller.  The method & bci will be NULL & InvocationEntryBci.
 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
-  int        arg_size = tf->domain()->cnt();
-  int        max_size = MAX2(arg_size, (int)tf->range()->cnt());
+  int        arg_size = tf->domain_sig()->cnt();
+  int        max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
   JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
   SafePointNode* map  = new SafePointNode(max_size, NULL);
+  map->set_jvms(jvms);
+  jvms->set_map(map);
   record_for_igvn(map);
   assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
   Node_Notes* old_nn = default_node_notes();
@@ -827,20 +853,40 @@
     entry_nn->set_jvms(entry_jvms);
     set_default_node_notes(entry_nn);
   }
-  uint i;
-  for (i = 0; i < (uint)arg_size; i++) {
-    Node* parm = initial_gvn()->transform(new ParmNode(start, i));
+  PhaseGVN& gvn = *initial_gvn();
+  uint j = 0;
+  ExtendedSignature sig_cc = ExtendedSignature(method()->get_sig_cc(), SigEntryFilter());
+  for (uint i = 0; i < (uint)arg_size; i++) {
+    const Type* t = tf->domain_sig()->field_at(i);
+    Node* parm = NULL;
+    if (has_scalarized_args() && t->is_valuetypeptr() && !t->maybe_null()) {
+      // Value type arguments are not passed by reference: we get an argument per
+      // field of the value type. Build ValueTypeNodes from the value type arguments.
+      GraphKit kit(jvms, &gvn);
+      kit.set_control(map->control());
+      Node* old_mem = map->memory();
+      // Use immutable memory for value type loads and restore it below
+      // TODO make sure value types are always loaded from immutable memory
+      kit.set_all_memory(C->immutable_memory());
+      parm = ValueTypeNode::make_from_multi(&kit, start, sig_cc, t->value_klass(), j, true);
+      map->set_control(kit.control());
+      map->set_memory(old_mem);
+    } else {
+      parm = gvn.transform(new ParmNode(start, j++));
+      BasicType bt = t->basic_type();
+      while (i >= TypeFunc::Parms && SigEntry::next_is_reserved(sig_cc, bt, true)) {
+        j += type2size[bt]; // Skip reserved arguments
+      }
+    }
     map->init_req(i, parm);
     // Record all these guys for later GVN.
     record_for_igvn(parm);
   }
-  for (; i < map->req(); i++) {
-    map->init_req(i, top());
+  for (; j < map->req(); j++) {
+    map->init_req(j, top());
   }
   assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
   set_default_node_notes(old_nn);
-  map->set_jvms(jvms);
-  jvms->set_map(map);
   return jvms;
 }
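
The subtle part of the loop above is the double bookkeeping: i indexes map slots, one per entry of the declared signature, while j indexes positions in the scalarized calling convention, where a value type argument occupies one position per field (make_from_multi advances j internally) and reserved entries are skipped. A simplified standalone model of that correspondence, ignoring two-word primitives and reserved entries (made-up data, not HotSpot code):

    #include <cstdio>
    #include <vector>

    struct ArgInfo {
      bool scalarized;   // passed as individual fields?
      int  field_count;  // flattened field count when scalarized
    };

    int main() {
      // Hypothetical signature: (int, value type with 3 fields, Object)
      std::vector<ArgInfo> sig = {{false, 1}, {true, 3}, {false, 1}};
      int j = 0;  // next incoming argument position
      for (size_t i = 0; i < sig.size(); i++) {
        int consumed = sig[i].scalarized ? sig[i].field_count : 1;
        std::printf("map slot %zu <- incoming positions [%d..%d)\n",
                    i, j, j + consumed);
        j += consumed;
      }
      return 0;
    }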
@@ -867,12 +913,32 @@
                            kit.frameptr(),
                            kit.returnadr());
   // Add zero or 1 return values
-  int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
+  int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
   if (ret_size > 0) {
     kit.inc_sp(-ret_size);  // pop the return value(s)
     kit.sync_jvms();
-    ret->add_req(kit.argument(0));
-    // Note:  The second dummy edge is not needed by a ReturnNode.
+    Node* res = kit.argument(0);
+    if (tf()->returns_value_type_as_fields()) {
+      // Multiple return values (value type fields): add as many edges
+      // to the Return node as returned values.
+      assert(res->is_ValueType(), "what else supports multi value return?");
+      ValueTypeNode* vt = res->as_ValueType();
+      ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms);
+      if (vt->is_allocated(&kit.gvn()) && !StressValueTypeReturnedAsFields) {
+        ret->init_req(TypeFunc::Parms, vt->get_oop());
+      } else {
+        ret->init_req(TypeFunc::Parms, vt->tagged_klass(kit.gvn()));
+      }
+      const Array<SigEntry>* sig_array = vt->type()->is_valuetype()->value_klass()->extended_sig();
+      GrowableArray<SigEntry> sig = GrowableArray<SigEntry>(sig_array->length());
+      sig.appendAll(sig_array);
+      ExtendedSignature sig_cc = ExtendedSignature(&sig, SigEntryFilter());
+      uint idx = TypeFunc::Parms+1;
+      vt->pass_fields(&kit, ret, sig_cc, idx);
+    } else {
+      ret->add_req(res);
+      // Note:  The second dummy edge is not needed by a ReturnNode.
+    }
   }
   // bind it to root
   root()->add_req(ret);
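
Note how the first return edge does double duty: when the value type already has a buffer (and StressValueTypeReturnedAsFields is off), its oop goes into the TypeFunc::Parms slot; otherwise a tagged klass pointer goes there so the caller can tell the two cases apart, with the field values following in the remaining slots via pass_fields. A sketch of that convention, assuming the tag is the low pointer bit, which an aligned oop never has set:

    #include <cstdint>

    // First return slot: either a real (aligned) oop or a klass pointer
    // tagged by setting its lowest bit.
    static inline uintptr_t tagged_klass_ptr(const void* klass) {
      return reinterpret_cast<uintptr_t>(klass) | 1u;
    }

    static inline bool returned_as_fields(uintptr_t first_slot) {
      return (first_slot & 1u) != 0;  // tagged: field values are in the next slots
    }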
@@ -1029,8 +1095,8 @@
     mms.set_memory(_gvn.transform(mms.memory()));
   }
 
-  if (tf()->range()->cnt() > TypeFunc::Parms) {
-    const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
+  if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
+    const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
     if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
       // In case of concurrent class loading, the type we set for the
@@ -1170,7 +1236,7 @@
   assert(merged_memory(), "");
 
   // Now add the locals which are initially bound to arguments:
-  uint arg_size = tf()->domain()->cnt();
+  uint arg_size = tf()->domain_sig()->cnt();
   ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
   for (i = TypeFunc::Parms; i < arg_size; i++) {
     map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
@@ -1629,6 +1695,39 @@
   assert(sp() == target->start_sp(), "");
   clean_stack(sp());
 
+  // Check for merge conflicts involving value types
+  JVMState* old_jvms = map()->jvms();
+  int old_bci = bci();
+  JVMState* tmp_jvms = old_jvms->clone_shallow(C);
+  tmp_jvms->set_should_reexecute(true);
+  map()->set_jvms(tmp_jvms);
+  // Execution needs to restart at the next bytecode (entry of next
+  // block)
+  if (target->is_merged() ||
+      pnum > PhiNode::Input ||
+      target->is_handler() ||
+      target->is_loop_head()) {
+    set_parse_bci(target->start());
+    for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
+      Node* n = map()->in(j);   // Incoming change to target state.
+      const Type* t = NULL;
+      if (tmp_jvms->is_loc(j)) {
+        t = target->local_type_at(j - tmp_jvms->locoff());
+      } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
+        t = target->stack_type_at(j - tmp_jvms->stkoff());
+      }
+      if (t != NULL && t != Type::BOTTOM) {
+        if (n->is_ValueType() && !t->isa_valuetype()) {
+          // Allocate value type in src block to be able to merge it with oop in target block
+          map()->set_req(j, ValueTypePtrNode::make_from_value_type(this, n->as_ValueType(), true));
+        }
+        assert(!t->isa_valuetype() || n->is_ValueType(), "inconsistent typeflow info");
+      }
+    }
+  }
+  map()->set_jvms(old_jvms);
+  set_parse_bci(old_bci);
+
   if (!target->is_merged()) {   // No prior mapping at this bci
     if (TraceOptoParse) { tty->print(" with empty state");  }
@@ -1682,6 +1781,7 @@
       target->mark_merged_backedge(block());
     }
 #endif
+
     // We must not manufacture more phis if the target is already parsed.
     bool nophi = target->is_parsed();
@@ -1717,14 +1817,18 @@
     // Update all the non-control inputs to map:
     assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
     bool check_elide_phi = target->is_SEL_backedge(save_block);
+    bool last_merge = (pnum == PhiNode::Input);
     for (uint j = 1; j < newin->req(); j++) {
       Node* m = map()->in(j);   // Current state of target.
       Node* n = newin->in(j);   // Incoming change to target state.
       PhiNode* phi;
-      if (m->is_Phi() && m->as_Phi()->region() == r)
+      if (m->is_Phi() && m->as_Phi()->region() == r) {
         phi = m->as_Phi();
-      else
+      } else if (m->is_ValueType() && m->as_ValueType()->has_phi_inputs(r)) {
+        phi = m->as_ValueType()->get_oop()->as_Phi();
+      } else {
         phi = NULL;
+      }
       if (m != n) {             // Different; must merge
         switch (j) {
         // Frame pointer and Return Address never changes
@@ -1758,11 +1862,34 @@
       // - the corresponding control edge is top (a dead incoming path)
       // It is a bug if we create a phi which sees a garbage value on a live path.
 
-      if (phi != NULL) {
+      // Merging two value types?
+      if (phi != NULL && n->is_ValueType()) {
+        // Reload current state because it may have been updated by ensure_phi
+        m = map()->in(j);
+        ValueTypeNode* vtm = m->as_ValueType(); // Current value type
+        ValueTypeNode* vtn = n->as_ValueType(); // Incoming value type
+        assert(vtm->get_oop() == phi, "Value type should have Phi input");
+        if (TraceOptoParse) {
+#ifdef ASSERT
+          tty->print_cr("\nMerging value types");
+          tty->print_cr("Current:");
+          vtm->dump(2);
+          tty->print_cr("Incoming:");
+          vtn->dump(2);
+          tty->cr();
+#endif
+        }
+        // Do the merge
+        vtm->merge_with(&_gvn, vtn, pnum, last_merge);
+        if (last_merge) {
+          map()->set_req(j, _gvn.transform_no_reclaim(vtm));
+          record_for_igvn(vtm);
+        }
+      } else if (phi != NULL) {
        assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
         assert(phi->region() == r, "");
         phi->set_req(pnum, n);  // Then add 'n' to the merge
-        if (pnum == PhiNode::Input) {
+        if (last_merge) {
           // Last merge for this Phi.
           // So far, Phis have had a reasonable type from ciTypeFlow.
           // Now _gvn will join that with the meet of current inputs.
@@ -1778,8 +1905,7 @@
       }
     } // End of for all values to be merged
 
-    if (pnum == PhiNode::Input &&
-        !r->in(0)) {         // The occasional useless Region
+    if (last_merge && !r->in(0)) { // The occasional useless Region
       assert(control() == r, "");
       set_control(r->nonnull_req());
     }
@@ -1931,6 +2057,8 @@
       if (n->is_Phi() && n->as_Phi()->region() == r) {
         assert(n->req() == pnum, "must be same size as region");
         n->add_req(NULL);
+      } else if (n->is_ValueType() && n->as_ValueType()->has_phi_inputs(r)) {
+        n->as_ValueType()->add_new_path(r);
       }
     }
   }
@@ -1953,6 +2081,10 @@
   if (o->is_Phi() && o->as_Phi()->region() == region) {
     return o->as_Phi();
   }
+  ValueTypeBaseNode* vt = o->isa_ValueType();
+  if (vt != NULL && vt->has_phi_inputs(region)) {
+    return vt->get_oop()->as_Phi();
+  }
 
   // Now use a Phi here for merging
   assert(!nocreate, "Cannot build a phi for a block already parsed.");
@@ -1972,8 +2104,8 @@
   }
 
   // If the type falls to bottom, then this must be a local that
-  // is mixing ints and oops or some such.  Forcing it to top
-  // makes it go dead.
+  // is already dead or is mixing ints and oops or some such.
+  // Forcing it to top makes it go dead.
   if (t == Type::BOTTOM) {
     map->set_req(idx, top());
     return NULL;
   }
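
The merging above never builds a phi over the value type as a whole: each field has its own phi at the region, merge_with fills in the input corresponding to path pnum, and add_new_path grows every field phi when the region gains an edge. A minimal standalone model of those two operations (hypothetical classes, not the ValueTypeNode API):

    #include <vector>

    struct PhiModel {
      std::vector<long> in;  // in[pnum] is the value flowing in from path pnum
    };

    struct MergedValueModel {
      std::vector<PhiModel> field_phis;  // one phi per field

      // Analogue of merge_with: record the incoming field values for one path.
      void merge_with(const std::vector<long>& incoming_fields, size_t pnum) {
        for (size_t f = 0; f < field_phis.size(); f++) {
          field_phis[f].in[pnum] = incoming_fields[f];
        }
      }

      // Analogue of add_new_path: the region grew an input, so every field
      // phi grows one as well; a later merge fills it in.
      void add_new_path() {
        for (size_t f = 0; f < field_phis.size(); f++) {
          field_phis[f].in.push_back(0);
        }
      }
    };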
@@ -1986,11 +2118,20 @@
     return NULL;
   }
 
-  PhiNode* phi = PhiNode::make(region, o, t);
-  gvn().set_type(phi, t);
-  if (C->do_escape_analysis()) record_for_igvn(phi);
-  map->set_req(idx, phi);
-  return phi;
+  if (vt != NULL) {
+    // Value types are merged by merging their field values.
+    // Create a cloned ValueTypeNode with phi inputs that
+    // represents the merged value type and update the map.
+    vt = vt->clone_with_phis(&_gvn, region);
+    map->set_req(idx, vt);
+    return vt->get_oop()->as_Phi();
+  } else {
+    PhiNode* phi = PhiNode::make(region, o, t);
+    gvn().set_type(phi, t);
+    if (C->do_escape_analysis()) record_for_igvn(phi);
+    map->set_req(idx, phi);
+    return phi;
+  }
 }
 
 //--------------------------ensure_memory_phi----------------------------------
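
clone_with_phis is the creation side of the scheme modeled above: the first time a value type reaches a merge point, each field value is wrapped in a fresh phi at the region, and the phi on the oop input becomes the canonical handle that ensure_phi and the merge loop pass around. Continuing the simplified model, again hypothetical rather than the real API:

    #include <vector>

    // Analogue of clone_with_phis, reusing the PhiModel shape from the sketch
    // above: give each field a phi with num_paths inputs, seeded with the
    // current field value on path pnum.
    std::vector<std::vector<long>> clone_with_phis(const std::vector<long>& fields,
                                                   size_t num_paths, size_t pnum) {
      std::vector<std::vector<long>> phis(fields.size(),
                                          std::vector<long>(num_paths, 0));
      for (size_t f = 0; f < fields.size(); f++) {
        phis[f][pnum] = fields[f];
      }
      return phis;
    }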
@@ -2170,60 +2311,77 @@
   }
 
   // Do not set_parse_bci, so that return goo is credited to the return insn.
-  set_bci(InvocationEntryBci);
+  // vreturn can trigger an allocation so vreturn can throw. Setting
+  // the bci here breaks exception handling. Commenting this out
+  // doesn't seem to break anything.
+  // set_bci(InvocationEntryBci);
   if (method()->is_synchronized() && GenerateSynchronizationCode) {
     shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
   }
   if (C->env()->dtrace_method_probes()) {
     make_dtrace_method_exit(method());
   }
-  SafePointNode* exit_return = _exits.map();
-  exit_return->in( TypeFunc::Control  )->add_req( control() );
-  exit_return->in( TypeFunc::I_O      )->add_req( i_o () );
-  Node *mem = exit_return->in( TypeFunc::Memory   );
-  for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
-    if (mms.is_empty()) {
-      // get a copy of the base memory, and patch just this one input
-      const TypePtr* adr_type = mms.adr_type(C);
-      Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
-      assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
-      gvn().set_type_bottom(phi);
-      phi->del_req(phi->req()-1);  // prepare to re-patch
-      mms.set_memory(phi);
-    }
-    mms.memory()->add_req(mms.memory2());
-  }
-
-  // frame pointer is always same, already captured
   if (value != NULL) {
-    // If returning oops to an interface-return, there is a silent free
-    // cast from oop to interface allowed by the Verifier.  Make it explicit
-    // here.
     Node* phi = _exits.argument(0);
-    const TypeInstPtr *tr = phi->bottom_type()->isa_instptr();
-    if (tr && tr->klass()->is_loaded() &&
-        tr->klass()->is_interface()) {
-      const TypeInstPtr *tp = value->bottom_type()->isa_instptr();
-      if (tp && tp->klass()->is_loaded() &&
-          !tp->klass()->is_interface()) {
+    const Type* return_type = phi->bottom_type();
+    const TypeOopPtr* tr = return_type->isa_oopptr();
+    if (return_type->isa_valuetype()) {
+      // Value type is returned as fields, make sure it is scalarized
+      if (!value->is_ValueType()) {
+        value = ValueTypeNode::make_from_oop(this, value, return_type->is_valuetype()->value_klass());
+      }
+      if (!_caller->has_method()) {
+        // Value type is returned as fields from root method, make
+        // sure all non-flattened value type fields are allocated.
+        assert(tf()->returns_value_type_as_fields(), "must be returned as fields");
+        value = value->as_ValueType()->allocate_fields(this);
+      }
+    } else if (value->is_ValueType()) {
+      // Value type is returned as oop, make sure it is allocated
+      assert(tr && tr->can_be_value_type(), "must return a value type pointer");
+      value = ValueTypePtrNode::make_from_value_type(this, value->as_ValueType());
+    } else if (tr && tr->isa_instptr() && tr->klass()->is_loaded() && tr->klass()->is_interface()) {
+      // If returning oops to an interface-return, there is a silent free
+      // cast from oop to interface allowed by the Verifier. Make it explicit here.
+      const TypeInstPtr* tp = value->bottom_type()->isa_instptr();
+      if (tp && tp->klass()->is_loaded() && !tp->klass()->is_interface()) {
         // sharpen the type eagerly; this eases certain assert checking
-        if (tp->higher_equal(TypeInstPtr::NOTNULL))
+        if (tp->higher_equal(TypeInstPtr::NOTNULL)) {
           tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
+        }
         value = _gvn.transform(new CheckCastPPNode(0, value, tr));
       }
     } else {
-      // Also handle returns of oop-arrays to an arrays-of-interface return
+      // Handle returns of oop-arrays to an arrays-of-interface return
       const TypeInstPtr* phi_tip;
       const TypeInstPtr* val_tip;
-      Type::get_arrays_base_elements(phi->bottom_type(), value->bottom_type(), &phi_tip, &val_tip);
+      Type::get_arrays_base_elements(return_type, value->bottom_type(), &phi_tip, &val_tip);
       if (phi_tip != NULL && phi_tip->is_loaded() && phi_tip->klass()->is_interface() &&
           val_tip != NULL && val_tip->is_loaded() && !val_tip->klass()->is_interface()) {
-        value = _gvn.transform(new CheckCastPPNode(0, value, phi->bottom_type()));
+        value = _gvn.transform(new CheckCastPPNode(0, value, return_type));
       }
     }
     phi->add_req(value);
   }
 
+  SafePointNode* exit_return = _exits.map();
+  exit_return->in( TypeFunc::Control  )->add_req( control() );
+  exit_return->in( TypeFunc::I_O      )->add_req( i_o () );
+  Node *mem = exit_return->in( TypeFunc::Memory   );
+  for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
+    if (mms.is_empty()) {
+      // get a copy of the base memory, and patch just this one input
+      const TypePtr* adr_type = mms.adr_type(C);
+      Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
+      assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
+      gvn().set_type_bottom(phi);
+      phi->del_req(phi->req()-1);  // prepare to re-patch
+      mms.set_memory(phi);
+    }
+    mms.memory()->add_req(mms.memory2());
+  }
+
   if (_first_return) {
     _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
     _first_return = false;