src/share/vm/opto/parse1.cpp

*** 779,790 ****
    gvn().set_type_bottom(memphi);
    _exits.set_i_o(iophi);
    _exits.set_all_memory(memphi);
  
    // Add a return value to the exit state. (Do not push it yet.)
!   if (tf()->range()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
      if (ret_type->isa_int()) {
        BasicType ret_bt = method()->return_type()->basic_type();
        if (ret_bt == T_BOOLEAN ||
            ret_bt == T_CHAR ||
            ret_bt == T_BYTE ||
--- 779,790 ----
    gvn().set_type_bottom(memphi);
    _exits.set_i_o(iophi);
    _exits.set_all_memory(memphi);
  
    // Add a return value to the exit state. (Do not push it yet.)
!   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
      if (ret_type->isa_int()) {
        BasicType ret_bt = method()->return_type()->basic_type();
        if (ret_bt == T_BOOLEAN ||
            ret_bt == T_CHAR ||
            ret_bt == T_BYTE ||
*** 798,834 ****
      // types will not join when we transform and push in do_exits().
      const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
      if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
        ret_type = TypeOopPtr::BOTTOM;
      }
!     if (_caller->has_method() && ret_type->isa_valuetypeptr()) {
!       // When inlining, return value type as ValueTypeNode not as oop
        ret_type = ret_type->is_valuetypeptr()->value_type();
      }
      int ret_size = type2size[ret_type->basic_type()];
      Node* ret_phi = new PhiNode(region, ret_type);
      gvn().set_type_bottom(ret_phi);
      _exits.ensure_stack(ret_size);
!     assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
      assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
      _exits.set_argument(0, ret_phi); // here is where the parser finds it
      // Note: ret_phi is not yet pushed, until do_exits.
    }
  }
  
  // Helper function to create a ValueTypeNode from its fields passed as
  // arguments. Fields are passed in order of increasing offsets.
! Node* Compile::create_vt_node(Node* n, ciValueKlass* vk, ciValueKlass* base_vk, int base_offset, int base_input) {
    assert(base_offset >= 0, "offset in value type always positive");
    PhaseGVN& gvn = *initial_gvn();
    ValueTypeNode* vt = ValueTypeNode::make(gvn, vk);
    for (uint i = 0; i < vt->field_count(); i++) {
      ciType* field_type = vt->field_type(i);
      int offset = base_offset + vt->field_offset(i) - (base_offset > 0 ? vk->first_field_offset() : 0);
      if (field_type->is_valuetype()) {
        ciValueKlass* embedded_vk = field_type->as_value_klass();
!       Node* embedded_vt = create_vt_node(n, embedded_vk, base_vk, offset, base_input);
        vt->set_field_value(i, embedded_vt);
      } else {
        int j = 0; int extra = 0;
        for (; j < base_vk->nof_nonstatic_fields(); j++) {
          ciField* f = base_vk->nonstatic_field_at(j);
--- 798,837 ----
      // types will not join when we transform and push in do_exits().
      const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
      if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
        ret_type = TypeOopPtr::BOTTOM;
      }
!     if ((_caller->has_method() || tf()->returns_value_type_as_fields()) &&
!         ret_type->isa_valuetypeptr() &&
!         ret_type->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
!       // When inlining or with multiple return values: return value
!       // type as ValueTypeNode not as oop
        ret_type = ret_type->is_valuetypeptr()->value_type();
      }
      int ret_size = type2size[ret_type->basic_type()];
      Node* ret_phi = new PhiNode(region, ret_type);
      gvn().set_type_bottom(ret_phi);
      _exits.ensure_stack(ret_size);
!     assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
      assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
      _exits.set_argument(0, ret_phi); // here is where the parser finds it
      // Note: ret_phi is not yet pushed, until do_exits.
    }
  }
  
  // Helper function to create a ValueTypeNode from its fields passed as
  // arguments. Fields are passed in order of increasing offsets.
! Node* Compile::create_vt_node(Node* n, ciValueKlass* vk, ciValueKlass* base_vk, int base_offset, int base_input, bool in) {
    assert(base_offset >= 0, "offset in value type always positive");
    PhaseGVN& gvn = *initial_gvn();
    ValueTypeNode* vt = ValueTypeNode::make(gvn, vk);
    for (uint i = 0; i < vt->field_count(); i++) {
      ciType* field_type = vt->field_type(i);
      int offset = base_offset + vt->field_offset(i) - (base_offset > 0 ? vk->first_field_offset() : 0);
      if (field_type->is_valuetype()) {
        ciValueKlass* embedded_vk = field_type->as_value_klass();
!       Node* embedded_vt = create_vt_node(n, embedded_vk, base_vk, offset, base_input, in);
        vt->set_field_value(i, embedded_vt);
      } else {
        int j = 0; int extra = 0;
        for (; j < base_vk->nof_nonstatic_fields(); j++) {
          ciField* f = base_vk->nonstatic_field_at(j);
*** 842,855 ****
--- 845,863 ----
          }
        }
        assert(j != base_vk->nof_nonstatic_fields(), "must find");
        Node* parm = NULL;
        if (n->is_Start()) {
+         assert(in, "return from start?");
          parm = gvn.transform(new ParmNode(n->as_Start(), base_input + j + extra));
        } else {
+         if (in) {
          assert(n->is_Call(), "nothing else here");
          parm = n->in(base_input + j + extra);
+         } else {
+           parm = gvn.transform(new ProjNode(n->as_Call(), base_input + j + extra));
+         }
        }
        vt->set_field_value(i, parm);
        // Record all these guys for later GVN.
        record_for_igvn(parm);
      }
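
Note on the hunk above: the new `in` flag tells create_vt_node() where a field's
value lives. A standalone sketch of the distinction, in plain C++ (this is not
HotSpot code; ToyCall and its members are made-up names for illustration):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // A toy "call" with numbered slots on both sides: arguments flow in
    // through inputs, results flow out through numbered projections.
    struct ToyCall {
      std::vector<int> inputs;   // argument slots (the in == true case)
      std::vector<int> results;  // result slots   (the in == false case)
    };

    // Mirrors the shape of the new parameter: the same walk over the value
    // type's fields reads each field either from the call's inputs (building
    // a value type from incoming arguments) or from its projections
    // (reassembling a value type returned as multiple values).
    int field_value(const ToyCall& call, std::size_t slot, bool in) {
      return in ? call.inputs.at(slot) : call.results.at(slot);
    }

    int main() {
      ToyCall c{{10, 20}, {30, 40}};
      std::cout << field_value(c, 1, true)  << "\n"; // 20: incoming argument
      std::cout << field_value(c, 1, false) << "\n"; // 40: returned value
    }

In the patch itself the two cases are ParmNode/Call input versus a ProjNode
hung off the Call, which is why the ProjNode branch only exists when !in.
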
*** 860,870 ****
  //----------------------------build_start_state-------------------------------
  // Construct a state which contains only the incoming arguments from an
  // unknown caller. The method & bci will be NULL & InvocationEntryBci.
  JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
    int arg_size_sig = tf->domain_sig()->cnt();
!   int max_size = MAX2(arg_size_sig, (int)tf->range()->cnt());
    JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
    SafePointNode* map = new SafePointNode(max_size, NULL);
    record_for_igvn(map);
    assert(arg_size_sig == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
    Node_Notes* old_nn = default_node_notes();
--- 868,878 ----
  //----------------------------build_start_state-------------------------------
  // Construct a state which contains only the incoming arguments from an
  // unknown caller. The method & bci will be NULL & InvocationEntryBci.
  JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
    int arg_size_sig = tf->domain_sig()->cnt();
!   int max_size = MAX2(arg_size_sig, (int)tf->range_cc()->cnt());
    JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
    SafePointNode* map = new SafePointNode(max_size, NULL);
    record_for_igvn(map);
    assert(arg_size_sig == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
    Node_Notes* old_nn = default_node_notes();
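
Note on the hunk above: the old single range() is now split into a signature
view (range_sig()) and a calling-convention view (range_cc()), and the map is
sized with the wider one. A standalone sketch of why the two can differ, in
plain C++ (not HotSpot code; names and the slot model are illustrative only):

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Field { std::string name; std::size_t slots; };

    // Signature view: a value type return is a single reference slot.
    std::size_t range_sig_cnt() { return 1; }

    // Calling-convention view: a klass/discriminator slot plus one slot per
    // flattened field, so it can be wider than the signature view.
    std::size_t range_cc_cnt(const std::vector<Field>& fields) {
      std::size_t n = 1;
      for (const Field& f : fields) n += f.slots;
      return n;
    }

    int main() {
      std::vector<Field> point = {{"x", 1}, {"y", 1}};
      std::cout << "sig: " << range_sig_cnt() << "\n";        // 1
      std::cout << "cc:  " << range_cc_cnt(point) << "\n";    // 3
      // As in the MAX2 above: size the start state for the wider view.
      std::cout << "map: "
                << std::max(range_sig_cnt(), range_cc_cnt(point)) << "\n";
    }
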
*** 893,903 ****
        // argument per field of the value type. Build ValueTypeNodes
        // from the value type arguments.
        const Type* t = tf->domain_sig()->field_at(i);
        if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
          ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
!         Node* vt = create_vt_node(start, vk, vk, 0, j);
          map->init_req(i, gvn.transform(vt));
          j += vk->value_arg_slots();
        } else {
          Node* parm = gvn.transform(new ParmNode(start, j));
          map->init_req(i, parm);
--- 901,911 ----
        // argument per field of the value type. Build ValueTypeNodes
        // from the value type arguments.
        const Type* t = tf->domain_sig()->field_at(i);
        if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
          ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
!         Node* vt = create_vt_node(start, vk, vk, 0, j, true);
          map->init_req(i, gvn.transform(vt));
          j += vk->value_arg_slots();
        } else {
          Node* parm = gvn.transform(new ParmNode(start, j));
          map->init_req(i, parm);
*** 950,966 ****
                               kit.i_o(),
                               kit.reset_memory(),
                               kit.frameptr(),
                               kit.returnadr());
    // Add zero or 1 return values
!   int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
    if (ret_size > 0) {
      kit.inc_sp(-ret_size); // pop the return value(s)
      kit.sync_jvms();
!     ret->add_req(kit.argument(0));
      // Note: The second dummy edge is not needed by a ReturnNode.
    }
    // bind it to root
    root()->add_req(ret);
    record_for_igvn(ret);
    initial_gvn()->transform_no_reclaim(ret);
  }
--- 958,985 ----
                               kit.i_o(),
                               kit.reset_memory(),
                               kit.frameptr(),
                               kit.returnadr());
    // Add zero or 1 return values
!   int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
    if (ret_size > 0) {
      kit.inc_sp(-ret_size); // pop the return value(s)
      kit.sync_jvms();
!     Node* res = kit.argument(0);
!     if (tf()->returns_value_type_as_fields()) {
!       // Multiple return values (value type fields): add as many edges
!       // to the Return node as returned values.
!       assert(res->is_ValueType(), "what else supports multi value return");
!       ValueTypeNode* vt = res->as_ValueType();
!       ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms);
!       vt->pass_klass(ret, TypeFunc::Parms, kit);
!       vt->pass_fields(ret, TypeFunc::Parms+1, kit);
!     } else {
!       ret->add_req(res);
        // Note: The second dummy edge is not needed by a ReturnNode.
      }
+   }
    // bind it to root
    root()->add_req(ret);
    record_for_igvn(ret);
    initial_gvn()->transform_no_reclaim(ret);
  }
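
Note on the hunk above: when the value type is returned as fields, the Return
node no longer carries one oop edge but a klass edge followed by one edge per
field (pass_klass at TypeFunc::Parms, pass_fields from Parms+1). A standalone
sketch of the resulting shape, in plain C++ (not HotSpot code; ToyReturn and
the edge strings are made up for illustration):

    #include <iostream>
    #include <string>
    #include <vector>

    // A toy return "node" that just collects its value edges.
    struct ToyReturn {
      std::vector<std::string> edges;
      void add_req(const std::string& e) { edges.push_back(e); }
    };

    int main() {
      ToyReturn ret;
      // Scalarized return of a two-field value type: a klass edge first
      // (so the caller can tell what was returned), then one edge per
      // field, in the positions Parms, Parms+1, Parms+2 would occupy.
      ret.add_req("klass(Point)");
      ret.add_req("field x");
      ret.add_req("field y");
      for (const std::string& e : ret.edges) std::cout << e << "\n";
    }

This is why add_req_batch() reserves range_cc()->cnt() - TypeFunc::Parms slots
up front: the calling-convention view, not the signature view, decides how
many edges the Return needs.
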
*** 1112,1123 ****
    for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
      // transform each slice of the original memphi:
      mms.set_memory(_gvn.transform(mms.memory()));
    }
  
!   if (tf()->range()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
      Node* ret_phi = _gvn.transform( _exits.argument(0) );
      if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
        // In case of concurrent class loading, the type we set for the
        // ret_phi in build_exits() may have been too optimistic and the
        // ret_phi may be top now.
--- 1131,1142 ----
    for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
      // transform each slice of the original memphi:
      mms.set_memory(_gvn.transform(mms.memory()));
    }
  
!   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
!     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
      Node* ret_phi = _gvn.transform( _exits.argument(0) );
      if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
        // In case of concurrent class loading, the type we set for the
        // ret_phi in build_exits() may have been too optimistic and the
        // ret_phi may be top now.
*** 2292,2313 ****
  }
  
  //------------------------------return_current---------------------------------
  // Append current _map to _exit_return
  void Parse::return_current(Node* value) {
!   if (value != NULL && value->is_ValueType() && !_caller->has_method()) {
!     // Returning from root JVMState, make sure value type is allocated
      value = value->as_ValueType()->store_to_memory(this);
    }
  
    if (RegisterFinalizersAtInit &&
        method()->intrinsic_id() == vmIntrinsics::_Object_init) {
      call_register_finalizer();
    }
  
    // Do not set_parse_bci, so that return goo is credited to the return insn.
!   set_bci(InvocationEntryBci);
    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
    }
    if (C->env()->dtrace_method_probes()) {
      make_dtrace_method_exit(method());
--- 2311,2337 ----
  }
  
  //------------------------------return_current---------------------------------
  // Append current _map to _exit_return
  void Parse::return_current(Node* value) {
!   if (value != NULL && value->is_ValueType() && !_caller->has_method() &&
!       !tf()->returns_value_type_as_fields()) {
!     // Returning from root JVMState without multiple returned values,
!     // make sure value type is allocated
      value = value->as_ValueType()->store_to_memory(this);
    }
  
    if (RegisterFinalizersAtInit &&
        method()->intrinsic_id() == vmIntrinsics::_Object_init) {
      call_register_finalizer();
    }
  
    // Do not set_parse_bci, so that return goo is credited to the return insn.
!   // vreturn can trigger an allocation so vreturn can throw. Setting
!   // the bci here breaks exception handling. Commenting this out
!   // doesn't seem to break anything.
!   // set_bci(InvocationEntryBci);
    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
    }
    if (C->env()->dtrace_method_probes()) {
      make_dtrace_method_exit(method());