
src/share/vm/opto/parse1.cpp

 764 void Parse::build_exits() {
 765   // make a clone of caller to prevent sharing of side-effects
 766   _exits.set_map(_exits.clone_map());
 767   _exits.clean_stack(_exits.sp());
 768   _exits.sync_jvms();
 769 
 770   RegionNode* region = new RegionNode(1);
 771   record_for_igvn(region);
 772   gvn().set_type_bottom(region);
 773   _exits.set_control(region);
 774 
 775   // Note:  iophi and memphi are not transformed until do_exits.
 776   Node* iophi  = new PhiNode(region, Type::ABIO);
 777   Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
 778   gvn().set_type_bottom(iophi);
 779   gvn().set_type_bottom(memphi);
 780   _exits.set_i_o(iophi);
 781   _exits.set_all_memory(memphi);
 782 
 783   // Add a return value to the exit state.  (Do not push it yet.)
 784   if (tf()->range()->cnt() > TypeFunc::Parms) {
 785     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
 786     if (ret_type->isa_int()) {
 787       BasicType ret_bt = method()->return_type()->basic_type();
 788       if (ret_bt == T_BOOLEAN ||
 789           ret_bt == T_CHAR ||
 790           ret_bt == T_BYTE ||
 791           ret_bt == T_SHORT) {
 792         ret_type = TypeInt::INT;
 793       }
 794     }
 795 
 796     // Don't "bind" an unloaded return klass to the ret_phi. If the klass
 797     // becomes loaded during the subsequent parsing, the loaded and unloaded
 798     // types will not join when we transform and push in do_exits().
 799     const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
 800     if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
 801       ret_type = TypeOopPtr::BOTTOM;
 802     }
 803     if (_caller->has_method() && ret_type->isa_valuetypeptr()) {
 804       // When inlining, return the value type as a ValueTypeNode, not as an oop
 805       ret_type = ret_type->is_valuetypeptr()->value_type();
 806     }
 807     int         ret_size = type2size[ret_type->basic_type()];
 808     Node*       ret_phi  = new PhiNode(region, ret_type);
 809     gvn().set_type_bottom(ret_phi);
 810     _exits.ensure_stack(ret_size);
 811     assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
 812     assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
 813     _exits.set_argument(0, ret_phi);  // here is where the parser finds it
 814     // Note:  ret_phi is not yet pushed, until do_exits.
 815   }
 816 }
 817 
 818 // Helper function to create a ValueTypeNode from its fields passed as
 819 // arguments. Fields are passed in order of increasing offsets.
 820 Node* Compile::create_vt_node(Node* n, ciValueKlass* vk, ciValueKlass* base_vk, int base_offset, int base_input) {
 821   assert(base_offset >= 0, "offset in value type always positive");
 822   PhaseGVN& gvn = *initial_gvn();
 823   ValueTypeNode* vt = ValueTypeNode::make(gvn, vk);
 824   for (uint i = 0; i < vt->field_count(); i++) {
 825     ciType* field_type = vt->field_type(i);
 826     int offset = base_offset + vt->field_offset(i) - (base_offset > 0 ? vk->first_field_offset() : 0);
 827     if (field_type->is_valuetype()) {
 828       ciValueKlass* embedded_vk = field_type->as_value_klass();
 829       Node* embedded_vt = create_vt_node(n, embedded_vk, base_vk, offset, base_input);
 830       vt->set_field_value(i, embedded_vt);
 831     } else {
 832       int j = 0; int extra = 0;
 833       for (; j < base_vk->nof_nonstatic_fields(); j++) {
 834         ciField* f = base_vk->nonstatic_field_at(j);
 835         if (offset == f->offset()) {
 836           assert(f->type() == field_type, "inconsistent field type");
 837           break;
 838         }
 839         BasicType bt = f->type()->basic_type();
 840         if (bt == T_LONG || bt == T_DOUBLE) {
 841           extra++;
 842         }
 843       }
 844       assert(j != base_vk->nof_nonstatic_fields(), "must find");
 845       Node* parm = NULL;
 846       if (n->is_Start()) {
 847         parm = gvn.transform(new ParmNode(n->as_Start(), base_input + j + extra));
 848       } else {
 849         assert(n->is_Call(), "nothing else here");
 850         parm = n->in(base_input + j + extra);
 851       }
 852       vt->set_field_value(i, parm);
 853       // Record all these guys for later GVN.
 854       record_for_igvn(parm);
 855     }
 856   }
 857   return gvn.transform(vt);
 858 }
 859 
 860 //----------------------------build_start_state-------------------------------
 861 // Construct a state which contains only the incoming arguments from an
 862 // unknown caller.  The method & bci will be NULL & InvocationEntryBci.
 863 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
 864   int        arg_size_sig = tf->domain_sig()->cnt();
 865   int        max_size = MAX2(arg_size_sig, (int)tf->range()->cnt());
 866   JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
 867   SafePointNode* map  = new SafePointNode(max_size, NULL);
 868   record_for_igvn(map);
 869   assert(arg_size_sig == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
 870   Node_Notes* old_nn = default_node_notes();
 871   if (old_nn != NULL && has_method()) {
 872     Node_Notes* entry_nn = old_nn->clone(this);
 873     JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
 874     entry_jvms->set_offsets(0);
 875     entry_jvms->set_bci(entry_bci());
 876     entry_nn->set_jvms(entry_jvms);
 877     set_default_node_notes(entry_nn);
 878   }
 879   PhaseGVN& gvn = *initial_gvn();
 880   uint j = 0;
 881   for (uint i = 0; i < (uint)arg_size_sig; i++) {
 882     assert(j >= i, "less actual arguments than in the signature?");
 883     if (ValueTypePassFieldsAsArgs) {
 884       if (i < TypeFunc::Parms) {
 885         assert(i == j, "no change before the actual arguments");
 886         Node* parm = gvn.transform(new ParmNode(start, i));
 887         map->init_req(i, parm);
 888         // Record all these guys for later GVN.
 889         record_for_igvn(parm);
 890         j++;
 891       } else {
 892         // Value type arguments are not passed by reference: we get an
 893         // argument per field of the value type. Build ValueTypeNodes
 894         // from the value type arguments.
 895         const Type* t = tf->domain_sig()->field_at(i);
 896         if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
 897           ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
 898           Node* vt = create_vt_node(start, vk, vk, 0, j);
 899           map->init_req(i, gvn.transform(vt));
 900           j += vk->value_arg_slots();
 901         } else {
 902           Node* parm = gvn.transform(new ParmNode(start, j));
 903           map->init_req(i, parm);
 904           // Record all these guys for later GVN.
 905           record_for_igvn(parm);
 906           j++;
 907         }
 908       }
 909     } else {
 910      Node* parm = gvn.transform(new ParmNode(start, i));
 911      // Check if parameter is a value type pointer
 912      if (gvn.type(parm)->isa_valuetypeptr()) {
 913        // Create ValueTypeNode from the oop and replace the parameter
 914        parm = ValueTypeNode::make(gvn, map->memory(), parm);
 915      }
 916      map->init_req(i, parm);
 917      // Record all these guys for later GVN.
 918      record_for_igvn(parm);


 935   Node_Notes* nn = caller_nn->clone(C);
 936   JVMState* caller_jvms = nn->jvms();
 937   JVMState* jvms = new (C) JVMState(method(), caller_jvms);
 938   jvms->set_offsets(0);
 939   jvms->set_bci(_entry_bci);
 940   nn->set_jvms(jvms);
 941   return nn;
 942 }
 943 
 944 
 945 //--------------------------return_values--------------------------------------
 946 void Compile::return_values(JVMState* jvms) {
 947   GraphKit kit(jvms);
 948   Node* ret = new ReturnNode(TypeFunc::Parms,
 949                              kit.control(),
 950                              kit.i_o(),
 951                              kit.reset_memory(),
 952                              kit.frameptr(),
 953                              kit.returnadr());
 954   // Add zero or one return value
 955   int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
 956   if (ret_size > 0) {
 957     kit.inc_sp(-ret_size);  // pop the return value(s)
 958     kit.sync_jvms();
 959     ret->add_req(kit.argument(0));
 960     // Note:  The second dummy edge is not needed by a ReturnNode.
 961   }

 962   // bind it to root
 963   root()->add_req(ret);
 964   record_for_igvn(ret);
 965   initial_gvn()->transform_no_reclaim(ret);
 966 }
 967 
 968 //------------------------rethrow_exceptions-----------------------------------
 969 // Bind all exception states in the list into a single RethrowNode.
 970 void Compile::rethrow_exceptions(JVMState* jvms) {
 971   GraphKit kit(jvms);
 972   if (!kit.has_exceptions())  return;  // nothing to generate
 973   // Load my combined exception state into the kit, with all phis transformed:
 974   SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
 975   Node* ex_oop = kit.use_exception_state(ex_map);
 976   RethrowNode* exit = new RethrowNode(kit.control(),
 977                                       kit.i_o(), kit.reset_memory(),
 978                                       kit.frameptr(), kit.returnadr(),
 979                                       // like a return but with exception input
 980                                       ex_oop);
 981   // bind to root


1097     }
1098   }
1099 
1100   // Any method can write a @Stable field; insert memory barriers
1101   // after those also. Can't bind predecessor allocation node (if any)
1102   // with barrier because allocation doesn't always dominate
1103   // MemBarRelease.
1104   if (wrote_stable()) {
1105     _exits.insert_mem_bar(Op_MemBarRelease);
1106     if (PrintOpto && (Verbose || WizardMode)) {
1107       method()->print_name();
1108       tty->print_cr(" writes @Stable and needs a memory barrier");
1109     }
1110   }
1111 
1112   for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1113     // transform each slice of the original memphi:
1114     mms.set_memory(_gvn.transform(mms.memory()));
1115   }
1116 
1117   if (tf()->range()->cnt() > TypeFunc::Parms) {
1118     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1119     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
1120     if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1121       // In case of concurrent class loading, the type we set for the
1122       // ret_phi in build_exits() may have been too optimistic and the
1123       // ret_phi may be top now.
1124       // Otherwise, we've encountered an error and have to mark the method as
1125       // not compilable. Just using an assertion instead would be dangerous
1126       // as this could lead to an infinite compile loop in non-debug builds.
1127       {
1128         MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
1129         if (C->env()->system_dictionary_modification_counter_changed()) {
1130           C->record_failure(C2Compiler::retry_class_loading_during_parsing());
1131         } else {
1132           C->record_method_not_compilable("Can't determine return type.");
1133         }
1134       }
1135       return;
1136     }
1137     if (ret_type->isa_int()) {
1138       BasicType ret_bt = method()->return_type()->basic_type();


2277   // Set starting bci for uncommon trap.
2278   set_parse_bci(0);
2279 
2280   const TypePtr* adr_type = TypeRawPtr::make((address)mc);
2281   Node* mc_adr = makecon(adr_type);
2282   Node* cnt_adr = basic_plus_adr(mc_adr, mc_adr, in_bytes(MethodCounters::nmethod_age_offset()));
2283   Node* cnt = make_load(control(), cnt_adr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
2284   Node* decr = _gvn.transform(new SubINode(cnt, makecon(TypeInt::ONE)));
2285   store_to_memory(control(), cnt_adr, decr, T_INT, adr_type, MemNode::unordered);
2286   Node *chk   = _gvn.transform(new CmpINode(decr, makecon(TypeInt::ZERO)));
2287   Node* tst   = _gvn.transform(new BoolNode(chk, BoolTest::gt));
2288   { BuildCutout unless(this, tst, PROB_ALWAYS);
2289     uncommon_trap(Deoptimization::Reason_tenured,
2290                   Deoptimization::Action_make_not_entrant);
2291   }
2292 }
2293 
2294 //------------------------------return_current---------------------------------
2295 // Append current _map to _exit_return
2296 void Parse::return_current(Node* value) {
2297   if (value != NULL && value->is_ValueType() && !_caller->has_method()) {
2298     // Returning from the root JVMState, make sure the value type is allocated
2299     value = value->as_ValueType()->store_to_memory(this);
2300   }
2301 
2302   if (RegisterFinalizersAtInit &&
2303       method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2304     call_register_finalizer();
2305   }
2306 
2307   // Do not set_parse_bci, so that return goo is credited to the return insn.
2308   set_bci(InvocationEntryBci);
2309   if (method()->is_synchronized() && GenerateSynchronizationCode) {
2310     shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2311   }
2312   if (C->env()->dtrace_method_probes()) {
2313     make_dtrace_method_exit(method());
2314   }
2315   SafePointNode* exit_return = _exits.map();
2316   exit_return->in( TypeFunc::Control  )->add_req( control() );
2317   exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
2318   Node *mem = exit_return->in( TypeFunc::Memory   );
2319   for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2320     if (mms.is_empty()) {
2321       // get a copy of the base memory, and patch just this one input
2322       const TypePtr* adr_type = mms.adr_type(C);
2323       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2324       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2325       gvn().set_type_bottom(phi);
2326       phi->del_req(phi->req()-1);  // prepare to re-patch
2327       mms.set_memory(phi);
2328     }




 764 void Parse::build_exits() {
 765   // make a clone of caller to prevent sharing of side-effects
 766   _exits.set_map(_exits.clone_map());
 767   _exits.clean_stack(_exits.sp());
 768   _exits.sync_jvms();
 769 
 770   RegionNode* region = new RegionNode(1);
 771   record_for_igvn(region);
 772   gvn().set_type_bottom(region);
 773   _exits.set_control(region);
 774 
 775   // Note:  iophi and memphi are not transformed until do_exits.
 776   Node* iophi  = new PhiNode(region, Type::ABIO);
 777   Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
 778   gvn().set_type_bottom(iophi);
 779   gvn().set_type_bottom(memphi);
 780   _exits.set_i_o(iophi);
 781   _exits.set_all_memory(memphi);
 782 
 783   // Add a return value to the exit state.  (Do not push it yet.)
 784   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
 785     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
 786     if (ret_type->isa_int()) {
 787       BasicType ret_bt = method()->return_type()->basic_type();
 788       if (ret_bt == T_BOOLEAN ||
 789           ret_bt == T_CHAR ||
 790           ret_bt == T_BYTE ||
 791           ret_bt == T_SHORT) {
 792         ret_type = TypeInt::INT;
 793       }
 794     }
 795 
 796     // Don't "bind" an unloaded return klass to the ret_phi. If the klass
 797     // becomes loaded during the subsequent parsing, the loaded and unloaded
 798     // types will not join when we transform and push in do_exits().
 799     const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
 800     if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
 801       ret_type = TypeOopPtr::BOTTOM;
 802     }
 803     if ((_caller->has_method() || tf()->returns_value_type_as_fields()) &&
 804         ret_type->isa_valuetypeptr() &&
 805         ret_type->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
 806       // When inlining, or with multiple return values, return the value
 807       // type as a ValueTypeNode, not as an oop
 808       ret_type = ret_type->is_valuetypeptr()->value_type();
 809     }
 810     int         ret_size = type2size[ret_type->basic_type()];
 811     Node*       ret_phi  = new PhiNode(region, ret_type);
 812     gvn().set_type_bottom(ret_phi);
 813     _exits.ensure_stack(ret_size);
 814     assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
 815     assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
 816     _exits.set_argument(0, ret_phi);  // here is where the parser finds it
 817     // Note:  ret_phi is not yet pushed, until do_exits.
 818   }
 819 }
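
The sub-int handling above keeps the exit ret_phi at plain int width whenever the
method's declared return type is boolean, char, byte or short. A minimal standalone
sketch of that widening rule, for illustration only (the enum below is a local
stand-in, not HotSpot's BasicType):

    // widen_return_type.cpp -- illustration of the sub-int widening in build_exits()
    #include <cstdio>

    enum BasicType { T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT, T_LONG, T_OBJECT };

    static BasicType widen_return_type(BasicType ret_bt) {
      switch (ret_bt) {
        case T_BOOLEAN: case T_CHAR: case T_BYTE: case T_SHORT:
          return T_INT;   // track sub-int returns as int, as build_exits() does
        default:
          return ret_bt;  // other return types are left untouched
      }
    }

    int main() {
      printf("T_BOOLEAN widened to T_INT: %s\n",
             widen_return_type(T_BOOLEAN) == T_INT ? "yes" : "no");
      printf("T_LONG left unchanged:      %s\n",
             widen_return_type(T_LONG) == T_LONG ? "yes" : "no");
      return 0;
    }
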
 820 
 821 // Helper function to create a ValueTypeNode from its fields passed as
 822 // arguments. Fields are passed in order of increasing offsets.
 823 Node* Compile::create_vt_node(Node* n, ciValueKlass* vk, ciValueKlass* base_vk, int base_offset, int base_input, bool in) {
 824   assert(base_offset >= 0, "offset in value type always positive");
 825   PhaseGVN& gvn = *initial_gvn();
 826   ValueTypeNode* vt = ValueTypeNode::make(gvn, vk);
 827   for (uint i = 0; i < vt->field_count(); i++) {
 828     ciType* field_type = vt->field_type(i);
 829     int offset = base_offset + vt->field_offset(i) - (base_offset > 0 ? vk->first_field_offset() : 0);
 830     if (field_type->is_valuetype()) {
 831       ciValueKlass* embedded_vk = field_type->as_value_klass();
 832       Node* embedded_vt = create_vt_node(n, embedded_vk, base_vk, offset, base_input, in);
 833       vt->set_field_value(i, embedded_vt);
 834     } else {
 835       int j = 0; int extra = 0;
 836       for (; j < base_vk->nof_nonstatic_fields(); j++) {
 837         ciField* f = base_vk->nonstatic_field_at(j);
 838         if (offset == f->offset()) {
 839           assert(f->type() == field_type, "inconsistent field type");
 840           break;
 841         }
 842         BasicType bt = f->type()->basic_type();
 843         if (bt == T_LONG || bt == T_DOUBLE) {
 844           extra++;
 845         }
 846       }
 847       assert(j != base_vk->nof_nonstatic_fields(), "must find");
 848       Node* parm = NULL;
 849       if (n->is_Start()) {
 850         assert(in, "return from start?");
 851         parm = gvn.transform(new ParmNode(n->as_Start(), base_input + j + extra));
 852       } else {
 853         if (in) {
 854           assert(n->is_Call(), "nothing else here");
 855           parm = n->in(base_input + j + extra);
 856         } else {
 857           parm = gvn.transform(new ProjNode(n->as_Call(), base_input + j + extra));
 858         }
 859       }
 860       vt->set_field_value(i, parm);
 861       // Record all these guys for later GVN.
 862       record_for_igvn(parm);
 863     }
 864   }
 865   return gvn.transform(vt);
 866 }
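
For each non-value-type field, create_vt_node() above scans the non-static fields of
base_vk in offset order to find the flattened incoming argument that carries the field,
counting one extra slot for every preceding long or double. A minimal standalone sketch
of that slot computation, with a made-up field layout (illustration only, not HotSpot
code):

    // vt_arg_slot.cpp -- how a field offset maps to a flattened argument slot
    #include <cassert>
    #include <cstdio>
    #include <vector>

    enum BasicType { T_INT, T_LONG, T_DOUBLE, T_OBJECT };

    struct Field { int offset; BasicType bt; };   // non-static field, offset order

    // Mirrors the inner loop above: index of the matching field plus one extra
    // slot per earlier long/double.
    static int arg_slot(const std::vector<Field>& fields, int base_input, int offset) {
      int extra = 0;
      for (size_t j = 0; j < fields.size(); j++) {
        if (fields[j].offset == offset) {
          return base_input + (int)j + extra;
        }
        if (fields[j].bt == T_LONG || fields[j].bt == T_DOUBLE) {
          extra++;  // wide fields occupy two argument slots
        }
      }
      assert(false && "must find");
      return -1;
    }

    int main() {
      // Hypothetical value type { int a; long b; int c; } at offsets 0, 8, 16.
      std::vector<Field> fields = { {0, T_INT}, {8, T_LONG}, {16, T_INT} };
      printf("a -> slot %d\n", arg_slot(fields, 0, 0));   // 0
      printf("b -> slot %d\n", arg_slot(fields, 0, 8));   // 1
      printf("c -> slot %d\n", arg_slot(fields, 0, 16));  // 3: long b used two slots
      return 0;
    }
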
 867 
 868 //----------------------------build_start_state-------------------------------
 869 // Construct a state which contains only the incoming arguments from an
 870 // unknown caller.  The method & bci will be NULL & InvocationEntryBci.
 871 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
 872   int        arg_size_sig = tf->domain_sig()->cnt();
 873   int        max_size = MAX2(arg_size_sig, (int)tf->range_cc()->cnt());
 874   JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
 875   SafePointNode* map  = new SafePointNode(max_size, NULL);
 876   record_for_igvn(map);
 877   assert(arg_size_sig == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
 878   Node_Notes* old_nn = default_node_notes();
 879   if (old_nn != NULL && has_method()) {
 880     Node_Notes* entry_nn = old_nn->clone(this);
 881     JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
 882     entry_jvms->set_offsets(0);
 883     entry_jvms->set_bci(entry_bci());
 884     entry_nn->set_jvms(entry_jvms);
 885     set_default_node_notes(entry_nn);
 886   }
 887   PhaseGVN& gvn = *initial_gvn();
 888   uint j = 0;
 889   for (uint i = 0; i < (uint)arg_size_sig; i++) {
 890     assert(j >= i, "less actual arguments than in the signature?");
 891     if (ValueTypePassFieldsAsArgs) {
 892       if (i < TypeFunc::Parms) {
 893         assert(i == j, "no change before the actual arguments");
 894         Node* parm = gvn.transform(new ParmNode(start, i));
 895         map->init_req(i, parm);
 896         // Record all these guys for later GVN.
 897         record_for_igvn(parm);
 898         j++;
 899       } else {
 900         // Value type arguments are not passed by reference: we get an
 901         // argument per field of the value type. Build ValueTypeNodes
 902         // from the value type arguments.
 903         const Type* t = tf->domain_sig()->field_at(i);
 904         if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
 905           ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
 906           Node* vt = create_vt_node(start, vk, vk, 0, j, true);
 907           map->init_req(i, gvn.transform(vt));
 908           j += vk->value_arg_slots();
 909         } else {
 910           Node* parm = gvn.transform(new ParmNode(start, j));
 911           map->init_req(i, parm);
 912           // Record all these guys for later GVN.
 913           record_for_igvn(parm);
 914           j++;
 915         }
 916       }
 917     } else {
 918      Node* parm = gvn.transform(new ParmNode(start, i));
 919      // Check if parameter is a value type pointer
 920      if (gvn.type(parm)->isa_valuetypeptr()) {
 921        // Create ValueTypeNode from the oop and replace the parameter
 922        parm = ValueTypeNode::make(gvn, map->memory(), parm);
 923      }
 924      map->init_req(i, parm);
 925      // Record all these guys for later GVN.
 926      record_for_igvn(parm);


 943   Node_Notes* nn = caller_nn->clone(C);
 944   JVMState* caller_jvms = nn->jvms();
 945   JVMState* jvms = new (C) JVMState(method(), caller_jvms);
 946   jvms->set_offsets(0);
 947   jvms->set_bci(_entry_bci);
 948   nn->set_jvms(jvms);
 949   return nn;
 950 }
 951 
 952 
 953 //--------------------------return_values--------------------------------------
 954 void Compile::return_values(JVMState* jvms) {
 955   GraphKit kit(jvms);
 956   Node* ret = new ReturnNode(TypeFunc::Parms,
 957                              kit.control(),
 958                              kit.i_o(),
 959                              kit.reset_memory(),
 960                              kit.frameptr(),
 961                              kit.returnadr());
 962   // Add zero or one return value
 963   int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
 964   if (ret_size > 0) {
 965     kit.inc_sp(-ret_size);  // pop the return value(s)
 966     kit.sync_jvms();
 967     Node* res = kit.argument(0);
 968     if (tf()->returns_value_type_as_fields()) {
 969       // Multiple return values (value type fields): add as many edges
 970       // to the Return node as returned values.
 971       assert(res->is_ValueType(), "what else supports multi value return");
 972       ValueTypeNode* vt = res->as_ValueType();
 973       ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms);
 974       vt->pass_klass(ret, TypeFunc::Parms, kit);
 975       vt->pass_fields(ret, TypeFunc::Parms+1, kit);
 976     } else {
 977       ret->add_req(res);
 978       // Note:  The second dummy edge is not needed by a ReturnNode.
 979     }
 980   }
 981   // bind it to root
 982   root()->add_req(ret);
 983   record_for_igvn(ret);
 984   initial_gvn()->transform_no_reclaim(ret);
 985 }
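
When tf()->returns_value_type_as_fields() is true, the ReturnNode built above no longer
carries a single oop at TypeFunc::Parms: the klass goes there and one additional edge is
added per field. A rough standalone illustration of the resulting input layout (assuming
TypeFunc::Parms == 5 and made-up field names; not HotSpot code):

    // return_edges.cpp -- input layout of a Return with a flattened value type result
    #include <cstdio>
    #include <string>
    #include <vector>

    static const int Parms = 5;  // stands in for TypeFunc::Parms

    int main() {
      std::vector<std::string> ret = {
        "control", "i_o", "memory", "frameptr", "returnadr"
      };

      bool returns_value_type_as_fields = true;
      if (returns_value_type_as_fields) {
        ret.push_back("klass");    // cf. vt->pass_klass(ret, TypeFunc::Parms, kit)
        ret.push_back("field a");  // cf. vt->pass_fields(ret, TypeFunc::Parms+1, kit)
        ret.push_back("field b");
      } else {
        ret.push_back("oop");      // the usual single return value
      }

      for (size_t i = 0; i < ret.size(); i++) {
        printf("in(%zu)%s = %s\n", i, i == (size_t)Parms ? " [Parms]" : "", ret[i].c_str());
      }
      return 0;
    }
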
 986 
 987 //------------------------rethrow_exceptions-----------------------------------
 988 // Bind all exception states in the list into a single RethrowNode.
 989 void Compile::rethrow_exceptions(JVMState* jvms) {
 990   GraphKit kit(jvms);
 991   if (!kit.has_exceptions())  return;  // nothing to generate
 992   // Load my combined exception state into the kit, with all phis transformed:
 993   SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
 994   Node* ex_oop = kit.use_exception_state(ex_map);
 995   RethrowNode* exit = new RethrowNode(kit.control(),
 996                                       kit.i_o(), kit.reset_memory(),
 997                                       kit.frameptr(), kit.returnadr(),
 998                                       // like a return but with exception input
 999                                       ex_oop);
1000   // bind to root


1116     }
1117   }
1118 
1119   // Any method can write a @Stable field; insert memory barriers
1120   // after those also. Can't bind predecessor allocation node (if any)
1121   // with barrier because allocation doesn't always dominate
1122   // MemBarRelease.
1123   if (wrote_stable()) {
1124     _exits.insert_mem_bar(Op_MemBarRelease);
1125     if (PrintOpto && (Verbose || WizardMode)) {
1126       method()->print_name();
1127       tty->print_cr(" writes @Stable and needs a memory barrier");
1128     }
1129   }
1130 
1131   for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1132     // transform each slice of the original memphi:
1133     mms.set_memory(_gvn.transform(mms.memory()));
1134   }
1135 
1136   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
1137     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
1138     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
1139     if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1140       // In case of concurrent class loading, the type we set for the
1141       // ret_phi in build_exits() may have been too optimistic and the
1142       // ret_phi may be top now.
1143       // Otherwise, we've encountered an error and have to mark the method as
1144       // not compilable. Just using an assertion instead would be dangerous
1145       // as this could lead to an infinite compile loop in non-debug builds.
1146       {
1147         MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
1148         if (C->env()->system_dictionary_modification_counter_changed()) {
1149           C->record_failure(C2Compiler::retry_class_loading_during_parsing());
1150         } else {
1151           C->record_method_not_compilable("Can't determine return type.");
1152         }
1153       }
1154       return;
1155     }
1156     if (ret_type->isa_int()) {
1157       BasicType ret_bt = method()->return_type()->basic_type();


2296   // Set starting bci for uncommon trap.
2297   set_parse_bci(0);
2298 
2299   const TypePtr* adr_type = TypeRawPtr::make((address)mc);
2300   Node* mc_adr = makecon(adr_type);
2301   Node* cnt_adr = basic_plus_adr(mc_adr, mc_adr, in_bytes(MethodCounters::nmethod_age_offset()));
2302   Node* cnt = make_load(control(), cnt_adr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
2303   Node* decr = _gvn.transform(new SubINode(cnt, makecon(TypeInt::ONE)));
2304   store_to_memory(control(), cnt_adr, decr, T_INT, adr_type, MemNode::unordered);
2305   Node *chk   = _gvn.transform(new CmpINode(decr, makecon(TypeInt::ZERO)));
2306   Node* tst   = _gvn.transform(new BoolNode(chk, BoolTest::gt));
2307   { BuildCutout unless(this, tst, PROB_ALWAYS);
2308     uncommon_trap(Deoptimization::Reason_tenured,
2309                   Deoptimization::Action_make_not_entrant);
2310   }
2311 }
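
The fragment above emits the nmethod aging countdown: it loads
MethodCounters::nmethod_age, stores back the decremented value, and branches to a
Reason_tenured uncommon trap (making the method not entrant) once the decremented
counter is no longer greater than zero. A standalone sketch of the same countdown in
plain C++ (the struct is a simplified stand-in, not the real MethodCounters):

    // nmethod_age.cpp -- countdown pattern behind the tenured uncommon trap
    #include <cstdio>

    struct MethodCounters { int nmethod_age; };  // simplified stand-in

    // Decrement and store the age; stay on the fast path only while it is > 0
    // (the BoolTest::gt above), otherwise take the trap path.
    static bool decrement_age_and_check(MethodCounters* mc) {
      int decr = mc->nmethod_age - 1;
      mc->nmethod_age = decr;
      return decr > 0;
    }

    int main() {
      MethodCounters mc = { 3 };
      while (decrement_age_and_check(&mc)) {
        printf("still young, nmethod_age now %d\n", mc.nmethod_age);
      }
      printf("aged out at %d: would hit the Reason_tenured uncommon trap\n",
             mc.nmethod_age);
      return 0;
    }
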
2312 
2313 //------------------------------return_current---------------------------------
2314 // Append current _map to _exit_return
2315 void Parse::return_current(Node* value) {
2316   if (value != NULL && value->is_ValueType() && !_caller->has_method() &&
2317       !tf()->returns_value_type_as_fields()) {
2318     // Returning from the root JVMState without multiple return values,
2319     // make sure the value type is allocated
2320     value = value->as_ValueType()->store_to_memory(this);
2321   }
2322 
2323   if (RegisterFinalizersAtInit &&
2324       method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2325     call_register_finalizer();
2326   }
2327 
2328   // Do not set_parse_bci, so that return goo is credited to the return insn.
2329   // vreturn can trigger an allocation, so it can throw. Setting the bci
2330   // here breaks exception handling; leaving it unset does not seem to
2331   // break anything.
2332   //  set_bci(InvocationEntryBci);
2333   if (method()->is_synchronized() && GenerateSynchronizationCode) {
2334     shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2335   }
2336   if (C->env()->dtrace_method_probes()) {
2337     make_dtrace_method_exit(method());
2338   }
2339   SafePointNode* exit_return = _exits.map();
2340   exit_return->in( TypeFunc::Control  )->add_req( control() );
2341   exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
2342   Node *mem = exit_return->in( TypeFunc::Memory   );
2343   for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2344     if (mms.is_empty()) {
2345       // get a copy of the base memory, and patch just this one input
2346       const TypePtr* adr_type = mms.adr_type(C);
2347       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2348       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2349       gvn().set_type_bottom(phi);
2350       phi->del_req(phi->req()-1);  // prepare to re-patch
2351       mms.set_memory(phi);
2352     }

