src/hotspot/share/opto/parse1.cpp

 106                                      const Type* type,
 107                                      Node* local_addrs,
 108                                      Node* local_addrs_base) {
 109   BasicType bt = type->basic_type();
 110   if (type == TypePtr::NULL_PTR) {
 111     // Ptr types are mixed together with T_ADDRESS, but NULL is
 112     // really for T_OBJECT types, so correct it.
 113     bt = T_OBJECT;
 114   }
 115   Node *mem = memory(Compile::AliasIdxRaw);
 116   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
 117   Node *ctl = control();
 118 
 119   // Very similar to LoadNode::make, except we handle unaligned longs and
 120   // doubles on Sparc.  Intel can handle them just fine directly.
 121   Node *l = NULL;
 122   switch (bt) {                // Signature is flattened
 123   case T_INT:     l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        MemNode::unordered); break;
 124   case T_FLOAT:   l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         MemNode::unordered); break;
 125   case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,  MemNode::unordered); break;

 126   case T_OBJECT:  l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
 127   case T_VALUETYPE: {
 128     // Load oop and create a new ValueTypeNode
 129     const TypeInstPtr* ptr_type = TypeInstPtr::make(TypePtr::BotPTR, type->is_valuetype()->value_klass());
 130     l = _gvn.transform(new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, ptr_type, MemNode::unordered));
 131     l = ValueTypeNode::make_from_oop(this, l, type->is_valuetype()->value_klass(), /* buffer_check */ true, /* null2default */ false);
 132     break;
 133   }
 134   case T_LONG:
 135   case T_DOUBLE: {
 136     // Since arguments are in reverse order, the argument address 'adr'
 137     // refers to the back half of the long/double.  Recompute adr.
 138     adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
 139     if (Matcher::misaligned_doubles_ok) {
 140       l = (bt == T_DOUBLE)
 141         ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
 142         : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
 143     } else {
 144       l = (bt == T_DOUBLE)
 145         ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
 146         : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
 147     }
 148     break;
 149   }
 150   default: ShouldNotReachHere();
 151   }
 152   return _gvn.transform(l);
 153 }
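
The negative offsets above encode the interpreter's frame layout: locals sit at
decreasing addresses from local_addrs_base, and for a two-word long/double the
slot address refers to the value's back half, which is why the T_LONG/T_DOUBLE
case recomputes adr with -(index+1)*wordSize. A standalone sketch of the
arithmetic, assuming 64-bit words (names are local stand-ins, not HotSpot's):

    #include <cstdint>
    #include <cstdio>

    // Locals grow downward: slot i sits at base - i*wordSize. A two-slot
    // long/double occupying slots i and i+1 must be loaded starting at the
    // higher-numbered slot, hence the (index + 1) recompute.
    static const intptr_t wordSize = 8; // assumption: 64-bit word size

    uintptr_t one_slot_addr(uintptr_t base, int index) {
      return base - index * wordSize;
    }

    uintptr_t two_slot_addr(uintptr_t base, int index) {
      return base - (index + 1) * wordSize;
    }

    int main() {
      uintptr_t base = 0x10000;
      printf("slot 3 (int):  %#zx\n", (size_t)one_slot_addr(base, 3));
      printf("slot 3 (long): %#zx\n", (size_t)two_slot_addr(base, 3));
      return 0;
    }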
 154 
 155 // Helper routine to prevent the interpreter from handing
 156 // unexpected typestate to an OSR method.
 157 // The Node l is a value newly dug out of the interpreter frame.
 158 // The type is the type predicted by ciTypeFlow.  Note that it is
 159 // not a general type, but can only come from Type::get_typeflow_type.
 160 // The safepoint is a map which will feed an uncommon trap.
 161 Node* Parse::check_interpreter_type(Node* l, const Type* type,
 162                                     SafePointNode* &bad_type_exit) {
 163 
 164   const TypeOopPtr* tp = type->isa_oopptr();




 165 
 166   // TypeFlow may assert null-ness if a type appears unloaded.
 167   if (type == TypePtr::NULL_PTR ||
 168       (tp != NULL && !tp->klass()->is_loaded())) {
 169     // Value must be null, not a real oop.
 170     Node* chk = _gvn.transform( new CmpPNode(l, null()) );
 171     Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
 172     IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
 173     set_control(_gvn.transform( new IfTrueNode(iff) ));
 174     Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
 175     bad_type_exit->control()->add_req(bad_type);
 176     l = null();
 177   }
 178 
 179   // Typeflow can also cut off paths from the CFG, based on
 180   // types which appear unloaded, or call sites which appear unlinked.
 181   // When paths are cut off, values at later merge points can rise
 182   // toward more specific classes.  Make sure these specific classes
 183   // are still in effect.
 184   if (tp != NULL && tp->klass() != C->env()->Object_klass()) {
 185     // TypeFlow asserted a specific object type.  Value must have that type.
 186     Node* bad_type_ctrl = NULL;






 187     l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
 188     bad_type_exit->control()->add_req(bad_type_ctrl);
 189   }
 190 
 191   BasicType bt_l = _gvn.type(l)->basic_type();
 192   BasicType bt_t = type->basic_type();
 193   assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
 194   return l;
 195 }
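
The guard above has one shape throughout: every way the interpreter value can
contradict the ciTypeFlow prediction is wired to the shared bad_type_exit,
which later feeds a single uncommon trap instead of throwing inline. A
plain-C++ restatement of the decision (klass ids stand in for klass pointers,
and the equality test stands in for the real subtype check in gen_checkcast):

    // Not the Node-based IR, just the decision it encodes.
    struct OsrValue {
      bool is_null;
      int  klass_id;
    };

    enum class Verdict { Accept, BadTypeExit };

    Verdict check_typestate(OsrValue v, bool typeflow_asserts_null,
                            int predicted_klass_id /* -1 = no prediction */) {
      if (typeflow_asserts_null) {
        // The type appeared unloaded: the value must be null.
        return v.is_null ? Verdict::Accept : Verdict::BadTypeExit;
      }
      if (predicted_klass_id != -1 && !v.is_null &&
          v.klass_id != predicted_klass_id) {
        // TypeFlow asserted a specific class and the value disagrees.
        return Verdict::BadTypeExit;
      }
      return Verdict::Accept;
    }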
 196 
 197 // Helper routine which sets up elements of the initial parser map when
 198 // performing a parse for on stack replacement.  Add values into map.
 199 // The only parameter contains the address of the interpreter arguments.
 200 void Parse::load_interpreter_state(Node* osr_buf) {
 201   int index;
 202   int max_locals = jvms()->loc_size();
 203   int max_stack  = jvms()->stk_size();
 204 
 205   // Mismatch between method and jvms can occur since map briefly held
 206   // an OSR entry state (which takes up one RawPtr word).


 592   if (depth() == 1 && !failing()) {
 593     // Add check to deoptimize the nmethod if RTM state was changed
 594     rtm_deopt();
 595   }
 596 
 597   // Check for bailouts during method entry or RTM state check setup.
 598   if (failing()) {
 599     if (log)  log->done("parse");
 600     C->set_default_node_notes(caller_nn);
 601     return;
 602   }
 603 
 604   // Handle value type arguments
 605   int arg_size_sig = tf()->domain_sig()->cnt();
 606   for (uint i = 0; i < (uint)arg_size_sig; i++) {
 607     Node* parm = map()->in(i);
 608     const Type* t = _gvn.type(parm);
 609     if (!ValueTypePassFieldsAsArgs) {
 610       if (t->is_valuetypeptr()) {
 611         // Create ValueTypeNode from the oop and replace the parameter
 612         assert(!t->is_ptr()->maybe_null(), "value type arguments should never be null");
 613         Node* vt = ValueTypeNode::make_from_oop(this, parm, t->value_klass(), /* buffer_check */ true, /* null2default */ false);

 614         map()->replace_edge(parm, vt);
 615       }

 616     } else {
 617       assert(false, "FIXME");
 618       // TODO move the code from build_start_state and do_late_inline here
 619     }
 620   }
 621 
 622   entry_map = map();  // capture any changes performed by method setup code
 623   assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
 624 
 625   // We begin parsing as if we have just encountered a jump to the
 626   // method entry.
 627   Block* entry_block = start_block();
 628   assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
 629   set_map_clone(entry_map);
 630   merge_common(entry_block, entry_block->next_path_num());
 631 
 632 #ifndef PRODUCT
 633   BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
 634   set_parse_histogram( parse_histogram_obj );
 635 #endif


 803   // Add a return value to the exit state.  (Do not push it yet.)
 804   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
 805     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
 806     if (ret_type->isa_int()) {
 807       BasicType ret_bt = method()->return_type()->basic_type();
 808       if (ret_bt == T_BOOLEAN ||
 809           ret_bt == T_CHAR ||
 810           ret_bt == T_BYTE ||
 811           ret_bt == T_SHORT) {
 812         ret_type = TypeInt::INT;
 813       }
 814     }
 815 
 816     // Don't "bind" an unloaded return klass to the ret_phi. If the klass
 817     // becomes loaded during the subsequent parsing, the loaded and unloaded
 818     // types will not join when we transform and push in do_exits().
 819     const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
 820     if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
 821       ret_type = TypeOopPtr::BOTTOM;
 822     }
 823     if ((_caller->has_method() || tf()->returns_value_type_as_fields()) && ret_type->is_valuetypeptr()) {

 824       // When inlining or with multiple return values: return the value
 825       // type as a ValueTypeNode, not as an oop
 826       ret_type = TypeValueType::make(ret_type->value_klass());
 827     }
 828     int         ret_size = type2size[ret_type->basic_type()];
 829     Node*       ret_phi  = new PhiNode(region, ret_type);
 830     gvn().set_type_bottom(ret_phi);
 831     _exits.ensure_stack(ret_size);
 832     assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
 833     assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
 834     _exits.set_argument(0, ret_phi);  // here is where the parser finds it
 835     // Note:  ret_phi is not yet pushed, until do_exits.
 836   }
 837 }
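
The int widening above exists because compiled code carries boolean, char,
byte and short results in a full int register, so the exit phi must be typed
as plain int rather than the narrower declared type. A tiny standalone
restatement (enum values are local stand-ins, not HotSpot's BasicType):

    enum BasicType { T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT, T_OTHER };

    BasicType exit_phi_type(BasicType declared) {
      switch (declared) {
        case T_BOOLEAN: case T_CHAR: case T_BYTE: case T_SHORT:
          return T_INT;     // sub-int results travel as a full int
        default:
          return declared;  // other types keep their declared width
      }
    }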
 838 
 839 //----------------------------build_start_state-------------------------------
 840 // Construct a state which contains only the incoming arguments from an
 841 // unknown caller.  The method & bci will be NULL & InvocationEntryBci.
 842 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
 843   int        arg_size_sig = tf->domain_sig()->cnt();


1723   // Execution needs to restart at the next bytecode (entry of next
1724   // block)
1725   if (target->is_merged() ||
1726       pnum > PhiNode::Input ||
1727       target->is_handler() ||
1728       target->is_loop_head()) {
1729     set_parse_bci(target->start());
1730     for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1731       Node* n = map()->in(j);                 // Incoming change to target state.
1732       const Type* t = NULL;
1733       if (tmp_jvms->is_loc(j)) {
1734         t = target->local_type_at(j - tmp_jvms->locoff());
1735       } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1736         t = target->stack_type_at(j - tmp_jvms->stkoff());
1737       }
1738       if (t != NULL && t != Type::BOTTOM) {
1739         if (n->is_ValueType() && !t->isa_valuetype()) {
1740           // Allocate value type in src block to be able to merge it with oop in target block
1741           map()->set_req(j, ValueTypePtrNode::make_from_value_type(this, n->as_ValueType(), true));
1742         }
1743         if (t->isa_valuetype() && !n->is_ValueType()) {
1744           // check for a null constant
1745           assert(n->bottom_type()->remove_speculative() == TypePtr::NULL_PTR, "Anything other than null?");
1746           uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
1747           assert(stopped(), "should be a dead path now");
1748           set_parse_bci(old_bci);
1749           return;
1750         }
1751       }
1752     }
1753   }
1754   map()->set_jvms(old_jvms);
1755   set_parse_bci(old_bci);
1756 
1757   if (!target->is_merged()) {   // No prior mapping at this bci
1758     if (TraceOptoParse) { tty->print(" with empty state");  }
1759 
1760     // If this path is dead, do not bother capturing it as a merge.
1761     // It is "as if" we had one fewer predecessor from the beginning.
1762     if (stopped()) {
1763       if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
1764       return;
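
Before merging into the target block, this hunk reconciles each map slot with
the type the target expects: a scalarized value type meeting an oop slot is
buffered (allocated) so both predecessors agree on a representation, while a
null flowing into a value-type slot can only be a dead path and deoptimizes.
A standalone sketch of that decision (enums are illustrative, not HotSpot
types):

    enum class Incoming { Oop, ScalarizedValueType, Null };
    enum class Expected { Oop, ValueType, Other };
    enum class Action   { Keep, BufferToHeap, NullCheckTrap };

    Action reconcile_slot(Incoming in, Expected target) {
      if (in == Incoming::ScalarizedValueType && target != Expected::ValueType)
        return Action::BufferToHeap;   // allocate so it can merge with an oop
      if (target == Expected::ValueType && in == Incoming::Null)
        return Action::NullCheckTrap;  // null never merges into a value type
      return Action::Keep;
    }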


1872                 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1873               // BoxLock nodes are not commoned.
1874               // Use old BoxLock node as merged box.
1875               assert(newin->jvms()->is_monitor_box(j), "sanity");
1876               // This assert also tests that nodes are BoxLock.
1877               assert(BoxLockNode::same_slot(n, m), "sanity");
1878               C->gvn_replace_by(n, m);
1879             } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1880               phi = ensure_phi(j, nophi);
1881             }
1882           }
1883           break;
1884         }
1885       }
1886       // At this point, n might be top if:
1887       //  - there is no phi (because TypeFlow detected a conflict), or
 1888       //  - the corresponding control edge is top (a dead incoming path)
1889       // It is a bug if we create a phi which sees a garbage value on a live path.
1890 
1891       // Merging two value types?
1892       if (phi != NULL && n->isa_ValueType()) {
1893         // Reload current state because it may have been updated by ensure_phi
1894         m = map()->in(j);
1895         ValueTypeNode* vtm = m->as_ValueType(); // Current value type
1896         ValueTypeNode* vtn = n->as_ValueType(); // Incoming value type
1897         assert(vtm->get_oop() == phi, "Value type should have Phi input");
1898         if (TraceOptoParse) {
1899 #ifdef ASSERT
1900           tty->print_cr("\nMerging value types");
1901           tty->print_cr("Current:");
1902           vtm->dump(2);
1903           tty->print_cr("Incoming:");
1904           vtn->dump(2);
1905           tty->cr();
1906 #endif
1907         }
1908         // Do the merge
1909         vtm->merge_with(&_gvn, vtn, pnum, last_merge);
1910         if (last_merge) {
1911           map()->set_req(j, _gvn.transform_no_reclaim(vtm));
1912           record_for_igvn(vtm);
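
merge_with combines two scalarized value types field by field: each field
position holds a phi that gains one input per predecessor, and only the last
merge transforms the result and installs it back into the map slot. A
container-based sketch of the idea (not HotSpot API):

    #include <vector>

    struct FieldPhi     { std::vector<int> inputs; };  // one input per path
    struct ScalarizedVT { std::vector<FieldPhi> fields; };

    // Append the incoming path's field values as fresh phi inputs.
    void merge_fields(ScalarizedVT& current, const std::vector<int>& incoming) {
      for (size_t i = 0; i < current.fields.size() && i < incoming.size(); i++) {
        current.fields[i].inputs.push_back(incoming[i]);
      }
    }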


2314   // Set starting bci for uncommon trap.
2315   set_parse_bci(0);
2316 
2317   const TypePtr* adr_type = TypeRawPtr::make((address)mc);
2318   Node* mc_adr = makecon(adr_type);
2319   Node* cnt_adr = basic_plus_adr(mc_adr, mc_adr, in_bytes(MethodCounters::nmethod_age_offset()));
2320   Node* cnt = make_load(control(), cnt_adr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
2321   Node* decr = _gvn.transform(new SubINode(cnt, makecon(TypeInt::ONE)));
2322   store_to_memory(control(), cnt_adr, decr, T_INT, adr_type, MemNode::unordered);
2323   Node *chk   = _gvn.transform(new CmpINode(decr, makecon(TypeInt::ZERO)));
2324   Node* tst   = _gvn.transform(new BoolNode(chk, BoolTest::gt));
2325   { BuildCutout unless(this, tst, PROB_ALWAYS);
2326     uncommon_trap(Deoptimization::Reason_tenured,
2327                   Deoptimization::Action_make_not_entrant);
2328   }
2329 }
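
The IR emitted above implements nmethod aging: load the per-method age
counter, decrement it, store it back, and deoptimize to not-entrant once the
counter is no longer positive, so cold code eventually falls back to the
interpreter and can be recompiled or flushed. A standalone restatement (the
field name mirrors MethodCounters; the rest is illustrative):

    struct MethodCounters { int nmethod_age; };

    bool keep_running(MethodCounters* mc) {
      int decr = mc->nmethod_age - 1;  // SubINode(cnt, 1)
      mc->nmethod_age = decr;          // store_to_memory
      return decr > 0;                 // CmpINode + BoolTest::gt; false traps
    }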
2330 
2331 //------------------------------return_current---------------------------------
2332 // Append current _map to _exit_return
2333 void Parse::return_current(Node* value) {
2334   if (value != NULL && value->is_ValueType() && !_caller->has_method()) {
2335     // Returning a value type from root JVMState
2336     if (tf()->returns_value_type_as_fields()) {

2337       // Value type is returned as fields; make sure non-flattened value type fields are allocated
2338       value = value->as_ValueType()->allocate_fields(this);
2339     } else {
2340       // Value type is returned as an oop; make sure it's allocated
2341       value = value->as_ValueType()->allocate(this)->get_oop();
2342     }
2343   }
2344 
2345   if (RegisterFinalizersAtInit &&
2346       method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2347     call_register_finalizer();
2348   }
2349 
2350   // Do not set_parse_bci, so that return goo is credited to the return insn.
2351   // vreturn can trigger an allocation so vreturn can throw. Setting
2352   // the bci here breaks exception handling. Commenting this out
2353   // doesn't seem to break anything.
2354   //  set_bci(InvocationEntryBci);
2355   if (method()->is_synchronized() && GenerateSynchronizationCode) {
2356     shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2357   }
2358   if (C->env()->dtrace_method_probes()) {
2359     make_dtrace_method_exit(method());
2360   }
2361   // frame pointer is always the same, already captured
2362   if (value != NULL) {
2363     Node* phi = _exits.argument(0);
2364     const TypeOopPtr* tr = phi->bottom_type()->isa_oopptr();
2365     if (tr && tr->isa_instptr() && tr->klass()->is_loaded() &&
2366         tr->klass()->is_interface()) {


2367       // If returning oops to an interface-return, there is a silent free
2368       // cast from oop to interface allowed by the Verifier. Make it explicit here.
2369       const TypeInstPtr* tp = value->bottom_type()->isa_instptr();
2370       if (tp && tp->klass()->is_loaded() && !tp->klass()->is_interface()) {
2371         // sharpen the type eagerly; this eases certain assert checking
2372         if (tp->higher_equal(TypeInstPtr::NOTNULL)) {
2373           tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
2374         }
2375         value = _gvn.transform(new CheckCastPPNode(0, value, tr));
2376       }
2377     } else if (tr && tr->isa_instptr() && value->is_ValueType()) {
2378       // Value type to Object return
2379       assert(tr->isa_instptr()->klass()->is_java_lang_Object(), "must be java.lang.Object");
2380       assert(_caller->has_method(), "value type should be returned as oop");
2381     } else if (phi->bottom_type()->isa_valuetype() && !value->is_ValueType()) {
2382       assert(value->bottom_type()->remove_speculative() == TypePtr::NULL_PTR, "Anything other than null?");
2383       inc_sp(1);
2384       uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
2385       dec_sp(1);
2386       return;
2387     } else {
2388       // Handle returns of oop-arrays to an arrays-of-interface return
2389       const TypeInstPtr* phi_tip;
2390       const TypeInstPtr* val_tip;
2391       Type::get_arrays_base_elements(phi->bottom_type(), value->bottom_type(), &phi_tip, &val_tip);
2392       if (phi_tip != NULL && phi_tip->is_loaded() && phi_tip->klass()->is_interface() &&
2393           val_tip != NULL && val_tip->is_loaded() && !val_tip->klass()->is_interface()) {
2394         value = _gvn.transform(new CheckCastPPNode(0, value, phi->bottom_type()));
2395       }
2396     }
2397     phi->add_req(value);
2398   }
2399 
2400   SafePointNode* exit_return = _exits.map();
2401   exit_return->in( TypeFunc::Control  )->add_req( control() );
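
The interface-return fix-up above compensates for the Verifier, which lets an
oop of class type flow into an interface-typed return without any cast; the
parser re-types the value explicitly so the exit phi's type stays consistent.
A plain-C++ sketch of the retyping decision (not the Node IR):

    struct TypedValue { int payload; const char* static_type; };

    TypedValue prepare_return(TypedValue v, const char* phi_type,
                              bool phi_is_interface, bool value_is_interface) {
      if (phi_is_interface && !value_is_interface) {
        v.static_type = phi_type;  // stands in for CheckCastPPNode: same
                                   // value, explicitly retyped view
      }
      return v;
    }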




 106                                      const Type* type,
 107                                      Node* local_addrs,
 108                                      Node* local_addrs_base) {
 109   BasicType bt = type->basic_type();
 110   if (type == TypePtr::NULL_PTR) {
 111     // Ptr types are mixed together with T_ADDRESS, but NULL is
 112     // really for T_OBJECT types, so correct it.
 113     bt = T_OBJECT;
 114   }
 115   Node *mem = memory(Compile::AliasIdxRaw);
 116   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
 117   Node *ctl = control();
 118 
 119   // Very similar to LoadNode::make, except we handle unaligned longs and
 120   // doubles on Sparc.  Intel can handle them just fine directly.
 121   Node *l = NULL;
 122   switch (bt) {                // Signature is flattened
 123   case T_INT:     l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        MemNode::unordered); break;
 124   case T_FLOAT:   l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         MemNode::unordered); break;
 125   case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,  MemNode::unordered); break;
 126   case T_VALUETYPE:
 127   case T_OBJECT:  l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;







 128   case T_LONG:
 129   case T_DOUBLE: {
 130     // Since arguments are in reverse order, the argument address 'adr'
 131     // refers to the back half of the long/double.  Recompute adr.
 132     adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
 133     if (Matcher::misaligned_doubles_ok) {
 134       l = (bt == T_DOUBLE)
 135         ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
 136         : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
 137     } else {
 138       l = (bt == T_DOUBLE)
 139         ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
 140         : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
 141     }
 142     break;
 143   }
 144   default: ShouldNotReachHere();
 145   }
 146   return _gvn.transform(l);
 147 }
 148 
 149 // Helper routine to prevent the interpreter from handing
 150 // unexpected typestate to an OSR method.
 151 // The Node l is a value newly dug out of the interpreter frame.
 152 // The type is the type predicted by ciTypeFlow.  Note that it is
 153 // not a general type, but can only come from Type::get_typeflow_type.
 154 // The safepoint is a map which will feed an uncommon trap.
 155 Node* Parse::check_interpreter_type(Node* l, const Type* type,
 156                                     SafePointNode* &bad_type_exit) {

 157   const TypeOopPtr* tp = type->isa_oopptr();
 158   if (type->isa_valuetype() != NULL) {
 159     // The interpreter passes value types as oops
 160     tp = TypeOopPtr::make_from_klass(type->isa_valuetype()->value_klass());
 161   }
 162 
 163   // TypeFlow may assert null-ness if a type appears unloaded.
 164   if (type == TypePtr::NULL_PTR ||
 165       (tp != NULL && !tp->klass()->is_loaded())) {
 166     // Value must be null, not a real oop.
 167     Node* chk = _gvn.transform( new CmpPNode(l, null()) );
 168     Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
 169     IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
 170     set_control(_gvn.transform( new IfTrueNode(iff) ));
 171     Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
 172     bad_type_exit->control()->add_req(bad_type);
 173     l = null();
 174   }
 175 
 176   // Typeflow can also cut off paths from the CFG, based on
 177   // types which appear unloaded, or call sites which appear unlinked.
 178   // When paths are cut off, values at later merge points can rise
 179   // toward more specific classes.  Make sure these specific classes
 180   // are still in effect.
 181   if (tp != NULL && tp->klass() != C->env()->Object_klass()) {
 182     // TypeFlow asserted a specific object type.  Value must have that type.
 183     Node* bad_type_ctrl = NULL;
 184     if (tp->is_valuetypeptr()) {
 185       // Check value types for null here to prevent checkcast from adding an
 186       // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
 187       l = null_check_oop(l, &bad_type_ctrl);
 188       bad_type_exit->control()->add_req(bad_type_ctrl);
 189     }
 190     l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
 191     bad_type_exit->control()->add_req(bad_type_ctrl);
 192   }
 193 
 194   BasicType bt_l = _gvn.type(l)->basic_type();
 195   BasicType bt_t = type->basic_type();
 196   assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
 197   return l;
 198 }
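
New in this version: value types are null-checked before gen_checkcast so the
null path joins the shared bad-type exit instead of materializing an exception
state ahead of the method's first bytecode. The admission logic, restated
standalone (klass ids stand in for klass pointers, equality for the subtype
check):

    enum class Route { Continue, BadTypeExit };

    Route admit_value_type(const void* oop, int klass_id,
                           int predicted_klass_id) {
      if (oop == nullptr) {
        return Route::BadTypeExit;    // null_check_oop path
      }
      if (klass_id != predicted_klass_id) {
        return Route::BadTypeExit;    // checkcast failure path
      }
      return Route::Continue;
    }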
 199 
 200 // Helper routine which sets up elements of the initial parser map when
 201 // performing a parse for on stack replacement.  Add values into map.
 202 // The only parameter contains the address of the interpreter arguments.
 203 void Parse::load_interpreter_state(Node* osr_buf) {
 204   int index;
 205   int max_locals = jvms()->loc_size();
 206   int max_stack  = jvms()->stk_size();
 207 
 208   // Mismatch between method and jvms can occur since map briefly held
 209   // an OSR entry state (which takes up one RawPtr word).


 595   if (depth() == 1 && !failing()) {
 596     // Add check to deoptimize the nmethod if RTM state was changed
 597     rtm_deopt();
 598   }
 599 
 600   // Check for bailouts during method entry or RTM state check setup.
 601   if (failing()) {
 602     if (log)  log->done("parse");
 603     C->set_default_node_notes(caller_nn);
 604     return;
 605   }
 606 
 607   // Handle value type arguments
 608   int arg_size_sig = tf()->domain_sig()->cnt();
 609   for (uint i = 0; i < (uint)arg_size_sig; i++) {
 610     Node* parm = map()->in(i);
 611     const Type* t = _gvn.type(parm);
 612     if (!ValueTypePassFieldsAsArgs) {
 613       if (t->is_valuetypeptr()) {
 614         // Create ValueTypeNode from the oop and replace the parameter
 615         assert(!t->maybe_null(), "value type arguments should never be null");
 616         if (t->value_klass()->is_scalarizable()) {
 617           Node* vt = ValueTypeNode::make_from_oop(this, parm, t->value_klass(), /* buffer_check */ true);
 618           map()->replace_edge(parm, vt);
 619         }
 620       }
 621     } else {
 622       assert(false, "FIXME");
 623       // TODO move the code from build_start_state and do_late_inline here
 624     }
 625   }
 626 
 627   entry_map = map();  // capture any changes performed by method setup code
 628   assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
 629 
 630   // We begin parsing as if we have just encountered a jump to the
 631   // method entry.
 632   Block* entry_block = start_block();
 633   assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
 634   set_map_clone(entry_map);
 635   merge_common(entry_block, entry_block->next_path_num());
 636 
 637 #ifndef PRODUCT
 638   BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
 639   set_parse_histogram( parse_histogram_obj );
 640 #endif
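
The argument loop now also gates on is_scalarizable(): only value klasses the
compiler can scalarize have their oop parameter replaced by a ValueTypeNode;
the rest keep the incoming oop untouched. A sketch of the walk (types and
names hypothetical, not HotSpot's):

    #include <vector>

    struct Param {
      bool is_value_type_ptr;
      bool klass_is_scalarizable;
      bool scalarized = false;
    };

    void handle_value_type_args(std::vector<Param>& params) {
      for (Param& p : params) {
        if (p.is_value_type_ptr && p.klass_is_scalarizable) {
          p.scalarized = true;  // stands in for make_from_oop + replace_edge
        }
      }
    }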


 808   // Add a return value to the exit state.  (Do not push it yet.)
 809   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
 810     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
 811     if (ret_type->isa_int()) {
 812       BasicType ret_bt = method()->return_type()->basic_type();
 813       if (ret_bt == T_BOOLEAN ||
 814           ret_bt == T_CHAR ||
 815           ret_bt == T_BYTE ||
 816           ret_bt == T_SHORT) {
 817         ret_type = TypeInt::INT;
 818       }
 819     }
 820 
 821     // Don't "bind" an unloaded return klass to the ret_phi. If the klass
 822     // becomes loaded during the subsequent parsing, the loaded and unloaded
 823     // types will not join when we transform and push in do_exits().
 824     const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
 825     if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
 826       ret_type = TypeOopPtr::BOTTOM;
 827     }
 828     if ((_caller->has_method() || tf()->returns_value_type_as_fields()) &&
 829         ret_type->is_valuetypeptr() && ret_type->value_klass()->is_scalarizable()) {
 830       // When inlining or with multiple return values: return the value
 831       // type as a ValueTypeNode, not as an oop
 832       ret_type = TypeValueType::make(ret_type->value_klass());
 833     }
 834     int         ret_size = type2size[ret_type->basic_type()];
 835     Node*       ret_phi  = new PhiNode(region, ret_type);
 836     gvn().set_type_bottom(ret_phi);
 837     _exits.ensure_stack(ret_size);
 838     assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
 839     assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
 840     _exits.set_argument(0, ret_phi);  // here is where the parser finds it
 841     // Note:  ret_phi is not yet pushed, until do_exits.
 842   }
 843 }
 844 
 845 //----------------------------build_start_state-------------------------------
 846 // Construct a state which contains only the incoming arguments from an
 847 // unknown caller.  The method & bci will be NULL & InvocationEntryBci.
 848 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
 849   int        arg_size_sig = tf->domain_sig()->cnt();


1729   // Execution needs to restart at the next bytecode (entry of next
1730   // block)
1731   if (target->is_merged() ||
1732       pnum > PhiNode::Input ||
1733       target->is_handler() ||
1734       target->is_loop_head()) {
1735     set_parse_bci(target->start());
1736     for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1737       Node* n = map()->in(j);                 // Incoming change to target state.
1738       const Type* t = NULL;
1739       if (tmp_jvms->is_loc(j)) {
1740         t = target->local_type_at(j - tmp_jvms->locoff());
1741       } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1742         t = target->stack_type_at(j - tmp_jvms->stkoff());
1743       }
1744       if (t != NULL && t != Type::BOTTOM) {
1745         if (n->is_ValueType() && !t->isa_valuetype()) {
1746           // Allocate value type in src block to be able to merge it with oop in target block
1747           map()->set_req(j, ValueTypePtrNode::make_from_value_type(this, n->as_ValueType(), true));
1748         }
1749         if ((t->isa_valuetype() || t->is_valuetypeptr()) && !n->is_ValueType() && gvn().type(n)->maybe_null()) {

1750           assert(n->bottom_type()->remove_speculative() == TypePtr::NULL_PTR, "Anything other than null?");
1751           uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
1752           assert(stopped(), "should be a dead path now");
1753           set_parse_bci(old_bci);
1754           return;
1755         }
1756       }
1757     }
1758   }
1759   map()->set_jvms(old_jvms);
1760   set_parse_bci(old_bci);
1761 
1762   if (!target->is_merged()) {   // No prior mapping at this bci
1763     if (TraceOptoParse) { tty->print(" with empty state");  }
1764 
1765     // If this path is dead, do not bother capturing it as a merge.
1766     // It is "as if" we had one fewer predecessor from the beginning.
1767     if (stopped()) {
1768       if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
1769       return;


1877                 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1878               // BoxLock nodes are not commoned.
1879               // Use old BoxLock node as merged box.
1880               assert(newin->jvms()->is_monitor_box(j), "sanity");
1881               // This assert also tests that nodes are BoxLock.
1882               assert(BoxLockNode::same_slot(n, m), "sanity");
1883               C->gvn_replace_by(n, m);
1884             } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1885               phi = ensure_phi(j, nophi);
1886             }
1887           }
1888           break;
1889         }
1890       }
1891       // At this point, n might be top if:
1892       //  - there is no phi (because TypeFlow detected a conflict), or
1893       //  - the corresponding control edge is top (a dead incoming path)
1894       // It is a bug if we create a phi which sees a garbage value on a live path.
1895 
1896       // Merging two value types?
1897       if (phi != NULL && n->is_ValueType()) {
1898         // Reload current state because it may have been updated by ensure_phi
1899         m = map()->in(j);
1900         ValueTypeNode* vtm = m->as_ValueType(); // Current value type
1901         ValueTypeNode* vtn = n->as_ValueType(); // Incoming value type
1902         assert(vtm->get_oop() == phi, "Value type should have Phi input");
1903         if (TraceOptoParse) {
1904 #ifdef ASSERT
1905           tty->print_cr("\nMerging value types");
1906           tty->print_cr("Current:");
1907           vtm->dump(2);
1908           tty->print_cr("Incoming:");
1909           vtn->dump(2);
1910           tty->cr();
1911 #endif
1912         }
1913         // Do the merge
1914         vtm->merge_with(&_gvn, vtn, pnum, last_merge);
1915         if (last_merge) {
1916           map()->set_req(j, _gvn.transform_no_reclaim(vtm));
1917           record_for_igvn(vtm);


2319   // Set starting bci for uncommon trap.
2320   set_parse_bci(0);
2321 
2322   const TypePtr* adr_type = TypeRawPtr::make((address)mc);
2323   Node* mc_adr = makecon(adr_type);
2324   Node* cnt_adr = basic_plus_adr(mc_adr, mc_adr, in_bytes(MethodCounters::nmethod_age_offset()));
2325   Node* cnt = make_load(control(), cnt_adr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
2326   Node* decr = _gvn.transform(new SubINode(cnt, makecon(TypeInt::ONE)));
2327   store_to_memory(control(), cnt_adr, decr, T_INT, adr_type, MemNode::unordered);
2328   Node *chk   = _gvn.transform(new CmpINode(decr, makecon(TypeInt::ZERO)));
2329   Node* tst   = _gvn.transform(new BoolNode(chk, BoolTest::gt));
2330   { BuildCutout unless(this, tst, PROB_ALWAYS);
2331     uncommon_trap(Deoptimization::Reason_tenured,
2332                   Deoptimization::Action_make_not_entrant);
2333   }
2334 }
2335 
2336 //------------------------------return_current---------------------------------
2337 // Append current _map to _exit_return
2338 void Parse::return_current(Node* value) {


2339   if (tf()->returns_value_type_as_fields()) {
2340     assert(false, "Fix this with the calling convention changes");
2341     // Value type is returned as fields; make sure non-flattened value type fields are allocated
2342     // value = value->as_ValueType()->allocate_fields(this);




2343   }
2344 
2345   if (RegisterFinalizersAtInit &&
2346       method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2347     call_register_finalizer();
2348   }
2349 
2350   // Do not set_parse_bci, so that return goo is credited to the return insn.
2351   // vreturn can trigger an allocation so vreturn can throw. Setting
2352   // the bci here breaks exception handling. Commenting this out
2353   // doesn't seem to break anything.
2354   //  set_bci(InvocationEntryBci);
2355   if (method()->is_synchronized() && GenerateSynchronizationCode) {
2356     shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2357   }
2358   if (C->env()->dtrace_method_probes()) {
2359     make_dtrace_method_exit(method());
2360   }
2361   // frame pointer is always the same, already captured
2362   if (value != NULL) {
2363     Node* phi = _exits.argument(0);
2364     const TypeOopPtr* tr = phi->bottom_type()->isa_oopptr();
2365     if (value->is_ValueType() && (!_caller->has_method() || (tr && tr->is_valuetypeptr()))) {
2366       // Value type is returned as an oop; make sure it's allocated
2367       value = value->as_ValueType()->allocate(this)->get_oop();
2368     } else if (tr && tr->isa_instptr() && tr->klass()->is_loaded() && tr->klass()->is_interface()) {
2369       // If returning oops to an interface-return, there is a silent free
2370       // cast from oop to interface allowed by the Verifier. Make it explicit here.
2371       const TypeInstPtr* tp = value->bottom_type()->isa_instptr();
2372       if (tp && tp->klass()->is_loaded() && !tp->klass()->is_interface()) {
2373         // sharpen the type eagerly; this eases certain assert checking
2374         if (tp->higher_equal(TypeInstPtr::NOTNULL)) {
2375           tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
2376         }
2377         value = _gvn.transform(new CheckCastPPNode(0, value, tr));
2378       }
2379     } else if ((phi->bottom_type()->isa_valuetype() || phi->bottom_type()->is_valuetypeptr())
2380                && !value->is_ValueType() && gvn().type(value)->maybe_null()) {



2381       assert(value->bottom_type()->remove_speculative() == TypePtr::NULL_PTR, "Anything other than null?");
2382       inc_sp(1);
2383       uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
2384       dec_sp(1);
2385       return;
2386     } else {
2387       // Handle returns of oop-arrays to an arrays-of-interface return
2388       const TypeInstPtr* phi_tip;
2389       const TypeInstPtr* val_tip;
2390       Type::get_arrays_base_elements(phi->bottom_type(), value->bottom_type(), &phi_tip, &val_tip);
2391       if (phi_tip != NULL && phi_tip->is_loaded() && phi_tip->klass()->is_interface() &&
2392           val_tip != NULL && val_tip->is_loaded() && !val_tip->klass()->is_interface()) {
2393         value = _gvn.transform(new CheckCastPPNode(0, value, phi->bottom_type()));
2394       }
2395     }
2396     phi->add_req(value);
2397   }
2398 
2399   SafePointNode* exit_return = _exits.map();
2400   exit_return->in( TypeFunc::Control  )->add_req( control() );

