92 }
93 }
94 #endif
95
96 //------------------------------ON STACK REPLACEMENT---------------------------
97
98 // Construct a node which can be used to get incoming state for
99 // on stack replacement.
100 Node *Parse::fetch_interpreter_state(int index,
101 BasicType bt,
102 Node *local_addrs,
103 Node *local_addrs_base) {
104 Node *mem = memory(Compile::AliasIdxRaw);
105 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
106 Node *ctl = control();
107
108 // Very similar to LoadNode::make, except we handle unaligned longs and
109 // doubles on SPARC. Intel can handle them just fine directly.
110 Node *l;
111 switch (bt) { // Signature is flattened
112 case T_INT: l = new (C) LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
113 case T_FLOAT: l = new (C) LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
114 case T_ADDRESS: l = new (C) LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
115 case T_OBJECT: l = new (C) LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
116 case T_LONG:
117 case T_DOUBLE: {
118 // Since arguments are in reverse order, the argument address 'adr'
119 // refers to the back half of the long/double. Recompute adr.
120 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
121 if (Matcher::misaligned_doubles_ok) {
122 l = (bt == T_DOUBLE)
123 ? (Node*)new (C) LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
124 : (Node*)new (C) LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
125 } else {
126 l = (bt == T_DOUBLE)
127 ? (Node*)new (C) LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
128 : (Node*)new (C) LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
129 }
130 break;
131 }
132 default: ShouldNotReachHere();
133 }
134 return _gvn.transform(l);
135 }
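// Worked example (an illustrative sketch, not part of the original source):
// because the interpreter stores locals in reverse order, the address
// computed above for slot 'index' resolves to
//   osr_buf + (max_locals - 1 - index) * wordSize
// e.g. fetch_interpreter_state(3, T_INT, locals_addr, osr_buf), with the
// locals_addr built further below, loads a raw int from byte offset
// (max_locals - 4) * wordSize into the OSR buffer.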
136
137 // Helper routine to prevent the interpreter from handing
138 // unexpected typestate to an OSR method.
139 // The Node l is a value newly dug out of the interpreter frame.
140 // The type is the type predicted by ciTypeFlow. Note that it is
141 // not a general type, but can only come from Type::get_typeflow_type.
142 // The safepoint is a map which will feed an uncommon trap.
143 Node* Parse::check_interpreter_type(Node* l, const Type* type,
144 SafePointNode* &bad_type_exit) {
145
146 const TypeOopPtr* tp = type->isa_oopptr();
147
148 // TypeFlow may assert null-ness if a type appears unloaded.
149 if (type == TypePtr::NULL_PTR ||
150 (tp != NULL && !tp->klass()->is_loaded())) {
151 // Value must be null, not a real oop.
152 Node* chk = _gvn.transform( new (C) CmpPNode(l, null()) );
153 Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
154 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
155 set_control(_gvn.transform( new (C) IfTrueNode(iff) ));
156 Node* bad_type = _gvn.transform( new (C) IfFalseNode(iff) );
157 bad_type_exit->control()->add_req(bad_type);
158 l = null();
159 }
160
161 // Typeflow can also cut off paths from the CFG, based on
162 // types which appear unloaded, or call sites which appear unlinked.
163 // When paths are cut off, values at later merge points can rise
164 // toward more specific classes. Make sure these specific classes
165 // are still in effect.
166 if (tp != NULL && tp->klass() != C->env()->Object_klass()) {
167 // TypeFlow asserted a specific object type. Value must have that type.
168 Node* bad_type_ctrl = NULL;
169 l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
170 bad_type_exit->control()->add_req(bad_type_ctrl);
171 }
172
173 BasicType bt_l = _gvn.type(l)->basic_type();
174 BasicType bt_t = type->basic_type();
175 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
176 return l;
203 set_sp(osr_block->start_sp());
204
205 // Check bailouts. We currently do not perform on stack replacement
206 // of loops in catch blocks or loops which branch with a non-empty stack.
207 if (sp() != 0) {
208 C->record_method_not_compilable("OSR starts with non-empty stack");
209 return;
210 }
211 // Do not OSR inside finally clauses:
212 if (osr_block->has_trap_at(osr_block->start())) {
213 C->record_method_not_compilable("OSR starts with an immediate trap");
214 return;
215 }
216
217 // Commute monitors from interpreter frame to compiler frame.
218 assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
219 int mcnt = osr_block->flow()->monitor_count();
220 Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
221 for (index = 0; index < mcnt; index++) {
222 // Make a BoxLockNode for the monitor.
223 Node *box = _gvn.transform(new (C) BoxLockNode(next_monitor()));
224
225
226 // Displaced headers and locked objects are interleaved in the
227 // temp OSR buffer. We only copy the locked objects out here.
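    // Layout sketch (an assumption; the buffer is filled on the runtime side
    // by SharedRuntime::OSR_migration_begin): monitor i's locked object lives
    // at byte offset (max_locals + 2*mcnt - 1 - 2*i) * wordSize above
    // osr_buf, with its displaced header in the word directly below it.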
228 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
229 Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
230 // Try to copy the displaced header to the BoxNode
231 Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
232
233
234 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
235
236 // Build a bogus FastLockNode (no code will be generated) and push the
237 // monitor into our debug info.
238 const FastLockNode *flock = _gvn.transform(new (C) FastLockNode( 0, lock_object, box ))->as_FastLock();
239 map()->push_monitor(flock);
240
241 // If the lock is our method synchronization lock, tuck it away in
242 // _synch_lock for return and rethrow exit paths.
243 if (index == 0 && method()->is_synchronized()) {
244 _synch_lock = flock;
245 }
246 }
247
248 // Use the raw liveness computation to make sure that unexpected
249 // values don't propagate into the OSR frame.
250 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
251 if (!live_locals.is_valid()) {
252 // Degenerate or breakpointed method.
253 C->record_method_not_compilable("OSR in empty or breakpointed method");
254 return;
255 }
256
257 // Extract the needed locals from the interpreter frame.
258 Node *locals_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals-1)*wordSize);
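// (Addressing sketch: locals_addr points at the slot for local 0, the
// highest-addressed word, so local 'index' presumably comes back out via
// fetch_interpreter_state(index, .., locals_addr, osr_buf), i.e. from
// osr_buf + (max_locals - 1 - index) * wordSize.)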
308
309 // Extract the needed stack entries from the interpreter frame.
310 for (index = 0; index < sp(); index++) {
311 const Type *type = osr_block->stack_type_at(index);
312 if (type != Type::TOP) {
313 // Currently the compiler bails out when attempting to on stack replace
314 // at a bci with a non-empty stack. We should not reach here.
315 ShouldNotReachHere();
316 }
317 }
318
319 // End the OSR migration
320 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
321 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
322 "OSR_migration_end", TypeRawPtr::BOTTOM,
323 osr_buf);
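// (An assumption about the runtime side of this contract: the
// OSR_migration_end leaf call is expected to release the temporary osr_buf,
// so no interpreter state may be fetched from the buffer after this point.)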
324
325 // Now that the interpreter state is loaded, make sure it will match
326 // at execution time what the compiler is expecting now:
327 SafePointNode* bad_type_exit = clone_map();
328 bad_type_exit->set_control(new (C) RegionNode(1));
329
330 assert(osr_block->flow()->jsrs()->size() == 0, "should be no jsrs live at osr point");
331 for (index = 0; index < max_locals; index++) {
332 if (stopped()) break;
333 Node* l = local(index);
334 if (l->is_top()) continue; // nothing here
335 const Type *type = osr_block->local_type_at(index);
336 if (type->isa_oopptr() != NULL) {
337 if (!live_oops.at(index)) {
338 // skip type check for dead oops
339 continue;
340 }
341 }
342 if (osr_block->flow()->local_type_at(index)->is_return_address()) {
343 // In our current system it's illegal for jsr addresses to be
344 // live into an OSR entry point because the compiler performs
345 // inlining of jsrs. ciTypeFlow has a bailout that detects this
346 // case and aborts the compile if addresses are live into an OSR
347 // entry point. Because of that we can assume that any address
348 // locals at the OSR entry point are dead. Method liveness
644 }
645
646 blocks_parsed++;
647
648 progress = true;
649 if (block->is_loop_head() || block->is_handler() || (has_irreducible && !block->is_ready())) {
650 // Not all preds have been parsed. We must build phis everywhere.
651 // (Note that dead locals do not get phis built, ever.)
652 ensure_phis_everywhere();
653
654 if (block->is_SEL_head() &&
655 (UseLoopPredicate || LoopLimitCheck)) {
656 // Add predicate to single entry (not irreducible) loop head.
657 assert(!block->has_merged_backedge(), "only entry paths should be merged for now");
658 // Need correct bci for predicate.
659 // It is fine to set it here since do_one_block() will set it anyway.
660 set_parse_bci(block->start());
661 add_predicate();
662 // Add new region for back branches.
663 int edges = block->pred_count() - block->preds_parsed() + 1; // +1 for original region
664 RegionNode *r = new (C) RegionNode(edges+1);
665 _gvn.set_type(r, Type::CONTROL);
666 record_for_igvn(r);
667 r->init_req(edges, control());
668 set_control(r);
669 // Add new phis.
670 ensure_phis_everywhere();
671 }
672
673 // Leave behind an undisturbed copy of the map, for future merges.
674 set_map(clone_map());
675 }
676
677 if (control()->is_Region() && !block->is_loop_head() && !has_irreducible && !block->is_handler()) {
678 // In the absence of irreducible loops, the Region and Phis
679 // associated with a merge that doesn't involve a backedge can
680 // be simplified now since the RPO parsing order guarantees
681 // that any path which was supposed to reach here has already
682 // been parsed or must be dead.
683 Node* c = control();
684 Node* result = _gvn.transform_no_reclaim(control());
711 for (int rpo = 0; rpo < block_count(); rpo++) {
712 Block* block = rpo_at(rpo);
713 if (!block->is_parsed()) {
714 if (TraceOptoParse) {
715 tty->print_cr("Skipped dead block %d at bci:%d", rpo, block->start());
716 }
717 assert(!block->is_merged(), "no half-processed blocks");
718 }
719 }
720 #endif
721 }
722
723 //-------------------------------build_exits----------------------------------
724 // Build normal and exceptional exit merge points.
725 void Parse::build_exits() {
726 // make a clone of caller to prevent sharing of side-effects
727 _exits.set_map(_exits.clone_map());
728 _exits.clean_stack(_exits.sp());
729 _exits.sync_jvms();
730
731 RegionNode* region = new (C) RegionNode(1);
732 record_for_igvn(region);
733 gvn().set_type_bottom(region);
734 _exits.set_control(region);
735
736 // Note: iophi and memphi are not transformed until do_exits.
737 Node* iophi = new (C) PhiNode(region, Type::ABIO);
738 Node* memphi = new (C) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
739 gvn().set_type_bottom(iophi);
740 gvn().set_type_bottom(memphi);
741 _exits.set_i_o(iophi);
742 _exits.set_all_memory(memphi);
743
744 // Add a return value to the exit state. (Do not push it yet.)
745 if (tf()->range()->cnt() > TypeFunc::Parms) {
746 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
747 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
748 // becomes loaded during the subsequent parsing, the loaded and unloaded
749 // types will not join when we transform and push in do_exits().
750 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
751 if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
752 ret_type = TypeOopPtr::BOTTOM;
753 }
754 int ret_size = type2size[ret_type->basic_type()];
755 Node* ret_phi = new (C) PhiNode(region, ret_type);
756 gvn().set_type_bottom(ret_phi);
757 _exits.ensure_stack(ret_size);
758 assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
759 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
760 _exits.set_argument(0, ret_phi); // here is where the parser finds it
761 // Note: ret_phi is not yet pushed, until do_exits.
762 }
763 }
764
765
766 //----------------------------build_start_state-------------------------------
767 // Construct a state which contains only the incoming arguments from an
768 // unknown caller. The method & bci will be NULL & InvocationEntryBci.
769 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
770 int arg_size = tf->domain()->cnt();
771 int max_size = MAX2(arg_size, (int)tf->range()->cnt());
772 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
773 SafePointNode* map = new (this) SafePointNode(max_size, NULL);
774 record_for_igvn(map);
775 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
776 Node_Notes* old_nn = default_node_notes();
777 if (old_nn != NULL && has_method()) {
778 Node_Notes* entry_nn = old_nn->clone(this);
779 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
780 entry_jvms->set_offsets(0);
781 entry_jvms->set_bci(entry_bci());
782 entry_nn->set_jvms(entry_jvms);
783 set_default_node_notes(entry_nn);
784 }
785 uint i;
786 for (i = 0; i < (uint)arg_size; i++) {
787 Node* parm = initial_gvn()->transform(new (this) ParmNode(start, i));
788 map->init_req(i, parm);
789 // Record all these guys for later GVN.
790 record_for_igvn(parm);
791 }
792 for (; i < map->req(); i++) {
793 map->init_req(i, top());
794 }
795 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
796 set_default_node_notes(old_nn);
797 map->set_jvms(jvms);
798 jvms->set_map(map);
799 return jvms;
800 }
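// Shape of the resulting map (an illustrative sketch): for a static method
// int f(int, long), arg_size is TypeFunc::Parms + 3 since the long occupies
// two slots; inputs 0 .. TypeFunc::Parms-1 carry the control/i_o/memory/
// frameptr/returnadr ParmNodes, the next three carry the arguments, and any
// remaining inputs up to max_size are filled with top().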
801
802 //-----------------------------make_node_notes---------------------------------
803 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
804 if (caller_nn == NULL) return NULL;
805 Node_Notes* nn = caller_nn->clone(C);
806 JVMState* caller_jvms = nn->jvms();
807 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
808 jvms->set_offsets(0);
809 jvms->set_bci(_entry_bci);
810 nn->set_jvms(jvms);
811 return nn;
812 }
813
814
815 //--------------------------return_values--------------------------------------
816 void Compile::return_values(JVMState* jvms) {
817 GraphKit kit(jvms);
818 Node* ret = new (this) ReturnNode(TypeFunc::Parms,
819 kit.control(),
820 kit.i_o(),
821 kit.reset_memory(),
822 kit.frameptr(),
823 kit.returnadr());
824 // Add zero or 1 return values
825 int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
826 if (ret_size > 0) {
827 kit.inc_sp(-ret_size); // pop the return value(s)
828 kit.sync_jvms();
829 ret->add_req(kit.argument(0));
830 // Note: The second dummy edge is not needed by a ReturnNode.
831 }
832 // bind it to root
833 root()->add_req(ret);
834 record_for_igvn(ret);
835 initial_gvn()->transform_no_reclaim(ret);
836 }
837
838 //------------------------rethrow_exceptions-----------------------------------
839 // Bind all exception states in the list into a single RethrowNode.
840 void Compile::rethrow_exceptions(JVMState* jvms) {
841 GraphKit kit(jvms);
842 if (!kit.has_exceptions()) return; // nothing to generate
843 // Load my combined exception state into the kit, with all phis transformed:
844 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
845 Node* ex_oop = kit.use_exception_state(ex_map);
846 RethrowNode* exit = new (this) RethrowNode(kit.control(),
847 kit.i_o(), kit.reset_memory(),
848 kit.frameptr(), kit.returnadr(),
849 // like a return but with exception input
850 ex_oop);
851 // bind to root
852 root()->add_req(exit);
853 record_for_igvn(exit);
854 initial_gvn()->transform_no_reclaim(exit);
855 }
856
857 //---------------------------do_exceptions-------------------------------------
858 // Process exceptions arising from the current bytecode.
859 // Send caught exceptions to the proper handler within this method.
860 // Unhandled exceptions feed into _exits.
861 void Parse::do_exceptions() {
862 if (!has_exceptions()) return;
863
864 if (failing()) {
865 // Pop them all off and throw them away.
866 while (pop_exception_state() != NULL) ;
1048 C->record_method_not_compilable_all_tiers("too many local variables");
1049 return NULL;
1050 }
1051
1052 // If this is an inlined method, we may have to do a receiver null check.
1053 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1054 GraphKit kit(_caller);
1055 kit.null_check_receiver_before_call(method());
1056 _caller = kit.transfer_exceptions_into_jvms();
1057 if (kit.stopped()) {
1058 _exits.add_exception_states_from(_caller);
1059 _exits.set_jvms(_caller);
1060 return NULL;
1061 }
1062 }
1063
1064 assert(method() != NULL, "parser must have a method");
1065
1066 // Create an initial safepoint to hold JVM state during parsing
1067 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : NULL);
1068 set_map(new (C) SafePointNode(len, jvms));
1069 jvms->set_map(map());
1070 record_for_igvn(map());
1071 assert(jvms->endoff() == len, "correct jvms sizing");
1072
1073 SafePointNode* inmap = _caller->map();
1074 assert(inmap != NULL, "must have inmap");
1075
1076 uint i;
1077
1078 // Pass thru the predefined input parameters.
1079 for (i = 0; i < TypeFunc::Parms; i++) {
1080 map()->init_req(i, inmap->in(i));
1081 }
1082
1083 if (depth() == 1) {
1084 assert(map()->memory()->Opcode() == Op_Parm, "");
1085 // Insert the memory aliasing node
1086 set_all_memory(reset_memory());
1087 }
1088 assert(merged_memory(), "");
1561 // which must not be allowed into this block's map.)
1562 if (pnum > PhiNode::Input // Known multiple inputs.
1563 || target->is_handler() // These have unpredictable inputs.
1564 || target->is_loop_head() // Known multiple inputs
1565 || control()->is_Region()) { // We must hide this guy.
1566
1567 int current_bci = bci();
1568 set_parse_bci(target->start()); // Set target bci
1569 if (target->is_SEL_head()) {
1570 DEBUG_ONLY( target->mark_merged_backedge(block()); )
1571 if (target->start() == 0) {
1572 // Add loop predicate for the special case when
1573 // there are backbranches to the method entry.
1574 add_predicate();
1575 }
1576 }
1577 // Add a Region to start the new basic block. Phis will be added
1578 // later lazily.
1579 int edges = target->pred_count();
1580 if (edges < pnum) edges = pnum; // might be a new path!
1581 RegionNode *r = new (C) RegionNode(edges+1);
1582 gvn().set_type(r, Type::CONTROL);
1583 record_for_igvn(r);
1584 // zap all inputs to NULL for debugging (done in Node(uint) constructor)
1585 // for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); }
1586 r->init_req(pnum, control());
1587 set_control(r);
1588 set_parse_bci(current_bci); // Restore bci
1589 }
1590
1591 // Convert the existing Parser mapping into a mapping at this bci.
1592 store_state_to(target);
1593 assert(target->is_merged(), "do not come here twice");
1594
1595 } else { // Prior mapping at this bci
1596 if (TraceOptoParse) { tty->print(" with previous state"); }
1597 #ifdef ASSERT
1598 if (target->is_SEL_head()) {
1599 target->mark_merged_backedge(block());
1600 }
1601 #endif
1956 const TypeInstPtr *tinst = receiver->bottom_type()->isa_instptr();
1957 if (tinst != NULL && tinst->klass()->is_loaded() && !tinst->klass_is_exact()) {
1958 // The type isn't known exactly so see if CHA tells us anything.
1959 ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
1960 if (!Dependencies::has_finalizable_subclass(ik)) {
1961 // No finalizable subclasses so skip the dynamic check.
1962 C->dependencies()->assert_has_no_finalizable_subclasses(ik);
1963 return;
1964 }
1965 }
1966
1967 // Insert a dynamic test for whether the instance needs
1968 // finalization. In general this will fold up since the concrete
1969 // class is often visible so the access flags are constant.
1970 Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
1971 Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );
1972
1973 Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
1974 Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered);
1975
1976 Node* mask = _gvn.transform(new (C) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));
1977 Node* check = _gvn.transform(new (C) CmpINode(mask, intcon(0)));
1978 Node* test = _gvn.transform(new (C) BoolNode(check, BoolTest::ne));
1979
1980 IfNode* iff = create_and_map_if(control(), test, PROB_MAX, COUNT_UNKNOWN);
1981
1982 RegionNode* result_rgn = new (C) RegionNode(3);
1983 record_for_igvn(result_rgn);
1984
1985 Node *skip_register = _gvn.transform(new (C) IfFalseNode(iff));
1986 result_rgn->init_req(1, skip_register);
1987
1988 Node *needs_register = _gvn.transform(new (C) IfTrueNode(iff));
1989 set_control(needs_register);
1990 if (stopped()) {
1991 // There is no slow path.
1992 result_rgn->init_req(2, top());
1993 } else {
1994 Node *call = make_runtime_call(RC_NO_LEAF,
1995 OptoRuntime::register_finalizer_Type(),
1996 OptoRuntime::register_finalizer_Java(),
1997 NULL, TypePtr::BOTTOM,
1998 receiver);
1999 make_slow_call_ex(call, env()->Throwable_klass(), true);
2000
2001 Node* fast_io = call->in(TypeFunc::I_O);
2002 Node* fast_mem = call->in(TypeFunc::Memory);
2003 // These two phis are pre-filled with copies of the fast IO and Memory
2004 Node* io_phi = PhiNode::make(result_rgn, fast_io, Type::ABIO);
2005 Node* mem_phi = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
2006
2007 result_rgn->init_req(2, control());
2008 io_phi ->init_req(2, i_o());
2022 assert(C->method() != NULL, "only for normal compilations");
2023 assert(!C->method()->method_data()->is_empty(), "MDO is needed to record RTM state");
2024 assert(depth() == 1, "generate check only for main compiled method");
2025
2026 // Set starting bci for uncommon trap.
2027 set_parse_bci(is_osr_parse() ? osr_bci() : 0);
2028
2029 // Load the rtm_state from the MethodData.
2030 const TypePtr* adr_type = TypeMetadataPtr::make(C->method()->method_data());
2031 Node* mdo = makecon(adr_type);
2032 int offset = MethodData::rtm_state_offset_in_bytes();
2033 Node* adr_node = basic_plus_adr(mdo, mdo, offset);
2034 Node* rtm_state = make_load(control(), adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
2035
2036 // Separate Load from Cmp by Opaque.
2037 // In expand_macro_nodes() it will be replaced either
2038 // with this load when there are locks in the code
2039 // or with ProfileRTM (cmp->in(2)) otherwise so that
2040 // the check will fold.
2041 Node* profile_state = makecon(TypeInt::make(ProfileRTM));
2042 Node* opq = _gvn.transform( new (C) Opaque3Node(C, rtm_state, Opaque3Node::RTM_OPT) );
2043 Node* chk = _gvn.transform( new (C) CmpINode(opq, profile_state) );
2044 Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
2045 // Branch to failure if state was changed
2046 { BuildCutout unless(this, tst, PROB_ALWAYS);
2047 uncommon_trap(Deoptimization::Reason_rtm_state_change,
2048 Deoptimization::Action_make_not_entrant);
2049 }
2050 }
2051 #endif
2052 }
2053
2054 void Parse::decrement_age() {
2055 MethodCounters* mc = method()->ensure_method_counters();
2056 if (mc == NULL) {
2057 C->record_failure("Must have MCs");
2058 return;
2059 }
2060 assert(!is_osr_parse(), "Not doing this for OSRs");
2061
2062 // Set starting bci for uncommon trap.
2063 set_parse_bci(0);
2064
2065 const TypePtr* adr_type = TypeRawPtr::make((address)mc);
2066 Node* mc_adr = makecon(adr_type);
2067 Node* cnt_adr = basic_plus_adr(mc_adr, mc_adr, in_bytes(MethodCounters::nmethod_age_offset()));
2068 Node* cnt = make_load(control(), cnt_adr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
2069 Node* decr = _gvn.transform(new (C) SubINode(cnt, makecon(TypeInt::ONE)));
2070 store_to_memory(control(), cnt_adr, decr, T_INT, adr_type, MemNode::unordered);
2071 Node *chk = _gvn.transform(new (C) CmpINode(decr, makecon(TypeInt::ZERO)));
2072 Node* tst = _gvn.transform(new (C) BoolNode(chk, BoolTest::gt));
2073 { BuildCutout unless(this, tst, PROB_ALWAYS);
2074 uncommon_trap(Deoptimization::Reason_tenured,
2075 Deoptimization::Action_make_not_entrant);
2076 }
2077 }
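// Rough intent of the graph built above, as pseudo-code (a sketch only):
//   if (--mc->nmethod_age > 0)  continue in compiled code;
//   else                        uncommon_trap(Reason_tenured);
// BuildCutout places the trap on the failing arm of 'tst' (the succeeding
// arm carries PROB_ALWAYS), so the method deoptimizes and is made not
// entrant once the age counter falls to zero or below.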
2078
2079 //------------------------------return_current---------------------------------
2080 // Append current _map to _exit_return
2081 void Parse::return_current(Node* value) {
2082 if (RegisterFinalizersAtInit &&
2083 method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2084 call_register_finalizer();
2085 }
2086
2087 // Do not set_parse_bci, so that return goo is credited to the return insn.
2088 set_bci(InvocationEntryBci);
2089 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2090 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2091 }
2092 if (C->env()->dtrace_method_probes()) {
2107 mms.set_memory(phi);
2108 }
2109 mms.memory()->add_req(mms.memory2());
2110 }
2111
2112 // frame pointer is always same, already captured
2113 if (value != NULL) {
2114 // If returning oops to an interface-return, there is a silent free
2115 // cast from oop to interface allowed by the Verifier. Make it explicit
2116 // here.
2117 Node* phi = _exits.argument(0);
2118 const TypeInstPtr *tr = phi->bottom_type()->isa_instptr();
2119 if( tr && tr->klass()->is_loaded() &&
2120 tr->klass()->is_interface() ) {
2121 const TypeInstPtr *tp = value->bottom_type()->isa_instptr();
2122 if (tp && tp->klass()->is_loaded() &&
2123 !tp->klass()->is_interface()) {
2124 // sharpen the type eagerly; this eases certain assert checking
2125 if (tp->higher_equal(TypeInstPtr::NOTNULL))
2126 tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
2127 value = _gvn.transform(new (C) CheckCastPPNode(0,value,tr));
2128 }
2129 }
2130 phi->add_req(value);
2131 }
2132
2133 stop_and_kill_map(); // This CFG path dies here
2134 }
2135
2136
2137 //------------------------------add_safepoint----------------------------------
2138 void Parse::add_safepoint() {
2139 // See if we can avoid this safepoint. No need for a SafePoint immediately
2140 // after a Call (except Leaf Call) or another SafePoint.
2141 Node *proj = control();
2142 bool add_poll_param = SafePointNode::needs_polling_address_input();
2143 uint parms = add_poll_param ? TypeFunc::Parms+1 : TypeFunc::Parms;
2144 if( proj->is_Proj() ) {
2145 Node *n0 = proj->in(0);
2146 if( n0->is_Catch() ) {
2147 n0 = n0->in(0)->in(0);
2148 assert( n0->is_Call(), "expect a call here" );
2149 }
2150 if( n0->is_Call() ) {
2151 if( n0->as_Call()->guaranteed_safepoint() )
2152 return;
2153 } else if( n0->is_SafePoint() && n0->req() >= parms ) {
2154 return;
2155 }
2156 }
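  // (Example of the elision above: the Proj left behind by a just-parsed
  // non-leaf call already guarantees a safepoint, so emitting another
  // SafePoint immediately after it would only duplicate debug state.)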
2157
2158 // Clear out dead values from the debug info.
2159 kill_dead_locals();
2160
2161 // Clone the JVM State
2162 SafePointNode *sfpnt = new (C) SafePointNode(parms, NULL);
2163
2164 // Capture memory state BEFORE a SafePoint. Since we can block at a
2165 // SafePoint we need our GC state to be safe; i.e. we need all our current
2166 // write barriers (card marks) to not float down after the SafePoint so we
2167 // must read raw memory. Likewise we need all oop stores to match the card
2168 // marks. If deopt can happen, we need ALL stores (we need the correct JVM
2169 // state on a deopt).
2170
2171 // We do not need to WRITE the memory state after a SafePoint. The control
2172 // edge will keep card-marks and oop-stores from floating up from below a
2173 // SafePoint and our true dependency added here will keep them from floating
2174 // down below a SafePoint.
2175
2176 // Clone the current memory state
2177 Node* mem = MergeMemNode::make(C, map()->memory());
2178
2179 mem = _gvn.transform(mem);
2180
2181 // Pass control through the safepoint
2182 sfpnt->init_req(TypeFunc::Control , control());