src/share/vm/opto/graphKit.cpp
Cdiff for 8034812

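Every hunk below makes the same mechanical change: C2 node allocations stop passing the Compile object to a placement operator new, so call sites shrink from `new (C) FooNode(...)` to `new FooNode(...)`. As a rough, self-contained sketch of why the shorter form can work — assuming the allocator can reach the current compilation through an accessor like HotSpot's Compile::current() — consider the following. Arena, g_compile, and CmpINode's members are illustrative stand-ins, not the real HotSpot declarations.

#include <cstddef>

// Illustrative arena: hands out bytes from a fixed buffer, freed in bulk.
struct Arena {
  char   buf[1 << 16];
  size_t used = 0;
  void* alloc(size_t n) { void* p = buf + used; used += n; return p; }
};

// Stand-in for the compilation object; current() mimics the accessor a
// plain operator new can rely on instead of an explicit Compile* argument.
struct Compile {
  Arena arena;
  static Compile* current();
};

static Compile g_compile;                          // one active compilation
Compile* Compile::current() { return &g_compile; }

struct Node {
  // Old call-site form: new (C) FooNode(...) passes the compilation in.
  void* operator new(size_t size, Compile* C) { return C->arena.alloc(size); }
  // New call-site form: new FooNode(...) locates the compilation itself.
  void* operator new(size_t size) { return Compile::current()->arena.alloc(size); }
  // Arena memory is reclaimed wholesale; individual deletes are no-ops.
  void operator delete(void*) {}
  void operator delete(void*, Compile*) {}         // matching placement delete
};

struct CmpINode : Node {
  int a, b;
  CmpINode(int a, int b) : a(a), b(b) {}
};

int main() {
  Compile* C = Compile::current();
  Node* before = new (C) CmpINode(1, 2);  // the form in the old (***) sections
  Node* after  = new CmpINode(1, 2);      // the form in the new (---) sections
  (void)before; (void)after;
  return 0;
}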
*** 292,302 ****
  } else {                      // ...or created from scratch
    JVMState* jvms = new (C) JVMState(_method, NULL);
    jvms->set_bci(_bci);
    jvms->set_sp(_sp);
!   jvms->set_map(new (C) SafePointNode(TypeFunc::Parms, jvms));
    set_jvms(jvms);
    for (uint i = 0; i < map()->req(); i++) map()->init_req(i, top());
    set_all_memory(top());
    while (map()->req() < jvms->endoff()) map()->add_req(top());
  }
--- 292,302 ----
  } else {                      // ...or created from scratch
    JVMState* jvms = new (C) JVMState(_method, NULL);
    jvms->set_bci(_bci);
    jvms->set_sp(_sp);
!   jvms->set_map(new SafePointNode(TypeFunc::Parms, jvms));
    set_jvms(jvms);
    for (uint i = 0; i < map()->req(); i++) map()->init_req(i, top());
    set_all_memory(top());
    while (map()->req() < jvms->endoff()) map()->add_req(top());
  }
*** 345,355 ****
  MergeMemNode* phi_mem = phi_map->merged_memory();
  MergeMemNode* ex_mem  = ex_map->merged_memory();
  if (region->in(0) != hidden_merge_mark) {
    // The control input is not (yet) a specially-marked region in phi_map.
    // Make it so, and build some phis.
!   region = new (C) RegionNode(2);
    _gvn.set_type(region, Type::CONTROL);
    region->set_req(0, hidden_merge_mark); // marks an internal ex-state
    region->init_req(1, phi_map->control());
    phi_map->set_control(region);
    Node* io_phi = PhiNode::make(region, phi_map->i_o(), Type::ABIO);
--- 345,355 ----
  MergeMemNode* phi_mem = phi_map->merged_memory();
  MergeMemNode* ex_mem  = ex_map->merged_memory();
  if (region->in(0) != hidden_merge_mark) {
    // The control input is not (yet) a specially-marked region in phi_map.
    // Make it so, and build some phis.
!   region = new RegionNode(2);
    _gvn.set_type(region, Type::CONTROL);
    region->set_req(0, hidden_merge_mark); // marks an internal ex-state
    region->init_req(1, phi_map->control());
    phi_map->set_control(region);
    Node* io_phi = PhiNode::make(region, phi_map->i_o(), Type::ABIO);
*** 494,510 ****
  // take the normal fast path provided by add_exception_events.  If
  // exception event reporting is enabled for this thread, we will
  // take the uncommon_trap in the BuildCutout below.

  // first must access the should_post_on_exceptions_flag in this thread's JavaThread
! Node* jthread = _gvn.transform(new (C) ThreadLocalNode());
  Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
  Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);

  // Test the should_post_on_exceptions_flag vs. 0
! Node* chk = _gvn.transform( new (C) CmpINode(should_post_flag, intcon(0)) );
! Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );

  // Branch to slow_path if should_post_on_exceptions_flag was true
  { BuildCutout unless(this, tst, PROB_MAX);
    // Do not try anything fancy if we're notifying the VM on every throw.
    // Cf. case Bytecodes::_athrow in parse2.cpp.
--- 494,510 ----
  // take the normal fast path provided by add_exception_events.  If
  // exception event reporting is enabled for this thread, we will
  // take the uncommon_trap in the BuildCutout below.

  // first must access the should_post_on_exceptions_flag in this thread's JavaThread
! Node* jthread = _gvn.transform(new ThreadLocalNode());
  Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
  Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);

  // Test the should_post_on_exceptions_flag vs. 0
! Node* chk = _gvn.transform( new CmpINode(should_post_flag, intcon(0)) );
! Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );

  // Branch to slow_path if should_post_on_exceptions_flag was true
  { BuildCutout unless(this, tst, PROB_MAX);
    // Do not try anything fancy if we're notifying the VM on every throw.
    // Cf. case Bytecodes::_athrow in parse2.cpp.
*** 673,684 ****
  {
    assert(p->is_Con() || p->is_Bool(), "test must be a bool");
    SafePointNode* outer_map = _map;   // preserved map is caller's
    SafePointNode* inner_map = kit->map();
    IfNode* iff = kit->create_and_map_if(outer_map->control(), p, prob, cnt);
!   outer_map->set_control(kit->gvn().transform( new (kit->C) IfTrueNode(iff) ));
!   inner_map->set_control(kit->gvn().transform( new (kit->C) IfFalseNode(iff) ));
  }
  BuildCutout::~BuildCutout() {
    GraphKit* kit = _kit;
    assert(kit->stopped(), "cutout code must stop, throw, return, etc.");
  }
--- 673,684 ----
  {
    assert(p->is_Con() || p->is_Bool(), "test must be a bool");
    SafePointNode* outer_map = _map;   // preserved map is caller's
    SafePointNode* inner_map = kit->map();
    IfNode* iff = kit->create_and_map_if(outer_map->control(), p, prob, cnt);
!   outer_map->set_control(kit->gvn().transform( new IfTrueNode(iff) ));
!   inner_map->set_control(kit->gvn().transform( new IfFalseNode(iff) ));
  }
  BuildCutout::~BuildCutout() {
    GraphKit* kit = _kit;
    assert(kit->stopped(), "cutout code must stop, throw, return, etc.");
  }
*** 1116,1154 ****
  //------------------------------basic_plus_adr---------------------------------
  Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) {
    // short-circuit a common case
    if (offset == intcon(0))  return ptr;
!   return _gvn.transform( new (C) AddPNode(base, ptr, offset) );
  }

  Node* GraphKit::ConvI2L(Node* offset) {
    // short-circuit a common case
    jint offset_con = find_int_con(offset, Type::OffsetBot);
    if (offset_con != Type::OffsetBot) {
      return longcon((jlong) offset_con);
    }
!   return _gvn.transform( new (C) ConvI2LNode(offset));
  }

  Node* GraphKit::ConvI2UL(Node* offset) {
    juint offset_con = (juint) find_int_con(offset, Type::OffsetBot);
    if (offset_con != (juint) Type::OffsetBot) {
      return longcon((julong) offset_con);
    }
!   Node* conv = _gvn.transform( new (C) ConvI2LNode(offset));
    Node* mask = _gvn.transform( ConLNode::make(C, (julong) max_juint) );
!   return _gvn.transform( new (C) AndLNode(conv, mask) );
  }

  Node* GraphKit::ConvL2I(Node* offset) {
    // short-circuit a common case
    jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
    if (offset_con != (jlong)Type::OffsetBot) {
      return intcon((int) offset_con);
    }
!   return _gvn.transform( new (C) ConvL2INode(offset));
  }

  //-------------------------load_object_klass-----------------------------------
  Node* GraphKit::load_object_klass(Node* obj) {
    // Special-case a fresh allocation to avoid building nodes:
--- 1116,1154 ----
  //------------------------------basic_plus_adr---------------------------------
  Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) {
    // short-circuit a common case
    if (offset == intcon(0))  return ptr;
!   return _gvn.transform( new AddPNode(base, ptr, offset) );
  }

  Node* GraphKit::ConvI2L(Node* offset) {
    // short-circuit a common case
    jint offset_con = find_int_con(offset, Type::OffsetBot);
    if (offset_con != Type::OffsetBot) {
      return longcon((jlong) offset_con);
    }
!   return _gvn.transform( new ConvI2LNode(offset));
  }

  Node* GraphKit::ConvI2UL(Node* offset) {
    juint offset_con = (juint) find_int_con(offset, Type::OffsetBot);
    if (offset_con != (juint) Type::OffsetBot) {
      return longcon((julong) offset_con);
    }
!   Node* conv = _gvn.transform( new ConvI2LNode(offset));
    Node* mask = _gvn.transform( ConLNode::make(C, (julong) max_juint) );
!   return _gvn.transform( new AndLNode(conv, mask) );
  }

  Node* GraphKit::ConvL2I(Node* offset) {
    // short-circuit a common case
    jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
    if (offset_con != (jlong)Type::OffsetBot) {
      return intcon((int) offset_con);
    }
!   return _gvn.transform( new ConvL2INode(offset));
  }

  //-------------------------load_object_klass-----------------------------------
  Node* GraphKit::load_object_klass(Node* obj) {
    // Special-case a fresh allocation to avoid building nodes:
*** 1163,1173 ****
  // Special-case a fresh allocation to avoid building nodes:
  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
  Node *alen;
  if (alloc == NULL) {
    Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
!   alen = _gvn.transform( new (C) LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
  } else {
    alen = alloc->Ideal_length();
    Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_oopptr(), &_gvn);
    if (ccast != alen) {
      alen = _gvn.transform(ccast);
--- 1163,1173 ----
  // Special-case a fresh allocation to avoid building nodes:
  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
  Node *alen;
  if (alloc == NULL) {
    Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
!   alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
  } else {
    alen = alloc->Ideal_length();
    Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_oopptr(), &_gvn);
    if (ccast != alen) {
      alen = _gvn.transform(ccast);
*** 1197,1208 ****
  explicit_null_checks_inserted++;

  // Construct NULL check
  Node *chk = NULL;
  switch(type) {
!   case T_LONG   : chk = new (C) CmpLNode(value, _gvn.zerocon(T_LONG)); break;
!   case T_INT    : chk = new (C) CmpINode(value, _gvn.intcon(0)); break;
    case T_ARRAY  : // fall through
      type = T_OBJECT;  // simplify further tests
    case T_OBJECT : {
      const Type *t = _gvn.type( value );
--- 1197,1208 ----
  explicit_null_checks_inserted++;

  // Construct NULL check
  Node *chk = NULL;
  switch(type) {
!   case T_LONG   : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
!   case T_INT    : chk = new CmpINode(value, _gvn.intcon(0)); break;
    case T_ARRAY  : // fall through
      type = T_OBJECT;  // simplify further tests
    case T_OBJECT : {
      const Type *t = _gvn.type( value );
*** 1245,1266 ****
          // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
          explicit_null_checks_elided++;
          return value;           // Elided null check quickly!
        }
      }
!     chk = new (C) CmpPNode( value, null() );
      break;
    }

    default:
      fatal(err_msg_res("unexpected type: %s", type2name(type)));
  }
  assert(chk != NULL, "sanity check");
  chk = _gvn.transform(chk);

  BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne;
! BoolNode *btst = new (C) BoolNode( chk, btest);
  Node *tst = _gvn.transform( btst );

  //-----------
  // if peephole optimizations occurred, a prior test existed.
  // If a prior test existed, maybe it dominates as we can avoid this test.
--- 1245,1266 ----
          // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
          explicit_null_checks_elided++;
          return value;           // Elided null check quickly!
        }
      }
!     chk = new CmpPNode( value, null() );
      break;
    }

    default:
      fatal(err_msg_res("unexpected type: %s", type2name(type)));
  }
  assert(chk != NULL, "sanity check");
  chk = _gvn.transform(chk);

  BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne;
! BoolNode *btst = new BoolNode( chk, btest);
  Node *tst = _gvn.transform( btst );

  //-----------
  // if peephole optimizations occurred, a prior test existed.
  // If a prior test existed, maybe it dominates as we can avoid this test.
*** 1323,1334 ****
    ok_prob = PROB_LIKELY_MAG(3);
  }

  if (null_control != NULL) {
    IfNode* iff = create_and_map_if(control(), tst, ok_prob, COUNT_UNKNOWN);
!   Node* null_true = _gvn.transform( new (C) IfFalseNode(iff));
!   set_control(      _gvn.transform( new (C) IfTrueNode(iff)));
    if (null_true == top())
      explicit_null_checks_elided++;
    (*null_control) = null_true;
  } else {
    BuildCutout unless(this, tst, ok_prob);
--- 1323,1334 ----
    ok_prob = PROB_LIKELY_MAG(3);
  }

  if (null_control != NULL) {
    IfNode* iff = create_and_map_if(control(), tst, ok_prob, COUNT_UNKNOWN);
!   Node* null_true = _gvn.transform( new IfFalseNode(iff));
!   set_control(      _gvn.transform( new IfTrueNode(iff)));
    if (null_true == top())
      explicit_null_checks_elided++;
    (*null_control) = null_true;
  } else {
    BuildCutout unless(this, tst, ok_prob);
*** 1376,1386 ****
  const Type *t = _gvn.type(obj);
  const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
  // Object is already not-null?
  if( t == t_not_null ) return obj;

! Node *cast = new (C) CastPPNode(obj,t_not_null);
  cast->init_req(0, control());
  cast = _gvn.transform( cast );

  // Scan for instances of 'obj' in the current JVM mapping.
  // These instances are known to be not-null after the test.
--- 1376,1386 ----
  const Type *t = _gvn.type(obj);
  const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
  // Object is already not-null?
  if( t == t_not_null ) return obj;

! Node *cast = new CastPPNode(obj,t_not_null);
  cast->init_req(0, control());
  cast = _gvn.transform( cast );

  // Scan for instances of 'obj' in the current JVM mapping.
  // These instances are known to be not-null after the test.
*** 1484,1494 ****
    map()->set_memory(mergemem);
  }

  //------------------------------set_all_memory_call----------------------------
  void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
!   Node* newmem = _gvn.transform( new (C) ProjNode(call, TypeFunc::Memory, separate_io_proj) );
    set_all_memory(newmem);
  }

  //=============================================================================
  //
--- 1484,1494 ----
    map()->set_memory(mergemem);
  }

  //------------------------------set_all_memory_call----------------------------
  void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
!   Node* newmem = _gvn.transform( new ProjNode(call, TypeFunc::Memory, separate_io_proj) );
    set_all_memory(newmem);
  }

  //=============================================================================
  //
*** 1719,1731 ****
  // number.  (The prior range check has ensured this.)
  // This assertion is used by ConvI2LNode::Ideal.
  int index_max = max_jint - 1;  // array size is max_jint, index is one less
  if (sizetype != NULL)  index_max = sizetype->_hi - 1;
  const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
! idx = _gvn.transform( new (C) ConvI2LNode(idx, lidxtype) );
  #endif
! Node* scale = _gvn.transform( new (C) LShiftXNode(idx, intcon(shift)) );
  return basic_plus_adr(ary, base, scale);
  }

  //-------------------------load_array_element-------------------------
  Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
--- 1719,1731 ----
  // number.  (The prior range check has ensured this.)
  // This assertion is used by ConvI2LNode::Ideal.
  int index_max = max_jint - 1;  // array size is max_jint, index is one less
  if (sizetype != NULL)  index_max = sizetype->_hi - 1;
  const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
! idx = _gvn.transform( new ConvI2LNode(idx, lidxtype) );
  #endif
! Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
  return basic_plus_adr(ary, base, scale);
  }

  //-------------------------load_array_element-------------------------
  Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
*** 1769,1780 ****
  }
  assert(xcall == call, "call identity is stable");

  // Re-use the current map to produce the result.

! set_control(_gvn.transform(new (C) ProjNode(call, TypeFunc::Control)));
! set_i_o(    _gvn.transform(new (C) ProjNode(call, TypeFunc::I_O , separate_io_proj)));
  set_all_memory_call(xcall, separate_io_proj);

  //return xcall;   // no need, caller already has it
  }
--- 1769,1780 ----
  }
  assert(xcall == call, "call identity is stable");

  // Re-use the current map to produce the result.

! set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
! set_i_o(    _gvn.transform(new ProjNode(call, TypeFunc::I_O , separate_io_proj)));
  set_all_memory_call(xcall, separate_io_proj);

  //return xcall;   // no need, caller already has it
  }
*** 1784,1806 ****
  // Capture the return value, if any.
  Node* ret;
  if (call->method() == NULL ||
      call->method()->return_type()->basic_type() == T_VOID)
        ret = top();
! else  ret = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));

  // Note:  Since any out-of-line call can produce an exception,
  // we always insert an I_O projection from the call into the result.

  make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);

  if (separate_io_proj) {
    // The caller requested separate projections be used by the fall
    // through and exceptional paths, so replace the projections for
    // the fall through path.
!   set_i_o(_gvn.transform( new (C) ProjNode(call, TypeFunc::I_O) ));
!   set_all_memory(_gvn.transform( new (C) ProjNode(call, TypeFunc::Memory) ));
  }
  return ret;
  }

  //--------------------set_predefined_input_for_runtime_call--------------------
--- 1784,1806 ----
  // Capture the return value, if any.
  Node* ret;
  if (call->method() == NULL ||
      call->method()->return_type()->basic_type() == T_VOID)
        ret = top();
! else  ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));

  // Note:  Since any out-of-line call can produce an exception,
  // we always insert an I_O projection from the call into the result.

  make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);

  if (separate_io_proj) {
    // The caller requested separate projections be used by the fall
    // through and exceptional paths, so replace the projections for
    // the fall through path.
!   set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
!   set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
  }
  return ret;
  }

  //--------------------set_predefined_input_for_runtime_call--------------------
*** 1836,1852 ****
  // preceding the call.
  void GraphKit::set_predefined_output_for_runtime_call(Node* call,
                                                        Node* keep_mem,
                                                        const TypePtr* hook_mem) {
    // no i/o
!   set_control(_gvn.transform( new (C) ProjNode(call,TypeFunc::Control) ));
    if (keep_mem) {
      // First clone the existing memory state
      set_all_memory(keep_mem);
      if (hook_mem != NULL) {
        // Make memory for the call
!       Node* mem = _gvn.transform( new (C) ProjNode(call, TypeFunc::Memory) );
        // Set the RawPtr memory state only.  This covers all the heap top/GC stuff
        // We also use hook_mem to extract specific effects from arraycopy stubs.
        set_memory(mem, hook_mem);
      }
      // ...else the call has NO memory effects.
--- 1836,1852 ----
  // preceding the call.
  void GraphKit::set_predefined_output_for_runtime_call(Node* call,
                                                        Node* keep_mem,
                                                        const TypePtr* hook_mem) {
    // no i/o
!   set_control(_gvn.transform( new ProjNode(call,TypeFunc::Control) ));
    if (keep_mem) {
      // First clone the existing memory state
      set_all_memory(keep_mem);
      if (hook_mem != NULL) {
        // Make memory for the call
!       Node* mem = _gvn.transform( new ProjNode(call, TypeFunc::Memory) );
        // Set the RawPtr memory state only.  This covers all the heap top/GC stuff
        // We also use hook_mem to extract specific effects from arraycopy stubs.
        set_memory(mem, hook_mem);
      }
      // ...else the call has NO memory effects.
*** 1966,1976 ****
  void GraphKit::increment_counter(Node* counter_addr) {
    int adr_type = Compile::AliasIdxRaw;
    Node* ctrl = control();
    Node* cnt  = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
!   Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
    store_to_memory(ctrl, counter_addr, incr, T_INT, adr_type, MemNode::unordered);
  }

  //------------------------------uncommon_trap----------------------------------
--- 1966,1976 ----
  void GraphKit::increment_counter(Node* counter_addr) {
    int adr_type = Compile::AliasIdxRaw;
    Node* ctrl = control();
    Node* cnt  = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
!   Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(1)));
    store_to_memory(ctrl, counter_addr, incr, T_INT, adr_type, MemNode::unordered);
  }

  //------------------------------uncommon_trap----------------------------------
*** 2085,2095 ****
  call->set_req(TypeFunc::ReturnAdr, returnadr());
  // The debug info is the only real input to this call.

  // Halt-and-catch fire here.  The above call should never return!
! HaltNode* halt = new(C) HaltNode(control(), frameptr());
  _gvn.set_type_bottom(halt);
  root()->add_req(halt);

  stop_and_kill_map();
  }
--- 2085,2095 ----
  call->set_req(TypeFunc::ReturnAdr, returnadr());
  // The debug info is the only real input to this call.

  // Halt-and-catch fire here.  The above call should never return!
! HaltNode* halt = new HaltNode(control(), frameptr());
  _gvn.set_type_bottom(halt);
  root()->add_req(halt);

  stop_and_kill_map();
  }
*** 2167,2177 ****
    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
    // We're changing the type, we need a new CheckCast node to carry
    // the new type. The new type depends on the control: what
    // profiling tells us is only valid from here as far as we can
    // tell.
!   Node* cast = new(C) CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
    cast = _gvn.transform(cast);
    replace_in_map(n, cast);
    n = cast;
  }
--- 2167,2177 ----
    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
    // We're changing the type, we need a new CheckCast node to carry
    // the new type. The new type depends on the control: what
    // profiling tells us is only valid from here as far as we can
    // tell.
!   Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
    cast = _gvn.transform(cast);
    replace_in_map(n, cast);
    n = cast;
  }
*** 2285,2311 ****
  // rounding for strict float precision conformance
  Node* GraphKit::precision_rounding(Node* n) {
    return UseStrictFP && _method->flags().is_strict()
      && UseSSE == 0 && Matcher::strict_fp_requires_explicit_rounding
!     ? _gvn.transform( new (C) RoundFloatNode(0, n) )
      : n;
  }

  // rounding for strict double precision conformance
  Node* GraphKit::dprecision_rounding(Node *n) {
    return UseStrictFP && _method->flags().is_strict()
      && UseSSE <= 1 && Matcher::strict_fp_requires_explicit_rounding
!     ? _gvn.transform( new (C) RoundDoubleNode(0, n) )
      : n;
  }

  // rounding for non-strict double stores
  Node* GraphKit::dstore_rounding(Node* n) {
    return Matcher::strict_fp_requires_explicit_rounding
      && UseSSE <= 1
!     ? _gvn.transform( new (C) RoundDoubleNode(0, n) )
      : n;
  }

  //=============================================================================
  // Generate a fast path/slow path idiom.  Graph looks like:
--- 2285,2311 ----
  // rounding for strict float precision conformance
  Node* GraphKit::precision_rounding(Node* n) {
    return UseStrictFP && _method->flags().is_strict()
      && UseSSE == 0 && Matcher::strict_fp_requires_explicit_rounding
!     ? _gvn.transform( new RoundFloatNode(0, n) )
      : n;
  }

  // rounding for strict double precision conformance
  Node* GraphKit::dprecision_rounding(Node *n) {
    return UseStrictFP && _method->flags().is_strict()
      && UseSSE <= 1 && Matcher::strict_fp_requires_explicit_rounding
!     ? _gvn.transform( new RoundDoubleNode(0, n) )
      : n;
  }

  // rounding for non-strict double stores
  Node* GraphKit::dstore_rounding(Node* n) {
    return Matcher::strict_fp_requires_explicit_rounding
      && UseSSE <= 1
!     ? _gvn.transform( new RoundDoubleNode(0, n) )
      : n;
  }

  //=============================================================================
  // Generate a fast path/slow path idiom.  Graph looks like:
*** 2380,2394 ****
  // Return slow-path control.
  Node* GraphKit::opt_iff(Node* region, Node* iff) {
    IfNode *opt_iff = _gvn.transform(iff)->as_If();

    // Fast path taken; set region slot 2
!   Node *fast_taken = _gvn.transform( new (C) IfFalseNode(opt_iff) );
    region->init_req(2,fast_taken); // Capture fast-control

    // Fast path not-taken, i.e. slow path
!   Node *slow_taken = _gvn.transform( new (C) IfTrueNode(opt_iff) );
    return slow_taken;
  }

  //-----------------------------make_runtime_call-------------------------------
  Node* GraphKit::make_runtime_call(int flags,
--- 2380,2394 ----
  // Return slow-path control.
  Node* GraphKit::opt_iff(Node* region, Node* iff) {
    IfNode *opt_iff = _gvn.transform(iff)->as_If();

    // Fast path taken; set region slot 2
!   Node *fast_taken = _gvn.transform( new IfFalseNode(opt_iff) );
    region->init_req(2,fast_taken); // Capture fast-control

    // Fast path not-taken, i.e. slow path
!   Node *slow_taken = _gvn.transform( new IfTrueNode(opt_iff) );
    return slow_taken;
  }

  //-----------------------------make_runtime_call-------------------------------
  Node* GraphKit::make_runtime_call(int flags,
*** 2408,2423 ****
    assert(!is_leaf, "must supply name for leaf");
    call_name = OptoRuntime::stub_name(call_addr);
  }
  CallNode* call;
  if (!is_leaf) {
!   call = new(C) CallStaticJavaNode(call_type, call_addr, call_name,
                                     bci(), adr_type);
  } else if (flags & RC_NO_FP) {
!   call = new(C) CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
  } else {
!   call = new(C) CallLeafNode(call_type, call_addr, call_name, adr_type);
  }

  // The following is similar to set_edges_for_java_call,
  // except that the memory effects of the call are restricted to AliasIdxRaw.
--- 2408,2423 ----
    assert(!is_leaf, "must supply name for leaf");
    call_name = OptoRuntime::stub_name(call_addr);
  }
  CallNode* call;
  if (!is_leaf) {
!   call = new CallStaticJavaNode(call_type, call_addr, call_name,
                                  bci(), adr_type);
  } else if (flags & RC_NO_FP) {
!   call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
  } else {
!   call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
  }

  // The following is similar to set_edges_for_java_call,
  // except that the memory effects of the call are restricted to AliasIdxRaw.
*** 2474,2484 ****
    // Slow path call has few side-effects, and/or sets few values.
    set_predefined_output_for_runtime_call(call, prev_mem, adr_type);
  }

  if (has_io) {
!   set_i_o(_gvn.transform(new (C) ProjNode(call, TypeFunc::I_O)));
  }
  return call;
  }
--- 2474,2484 ----
    // Slow path call has few side-effects, and/or sets few values.
    set_predefined_output_for_runtime_call(call, prev_mem, adr_type);
  }

  if (has_io) {
!   set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
  }
  return call;
  }
*** 2515,2538 ****
  // Make the exception handler hookups for the slow call
  void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj) {
    if (stopped())  return;

    // Make a catch node with just two handlers:  fall-through and catch-all
!   Node* i_o  = _gvn.transform( new (C) ProjNode(call, TypeFunc::I_O, separate_io_proj) );
!   Node* catc = _gvn.transform( new (C) CatchNode(control(), i_o, 2) );
!   Node* norm = _gvn.transform( new (C) CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci) );
!   Node* excp = _gvn.transform( new (C) CatchProjNode(catc, CatchProjNode::catch_all_index, CatchProjNode::no_handler_bci) );

    { PreserveJVMState pjvms(this);
      set_control(excp);
      set_i_o(i_o);

      if (excp != top()) {
        // Create an exception state also.
        // Use an exact type if the caller has specified a specific exception.
        const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull);
!       Node* ex_oop = new (C) CreateExNode(ex_type, control(), i_o);
        add_exception_state(make_exception_state(_gvn.transform(ex_oop)));
      }
    }

    // Get the no-exception control from the CatchNode.
--- 2515,2538 ----
  // Make the exception handler hookups for the slow call
  void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj) {
    if (stopped())  return;

    // Make a catch node with just two handlers:  fall-through and catch-all
!   Node* i_o  = _gvn.transform( new ProjNode(call, TypeFunc::I_O, separate_io_proj) );
!   Node* catc = _gvn.transform( new CatchNode(control(), i_o, 2) );
!   Node* norm = _gvn.transform( new CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci) );
!   Node* excp = _gvn.transform( new CatchProjNode(catc, CatchProjNode::catch_all_index, CatchProjNode::no_handler_bci) );

    { PreserveJVMState pjvms(this);
      set_control(excp);
      set_i_o(i_o);

      if (excp != top()) {
        // Create an exception state also.
        // Use an exact type if the caller has specified a specific exception.
        const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull);
!       Node* ex_oop = new CreateExNode(ex_type, control(), i_o);
        add_exception_state(make_exception_state(_gvn.transform(ex_oop)));
      }
    }

    // Get the no-exception control from the CatchNode.
*** 2578,2592 ****
  case SSC_always_true:
    return top();
  case SSC_easy_test:
    {
      // Just do a direct pointer compare and be done.
!     Node* cmp = _gvn.transform( new(C) CmpPNode(subklass, superklass) );
!     Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) );
      IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
!     set_control( _gvn.transform( new(C) IfTrueNode (iff) ) );
!     return       _gvn.transform( new(C) IfFalseNode(iff) );
    }
  case SSC_full_test:
    break;
  default:
    ShouldNotReachHere();
--- 2578,2592 ----
  case SSC_always_true:
    return top();
  case SSC_easy_test:
    {
      // Just do a direct pointer compare and be done.
!     Node* cmp = _gvn.transform( new CmpPNode(subklass, superklass) );
!     Node* bol = _gvn.transform( new BoolNode(cmp, BoolTest::eq) );
      IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
!     set_control( _gvn.transform( new IfTrueNode (iff) ) );
!     return       _gvn.transform( new IfFalseNode(iff) );
    }
  case SSC_full_test:
    break;
  default:
    ShouldNotReachHere();
*** 2597,2607 ****
  // if the subklass is the unique subtype of the superklass, the check
  // will always succeed.  We could leave a dependency behind to ensure this.

  // First load the super-klass's check-offset
  Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) );
! Node *chk_off = _gvn.transform(new (C) LoadINode(NULL, memory(p1), p1, _gvn.type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered));
  int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
  bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con);

  // Load from the sub-klass's super-class display list, or a 1-word cache of
--- 2597,2607 ----
  // if the subklass is the unique subtype of the superklass, the check
  // will always succeed.  We could leave a dependency behind to ensure this.

  // First load the super-klass's check-offset
  Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) );
! Node *chk_off = _gvn.transform(new LoadINode(NULL, memory(p1), p1, _gvn.type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered));
  int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
  bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con);

  // Load from the sub-klass's super-class display list, or a 1-word cache of
*** 2609,2619 ****
  // if the super-klass is an interface or exceptionally deep in the Java
  // hierarchy and we have to scan the secondary superclass list the hard way.
  // Worst-case type is a little odd: NULL is allowed as a result (usually
  // klass loads can never produce a NULL).
  Node *chk_off_X = ConvI2X(chk_off);
! Node *p2 = _gvn.transform( new (C) AddPNode(subklass,subklass,chk_off_X) );
  // For some types like interfaces the following loadKlass is from a 1-word
  // cache which is mutable so can't use immutable memory.  Other
  // types load from the super-class display table which is immutable.
  Node *kmem = might_be_cache ? memory(p2) : immutable_memory();
  Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) );
--- 2609,2619 ----
  // if the super-klass is an interface or exceptionally deep in the Java
  // hierarchy and we have to scan the secondary superclass list the hard way.
  // Worst-case type is a little odd: NULL is allowed as a result (usually
  // klass loads can never produce a NULL).
  Node *chk_off_X = ConvI2X(chk_off);
! Node *p2 = _gvn.transform( new AddPNode(subklass,subklass,chk_off_X) );
  // For some types like interfaces the following loadKlass is from a 1-word
  // cache which is mutable so can't use immutable memory.  Other
  // types load from the super-class display table which is immutable.
  Node *kmem = might_be_cache ? memory(p2) : immutable_memory();
  Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) );
*** 2623,2637 ****
    return top();             // false path is dead; no test needed.

  // See if we get an immediate positive hit.  Happens roughly 83% of the
  // time.  Test to see if the value loaded just previously from the subklass
  // is exactly the superklass.
! Node *cmp1 = _gvn.transform( new (C) CmpPNode( superklass, nkls ) );
! Node *bol1 = _gvn.transform( new (C) BoolNode( cmp1, BoolTest::eq ) );
  IfNode *iff1 = create_and_xform_if( control(), bol1, PROB_LIKELY(0.83f), COUNT_UNKNOWN );
! Node *iftrue1 = _gvn.transform( new (C) IfTrueNode ( iff1 ) );
! set_control(    _gvn.transform( new (C) IfFalseNode( iff1 ) ) );

  // Compile speed common case: Check for being deterministic right now.  If
  // chk_off is a constant and not equal to cacheoff then we are NOT a
  // subklass.  In this case we need exactly the 1 test above and we can
  // return those results immediately.
--- 2623,2637 ----
    return top();             // false path is dead; no test needed.

  // See if we get an immediate positive hit.  Happens roughly 83% of the
  // time.  Test to see if the value loaded just previously from the subklass
  // is exactly the superklass.
! Node *cmp1 = _gvn.transform( new CmpPNode( superklass, nkls ) );
! Node *bol1 = _gvn.transform( new BoolNode( cmp1, BoolTest::eq ) );
  IfNode *iff1 = create_and_xform_if( control(), bol1, PROB_LIKELY(0.83f), COUNT_UNKNOWN );
! Node *iftrue1 = _gvn.transform( new IfTrueNode ( iff1 ) );
! set_control(    _gvn.transform( new IfFalseNode( iff1 ) ) );

  // Compile speed common case: Check for being deterministic right now.  If
  // chk_off is a constant and not equal to cacheoff then we are NOT a
  // subklass.  In this case we need exactly the 1 test above and we can
  // return those results immediately.
*** 2640,2676 ****
    set_control(iftrue1); // We need exactly the 1 test above
    return not_subtype_ctrl;
  }

  // Gather the various success & failures here
! RegionNode *r_ok_subtype = new (C) RegionNode(4);
  record_for_igvn(r_ok_subtype);
! RegionNode *r_not_subtype = new (C) RegionNode(3);
  record_for_igvn(r_not_subtype);

  r_ok_subtype->init_req(1, iftrue1);

  // Check for immediate negative hit.  Happens roughly 11% of the time (which
  // is roughly 63% of the remaining cases).  Test to see if the loaded
  // check-offset points into the subklass display list or the 1-element
  // cache.  If it points to the display (and NOT the cache) and the display
  // missed then it's not a subtype.
  Node *cacheoff = _gvn.intcon(cacheoff_con);
! Node *cmp2 = _gvn.transform( new (C) CmpINode( chk_off, cacheoff ) );
! Node *bol2 = _gvn.transform( new (C) BoolNode( cmp2, BoolTest::ne ) );
  IfNode *iff2 = create_and_xform_if( control(), bol2, PROB_LIKELY(0.63f), COUNT_UNKNOWN );
! r_not_subtype->init_req(1, _gvn.transform( new (C) IfTrueNode (iff2) ) );
! set_control(               _gvn.transform( new (C) IfFalseNode(iff2) ) );

  // Check for self.  Very rare to get here, but it is taken 1/3 the time.
  // No performance impact (too rare) but allows sharing of secondary arrays
  // which has some footprint reduction.
! Node *cmp3 = _gvn.transform( new (C) CmpPNode( subklass, superklass ) );
! Node *bol3 = _gvn.transform( new (C) BoolNode( cmp3, BoolTest::eq ) );
  IfNode *iff3 = create_and_xform_if( control(), bol3, PROB_LIKELY(0.36f), COUNT_UNKNOWN );
! r_ok_subtype->init_req(2, _gvn.transform( new (C) IfTrueNode ( iff3 ) ) );
! set_control(              _gvn.transform( new (C) IfFalseNode( iff3 ) ) );

  // -- Roads not taken here: --
  // We could also have chosen to perform the self-check at the beginning
  // of this code sequence, as the assembler does.  This would not pay off
  // the same way, since the optimizer, unlike the assembler, can perform
--- 2640,2676 ----
    set_control(iftrue1); // We need exactly the 1 test above
    return not_subtype_ctrl;
  }

  // Gather the various success & failures here
! RegionNode *r_ok_subtype = new RegionNode(4);
  record_for_igvn(r_ok_subtype);
! RegionNode *r_not_subtype = new RegionNode(3);
  record_for_igvn(r_not_subtype);

  r_ok_subtype->init_req(1, iftrue1);

  // Check for immediate negative hit.  Happens roughly 11% of the time (which
  // is roughly 63% of the remaining cases).  Test to see if the loaded
  // check-offset points into the subklass display list or the 1-element
  // cache.  If it points to the display (and NOT the cache) and the display
  // missed then it's not a subtype.
  Node *cacheoff = _gvn.intcon(cacheoff_con);
! Node *cmp2 = _gvn.transform( new CmpINode( chk_off, cacheoff ) );
! Node *bol2 = _gvn.transform( new BoolNode( cmp2, BoolTest::ne ) );
  IfNode *iff2 = create_and_xform_if( control(), bol2, PROB_LIKELY(0.63f), COUNT_UNKNOWN );
! r_not_subtype->init_req(1, _gvn.transform( new IfTrueNode (iff2) ) );
! set_control(               _gvn.transform( new IfFalseNode(iff2) ) );

  // Check for self.  Very rare to get here, but it is taken 1/3 the time.
  // No performance impact (too rare) but allows sharing of secondary arrays
  // which has some footprint reduction.
! Node *cmp3 = _gvn.transform( new CmpPNode( subklass, superklass ) );
! Node *bol3 = _gvn.transform( new BoolNode( cmp3, BoolTest::eq ) );
  IfNode *iff3 = create_and_xform_if( control(), bol3, PROB_LIKELY(0.36f), COUNT_UNKNOWN );
! r_ok_subtype->init_req(2, _gvn.transform( new IfTrueNode ( iff3 ) ) );
! set_control(              _gvn.transform( new IfFalseNode( iff3 ) ) );

  // -- Roads not taken here: --
  // We could also have chosen to perform the self-check at the beginning
  // of this code sequence, as the assembler does.  This would not pay off
  // the same way, since the optimizer, unlike the assembler, can perform
*** 2690,2706 ****
  // Since the code is rarely used, there is no penalty for moving it
  // out of line, and it can only improve I-cache density.
  // The decision to inline or out-of-line this final check is platform
  // dependent, and is found in the AD file definition of PartialSubtypeCheck.
  Node* psc = _gvn.transform(
!   new (C) PartialSubtypeCheckNode(control(), subklass, superklass) );

! Node *cmp4 = _gvn.transform( new (C) CmpPNode( psc, null() ) );
! Node *bol4 = _gvn.transform( new (C) BoolNode( cmp4, BoolTest::ne ) );
  IfNode *iff4 = create_and_xform_if( control(), bol4, PROB_FAIR, COUNT_UNKNOWN );
! r_not_subtype->init_req(2, _gvn.transform( new (C) IfTrueNode (iff4) ) );
! r_ok_subtype ->init_req(3, _gvn.transform( new (C) IfFalseNode(iff4) ) );

  // Return false path; set default control to true path.
  set_control( _gvn.transform(r_ok_subtype) );
  return _gvn.transform(r_not_subtype);
  }
--- 2690,2706 ----
  // Since the code is rarely used, there is no penalty for moving it
  // out of line, and it can only improve I-cache density.
  // The decision to inline or out-of-line this final check is platform
  // dependent, and is found in the AD file definition of PartialSubtypeCheck.
  Node* psc = _gvn.transform(
!   new PartialSubtypeCheckNode(control(), subklass, superklass) );

! Node *cmp4 = _gvn.transform( new CmpPNode( psc, null() ) );
! Node *bol4 = _gvn.transform( new BoolNode( cmp4, BoolTest::ne ) );
  IfNode *iff4 = create_and_xform_if( control(), bol4, PROB_FAIR, COUNT_UNKNOWN );
! r_not_subtype->init_req(2, _gvn.transform( new IfTrueNode (iff4) ) );
! r_ok_subtype ->init_req(3, _gvn.transform( new IfFalseNode(iff4) ) );

  // Return false path; set default control to true path.
  set_control( _gvn.transform(r_ok_subtype) );
  return _gvn.transform(r_not_subtype);
  }
*** 2760,2781 ****
                                            float prob,
                                            Node* *casted_receiver) {
    const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
    Node* recv_klass = load_object_klass(receiver);
    Node* want_klass = makecon(tklass);
!   Node* cmp = _gvn.transform( new(C) CmpPNode(recv_klass, want_klass) );
!   Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) );
    IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
!   set_control( _gvn.transform( new(C) IfTrueNode (iff) ));
!   Node* fail = _gvn.transform( new(C) IfFalseNode(iff) );

    const TypeOopPtr* recv_xtype = tklass->as_instance_type();
    assert(recv_xtype->klass_is_exact(), "");

    // Subsume downstream occurrences of receiver with a cast to
    // recv_xtype, since now we know what the type will be.
!   Node* cast = new(C) CheckCastPPNode(control(), receiver, recv_xtype);
    (*casted_receiver) = _gvn.transform(cast);
    // (User must make the replace_in_map call.)

    return fail;
  }
--- 2760,2781 ----
                                            float prob,
                                            Node* *casted_receiver) {
    const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
    Node* recv_klass = load_object_klass(receiver);
    Node* want_klass = makecon(tklass);
!   Node* cmp = _gvn.transform( new CmpPNode(recv_klass, want_klass) );
!   Node* bol = _gvn.transform( new BoolNode(cmp, BoolTest::eq) );
    IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
!   set_control( _gvn.transform( new IfTrueNode (iff) ));
!   Node* fail = _gvn.transform( new IfFalseNode(iff) );

    const TypeOopPtr* recv_xtype = tklass->as_instance_type();
    assert(recv_xtype->klass_is_exact(), "");

    // Subsume downstream occurrences of receiver with a cast to
    // recv_xtype, since now we know what the type will be.
!   Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype);
    (*casted_receiver) = _gvn.transform(cast);
    // (User must make the replace_in_map call.)

    return fail;
  }
*** 2918,2929 ****
  assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
         "must check for not-null not-dead klass in callers");

  // Make the merge point
  enum { _obj_path = 1, _fail_path, _null_path, PATH_LIMIT };
! RegionNode* region = new(C) RegionNode(PATH_LIMIT);
! Node*       phi    = new(C) PhiNode(region, TypeInt::BOOL);
  C->set_has_split_ifs(true); // Has chance for split-if optimization

  ciProfileData* data = NULL;
  if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
    data = method()->method_data()->bci_to_data(bci());
--- 2918,2929 ----
  assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
         "must check for not-null not-dead klass in callers");

  // Make the merge point
  enum { _obj_path = 1, _fail_path, _null_path, PATH_LIMIT };
! RegionNode* region = new RegionNode(PATH_LIMIT);
! Node*       phi    = new PhiNode(region, TypeInt::BOOL);
  C->set_has_split_ifs(true); // Has chance for split-if optimization

  ciProfileData* data = NULL;
  if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
    data = method()->method_data()->bci_to_data(bci());
*** 3050,3061 ****
    safe_for_replace = true;
  }

  // Make the merge point
  enum { _obj_path = 1, _null_path, PATH_LIMIT };
! RegionNode* region = new (C) RegionNode(PATH_LIMIT);
! Node*       phi    = new (C) PhiNode(region, toop);
  C->set_has_split_ifs(true); // Has chance for split-if optimization

  // Use null-cast information if it is available
  bool speculative_not_null = false;
  bool never_see_null = ((failure_control == NULL)  // regular case only
--- 3050,3061 ----
    safe_for_replace = true;
  }

  // Make the merge point
  enum { _obj_path = 1, _null_path, PATH_LIMIT };
! RegionNode* region = new RegionNode(PATH_LIMIT);
! Node*       phi    = new PhiNode(region, toop);
  C->set_has_split_ifs(true); // Has chance for split-if optimization

  // Use null-cast information if it is available
  bool speculative_not_null = false;
  bool never_see_null = ((failure_control == NULL)  // regular case only
*** 3112,3123 ****
  // Generate the subtype check
  Node* not_subtype_ctrl = gen_subtype_check( obj_klass, superklass );

  // Plug in success path into the merge
! cast_obj = _gvn.transform(new (C) CheckCastPPNode(control(),
!                                                   not_null_obj, toop));
  // Failure path ends in uncommon trap (or may be dead - failure impossible)
  if (failure_control == NULL) {
    if (not_subtype_ctrl != top()) { // If failure is possible
      PreserveJVMState pjvms(this);
      set_control(not_subtype_ctrl);
--- 3112,3122 ----
  // Generate the subtype check
  Node* not_subtype_ctrl = gen_subtype_check( obj_klass, superklass );

  // Plug in success path into the merge
! cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
  // Failure path ends in uncommon trap (or may be dead - failure impossible)
  if (failure_control == NULL) {
    if (not_subtype_ctrl != top()) { // If failure is possible
      PreserveJVMState pjvms(this);
      set_control(not_subtype_ctrl);
*** 3166,3176 ****
  Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
    MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
    mb->init_req(TypeFunc::Control, control());
    mb->init_req(TypeFunc::Memory, reset_memory());
    Node* membar = _gvn.transform(mb);
!   set_control(_gvn.transform(new (C) ProjNode(membar, TypeFunc::Control)));
    set_all_memory_call(membar);
    return membar;
  }

  //-------------------------insert_mem_bar_volatile----------------------------
--- 3165,3175 ----
  Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
    MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
    mb->init_req(TypeFunc::Control, control());
    mb->init_req(TypeFunc::Memory, reset_memory());
    Node* membar = _gvn.transform(mb);
!   set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
    set_all_memory_call(membar);
    return membar;
  }

  //-------------------------insert_mem_bar_volatile----------------------------
*** 3195,3209 ****
  } else {
    assert(!(opcode == Op_Initialize && alias_idx != Compile::AliasIdxRaw), "fix caller");
    mb->set_req(TypeFunc::Memory, memory(alias_idx));
  }
  Node* membar = _gvn.transform(mb);
! set_control(_gvn.transform(new (C) ProjNode(membar, TypeFunc::Control)));
  if (alias_idx == Compile::AliasIdxBot) {
!   merged_memory()->set_base_memory(_gvn.transform(new (C) ProjNode(membar, TypeFunc::Memory)));
  } else {
!   set_memory(_gvn.transform(new (C) ProjNode(membar, TypeFunc::Memory)),alias_idx);
  }
  return membar;
  }

  //------------------------------shared_lock------------------------------------
--- 3194,3208 ----
  } else {
    assert(!(opcode == Op_Initialize && alias_idx != Compile::AliasIdxRaw), "fix caller");
    mb->set_req(TypeFunc::Memory, memory(alias_idx));
  }
  Node* membar = _gvn.transform(mb);
! set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
  if (alias_idx == Compile::AliasIdxBot) {
!   merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
  } else {
!   set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
  }
  return membar;
  }

  //------------------------------shared_lock------------------------------------
*** 3219,3232 ****
    return NULL;

  assert(dead_locals_are_killed(), "should kill locals before sync. point");

  // Box the stack location
! Node* box = _gvn.transform(new (C) BoxLockNode(next_monitor()));
  Node* mem = reset_memory();

! FastLockNode * flock = _gvn.transform(new (C) FastLockNode(0, obj, box) )->as_FastLock();
  if (UseBiasedLocking && PrintPreciseBiasedLockingStatistics) {
    // Create the counters for this fast lock.
    flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
  }
--- 3218,3231 ----
    return NULL;

  assert(dead_locals_are_killed(), "should kill locals before sync. point");

  // Box the stack location
! Node* box = _gvn.transform(new BoxLockNode(next_monitor()));
  Node* mem = reset_memory();

! FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock();
  if (UseBiasedLocking && PrintPreciseBiasedLockingStatistics) {
    // Create the counters for this fast lock.
    flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
  }
*** 3236,3246 ****
  // Add monitor to debug info for the slow path.  If we block inside the
  // slow path and de-opt, we need the monitor hanging around
  map()->push_monitor( flock );

  const TypeFunc *tf = LockNode::lock_type();
! LockNode *lock = new (C) LockNode(C, tf);

  lock->init_req( TypeFunc::Control, control() );
  lock->init_req( TypeFunc::Memory , mem );
  lock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
  lock->init_req( TypeFunc::FramePtr, frameptr() );
--- 3235,3245 ----
  // Add monitor to debug info for the slow path.  If we block inside the
  // slow path and de-opt, we need the monitor hanging around
  map()->push_monitor( flock );

  const TypeFunc *tf = LockNode::lock_type();
! LockNode *lock = new LockNode(C, tf);

  lock->init_req( TypeFunc::Control, control() );
  lock->init_req( TypeFunc::Memory , mem );
  lock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
  lock->init_req( TypeFunc::FramePtr, frameptr() );
*** 3290,3300 ****
  // Memory barrier to avoid floating things down past the locked region
  insert_mem_bar(Op_MemBarReleaseLock);

  const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
! UnlockNode *unlock = new (C) UnlockNode(C, tf);
  uint raw_idx = Compile::AliasIdxRaw;
  unlock->init_req( TypeFunc::Control, control() );
  unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
  unlock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
  unlock->init_req( TypeFunc::FramePtr, frameptr() );
--- 3289,3299 ----
  // Memory barrier to avoid floating things down past the locked region
  insert_mem_bar(Op_MemBarReleaseLock);

  const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
! UnlockNode *unlock = new UnlockNode(C, tf);
  uint raw_idx = Compile::AliasIdxRaw;
  unlock->init_req( TypeFunc::Control, control() );
  unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
  unlock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
  unlock->init_req( TypeFunc::FramePtr, frameptr() );
*** 3356,3378 ****
                                          const TypeOopPtr* oop_type) {
    int rawidx = Compile::AliasIdxRaw;
    alloc->set_req( TypeFunc::FramePtr, frameptr() );
    add_safepoint_edges(alloc);
    Node* allocx = _gvn.transform(alloc);
!   set_control( _gvn.transform(new (C) ProjNode(allocx, TypeFunc::Control) ) );
    // create memory projection for i_o
!   set_memory ( _gvn.transform( new (C) ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
    make_slow_call_ex(allocx, env()->Throwable_klass(), true);

    // create a memory projection as for the normal control path
!   Node* malloc = _gvn.transform(new (C) ProjNode(allocx, TypeFunc::Memory));
    set_memory(malloc, rawidx);

    // a normal slow-call doesn't change i_o, but an allocation does
    // we create a separate i_o projection for the normal control path
!   set_i_o(_gvn.transform( new (C) ProjNode(allocx, TypeFunc::I_O, false) ) );
!   Node* rawoop = _gvn.transform( new (C) ProjNode(allocx, TypeFunc::Parms) );

    // put in an initialization barrier
    InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
                                                   rawoop)->as_Initialize();
    assert(alloc->initialization() == init, "2-way macro link must work");
--- 3355,3377 ----
                                          const TypeOopPtr* oop_type) {
    int rawidx = Compile::AliasIdxRaw;
    alloc->set_req( TypeFunc::FramePtr, frameptr() );
    add_safepoint_edges(alloc);
    Node* allocx = _gvn.transform(alloc);
!   set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
    // create memory projection for i_o
!   set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
    make_slow_call_ex(allocx, env()->Throwable_klass(), true);

    // create a memory projection as for the normal control path
!   Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
    set_memory(malloc, rawidx);

    // a normal slow-call doesn't change i_o, but an allocation does
    // we create a separate i_o projection for the normal control path
!   set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
!   Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );

    // put in an initialization barrier
    InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
                                                   rawoop)->as_Initialize();
    assert(alloc->initialization() == init, "2-way macro link must work");
*** 3404,3414 ****
      }
    }
  }

  // Cast raw oop to the real thing...
! Node* javaoop = new (C) CheckCastPPNode(control(), rawoop, oop_type);
  javaoop = _gvn.transform(javaoop);
  C->set_recent_alloc(control(), javaoop);

  assert(just_allocated_object(control()) == javaoop, "just allocated");

  #ifdef ASSERT
--- 3403,3413 ----
      }
    }
  }

  // Cast raw oop to the real thing...
! Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
  javaoop = _gvn.transform(javaoop);
  C->set_recent_alloc(control(), javaoop);

  assert(just_allocated_object(control()) == javaoop, "just allocated");

  #ifdef ASSERT
*** 3463,3475 ****
  } else {   // reflective case
    // This reflective path is used by Unsafe.allocateInstance.
    // (It may be stress-tested by specifying StressReflectiveCode.)
    // Basically, we want to get into the VM is there's an illegal argument.
    Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
!   initial_slow_test = _gvn.transform( new (C) AndINode(layout_val, bit) );
    if (extra_slow_test != intcon(0)) {
!     initial_slow_test = _gvn.transform( new (C) OrINode(initial_slow_test, extra_slow_test) );
    }
    // (Macro-expander will further convert this to a Bool, if necessary.)
  }

  // Find the size in bytes.  This is easy; it's the layout_helper.
--- 3462,3474 ----
  } else {   // reflective case
    // This reflective path is used by Unsafe.allocateInstance.
    // (It may be stress-tested by specifying StressReflectiveCode.)
    // Basically, we want to get into the VM is there's an illegal argument.
    Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
!   initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
    if (extra_slow_test != intcon(0)) {
!     initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
    }
    // (Macro-expander will further convert this to a Bool, if necessary.)
  }

  // Find the size in bytes.  This is easy; it's the layout_helper.
*** 3482,3492 ****
    size = ConvI2X(layout_val);

    // Clear the low bits to extract layout_helper_size_in_bytes:
    assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
    Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
!   size = _gvn.transform( new (C) AndXNode(size, mask) );
  }
  if (return_size_val != NULL) {
    (*return_size_val) = size;
  }
--- 3481,3491 ----
    size = ConvI2X(layout_val);

    // Clear the low bits to extract layout_helper_size_in_bytes:
    assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
    Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
!   size = _gvn.transform( new AndXNode(size, mask) );
  }
  if (return_size_val != NULL) {
    (*return_size_val) = size;
  }
*** 3502,3513 ****
  // The entire memory state is needed for slow path of the allocation
  // since GC and deoptimization can happened.
  Node *mem = reset_memory();
  set_all_memory(mem); // Create new memory state

! AllocateNode* alloc
!   = new (C) AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                           control(), mem, i_o(),
                           size, klass_node,
                           initial_slow_test);

  return set_output_for_allocation(alloc, oop_type);
--- 3501,3511 ----
  // The entire memory state is needed for slow path of the allocation
  // since GC and deoptimization can happened.
  Node *mem = reset_memory();
  set_all_memory(mem); // Create new memory state

! AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                                         control(), mem, i_o(),
                                         size, klass_node,
                                         initial_slow_test);

  return set_output_for_allocation(alloc, oop_type);
*** 3529,3540 ****
      !too_many_traps(Deoptimization::Reason_class_check)) {
    // This is a reflective array creation site.
    // Optimistically assume that it is a subtype of Object[],
    // so that we can fold up all the address arithmetic.
    layout_con = Klass::array_layout_helper(T_OBJECT);
!   Node* cmp_lh = _gvn.transform( new(C) CmpINode(layout_val, intcon(layout_con)) );
!   Node* bol_lh = _gvn.transform( new(C) BoolNode(cmp_lh, BoolTest::eq) );
    { BuildCutout unless(this, bol_lh, PROB_MAX);
      inc_sp(nargs);
      uncommon_trap(Deoptimization::Reason_class_check,
                    Deoptimization::Action_maybe_recompile);
    }
--- 3527,3538 ----
      !too_many_traps(Deoptimization::Reason_class_check)) {
    // This is a reflective array creation site.
    // Optimistically assume that it is a subtype of Object[],
    // so that we can fold up all the address arithmetic.
    layout_con = Klass::array_layout_helper(T_OBJECT);
!   Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
!   Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
    { BuildCutout unless(this, bol_lh, PROB_MAX);
      inc_sp(nargs);
      uncommon_trap(Deoptimization::Reason_class_check,
                    Deoptimization::Action_maybe_recompile);
    }
*** 3554,3565 ****
    // Increase the size limit if we have exact knowledge of array type.
    int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
    fast_size_limit <<= (LogBytesPerLong - log2_esize);
  }

! Node* initial_slow_cmp  = _gvn.transform( new (C) CmpUNode( length, intcon( fast_size_limit ) ) );
! Node* initial_slow_test = _gvn.transform( new (C) BoolNode( initial_slow_cmp, BoolTest::gt ) );
  if (initial_slow_test->is_Bool()) {
    // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
    initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
  }
--- 3552,3563 ----
    // Increase the size limit if we have exact knowledge of array type.
    int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
    fast_size_limit <<= (LogBytesPerLong - log2_esize);
  }

! Node* initial_slow_cmp  = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
! Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
  if (initial_slow_test->is_Bool()) {
    // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
    initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
  }
*** 3583,3596 **** header_size_min = hsize; header_size = intcon(hsize + round_mask); } else { Node* hss = intcon(Klass::_lh_header_size_shift); Node* hsm = intcon(Klass::_lh_header_size_mask); ! Node* hsize = _gvn.transform( new(C) URShiftINode(layout_val, hss) ); ! hsize = _gvn.transform( new(C) AndINode(hsize, hsm) ); Node* mask = intcon(round_mask); ! header_size = _gvn.transform( new(C) AddINode(hsize, mask) ); } Node* elem_shift = NULL; if (layout_is_con) { int eshift = Klass::layout_helper_log2_element_size(layout_con); --- 3581,3594 ---- header_size_min = hsize; header_size = intcon(hsize + round_mask); } else { Node* hss = intcon(Klass::_lh_header_size_shift); Node* hsm = intcon(Klass::_lh_header_size_mask); ! Node* hsize = _gvn.transform( new URShiftINode(layout_val, hss) ); ! hsize = _gvn.transform( new AndINode(hsize, hsm) ); Node* mask = intcon(round_mask); ! header_size = _gvn.transform( new AddINode(hsize, mask) ); } Node* elem_shift = NULL; if (layout_is_con) { int eshift = Klass::layout_helper_log2_element_size(layout_con);
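When the layout helper is not a constant, the header size is decoded from it at runtime with the shift/mask pair, and the rounding slop (round_mask) is folded into the header term once, to be stripped by the final AndX further down. A standalone sketch of the decode; treat the shift and mask values as assumptions, not quotes from klass.hpp:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int hss = 16;               // assumed _lh_header_size_shift
      const int hsm = 0xFF;             // assumed _lh_header_size_mask
      const int round_mask = 7;         // 8-byte allocation granularity

      uint32_t layout_val = 16u << hss;          // encodes a 16-byte array header
      int hsize = (layout_val >> hss) & hsm;     // the URShiftI + AndI above
      int header_size = hsize + round_mask;      // the AddI: slop added up front
      assert(hsize == 16 && header_size == 23);
      return 0;
    }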
*** 3611,3636 **** if (tllen != NULL && tllen->_lo < 0) { // Add a manual constraint to a positive range. Cf. array_element_address. jlong size_max = arrayOopDesc::max_array_length(T_BYTE); if (size_max > tllen->_hi) size_max = tllen->_hi; const TypeLong* tlcon = TypeLong::make(CONST64(0), size_max, Type::WidenMin); ! lengthx = _gvn.transform( new (C) ConvI2LNode(length, tlcon)); } } #endif // Combine header size (plus rounding) and body size. Then round down. // This computation cannot overflow, because it is used only in two // places, one where the length is sharply limited, and the other // after a successful allocation. Node* abody = lengthx; if (elem_shift != NULL) ! abody = _gvn.transform( new(C) LShiftXNode(lengthx, elem_shift) ); ! Node* size = _gvn.transform( new(C) AddXNode(headerx, abody) ); if (round_mask != 0) { Node* mask = MakeConX(~round_mask); ! size = _gvn.transform( new(C) AndXNode(size, mask) ); } // else if round_mask == 0, the size computation is self-rounding if (return_size_val != NULL) { // This is the size --- 3609,3634 ---- if (tllen != NULL && tllen->_lo < 0) { // Add a manual constraint to a positive range. Cf. array_element_address. jlong size_max = arrayOopDesc::max_array_length(T_BYTE); if (size_max > tllen->_hi) size_max = tllen->_hi; const TypeLong* tlcon = TypeLong::make(CONST64(0), size_max, Type::WidenMin); ! lengthx = _gvn.transform( new ConvI2LNode(length, tlcon)); } } #endif // Combine header size (plus rounding) and body size. Then round down. // This computation cannot overflow, because it is used only in two // places, one where the length is sharply limited, and the other // after a successful allocation. Node* abody = lengthx; if (elem_shift != NULL) ! abody = _gvn.transform( new LShiftXNode(lengthx, elem_shift) ); ! Node* size = _gvn.transform( new AddXNode(headerx, abody) ); if (round_mask != 0) { Node* mask = MakeConX(~round_mask); ! size = _gvn.transform( new AndXNode(size, mask) ); } // else if round_mask == 0, the size computation is self-rounding if (return_size_val != NULL) { // This is the size
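Putting the pieces together, the final size is round_down(header + round_mask + (length << elem_shift)), i.e. the total rounded up to the allocation granularity, because the slop was pre-added to the header term. A worked example with illustrative numbers:

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t round_mask = 7;             // 8-byte rounding
      intptr_t headerx = 16 + round_mask;        // 16-byte header, slop pre-added
      intptr_t lengthx = 5;                      // five elements
      intptr_t elem_shift = 0;                   // 1-byte elements

      intptr_t abody = lengthx << elem_shift;    // LShiftX: body size in bytes
      intptr_t size  = (headerx + abody) & ~round_mask;  // AddX + AndX: round down
      assert(size == 24);                        // 16 + 5 = 21, rounded up to 24
      return 0;
    }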
*** 3644,3654 **** Node *mem = reset_memory(); set_all_memory(mem); // Create new memory state // Create the AllocateArrayNode and its result projections AllocateArrayNode* alloc ! = new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT), control(), mem, i_o(), size, klass_node, initial_slow_test, length); --- 3642,3652 ---- Node *mem = reset_memory(); set_all_memory(mem); // Create new memory state // Create the AllocateArrayNode and its result projections AllocateArrayNode* alloc ! = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT), control(), mem, i_o(), size, klass_node, initial_slow_test, length);
*** 3758,3779 **** // do not generate predicate. return; } Node *cont = _gvn.intcon(1); ! Node* opq = _gvn.transform(new (C) Opaque1Node(C, cont)); ! Node *bol = _gvn.transform(new (C) Conv2BNode(opq)); IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN); ! Node* iffalse = _gvn.transform(new (C) IfFalseNode(iff)); C->add_predicate_opaq(opq); { PreserveJVMState pjvms(this); set_control(iffalse); inc_sp(nargs); uncommon_trap(reason, Deoptimization::Action_maybe_recompile); } ! Node* iftrue = _gvn.transform(new (C) IfTrueNode(iff)); set_control(iftrue); } //------------------------------add_predicate--------------------------------- void GraphKit::add_predicate(int nargs) { --- 3756,3777 ---- // do not generate predicate. return; } Node *cont = _gvn.intcon(1); ! Node* opq = _gvn.transform(new Opaque1Node(C, cont)); ! Node *bol = _gvn.transform(new Conv2BNode(opq)); IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN); ! Node* iffalse = _gvn.transform(new IfFalseNode(iff)); C->add_predicate_opaq(opq); { PreserveJVMState pjvms(this); set_control(iffalse); inc_sp(nargs); uncommon_trap(reason, Deoptimization::Action_maybe_recompile); } ! Node* iftrue = _gvn.transform(new IfTrueNode(iff)); set_control(iftrue); } //------------------------------add_predicate--------------------------------- void GraphKit::add_predicate(int nargs) {
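The Opaque1/Conv2B pair builds an always-true branch that the parser cannot fold away: the constant 1 is hidden behind the opaque node, so the If survives until loop optimizations replace the condition with hoisted range checks. In ordinary control flow the shape is roughly the following sketch, where the volatile models "opaque to the optimizer" and the message is illustrative:

    #include <cstdio>

    static volatile int opaque_one = 1;   // models Opaque1Node wrapping intcon(1)

    int main() {
      if (!opaque_one) {                  // IfFalse: becomes the uncommon_trap path
        std::puts("deoptimize: hoisted check failed");
        return 1;
      }
      // IfTrue: normal continuation. Checks hoisted up to the predicate run
      // here; only a real violation ever reaches the trap path above.
      return 0;
    }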
*** 3961,3971 **** // is the queue for this thread full? __ if_then(index, BoolTest::ne, zeroX, likely); { // decrement the index ! Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t)))); // Now get the buffer location we will log the previous value into and store it Node *log_addr = __ AddP(no_base, buffer, next_index); __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered); // update the index --- 3959,3969 ---- // is the queue for this thread full? __ if_then(index, BoolTest::ne, zeroX, likely); { // decrement the index ! Node* next_index = _gvn.transform(new SubXNode(index, __ ConX(sizeof(intptr_t)))); // Now get the buffer location we will log the previous value into and store it Node *log_addr = __ AddP(no_base, buffer, next_index); __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered); // update the index
*** 4004,4014 **** __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw); // Now do the queue work __ if_then(index, BoolTest::ne, zeroX); { ! Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t)))); Node* log_addr = __ AddP(no_base, buffer, next_index); // Order, see storeCM. __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered); __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered); --- 4002,4012 ---- __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw); // Now do the queue work __ if_then(index, BoolTest::ne, zeroX); { ! Node* next_index = _gvn.transform(new SubXNode(index, __ ConX(sizeof(intptr_t)))); Node* log_addr = __ AddP(no_base, buffer, next_index); // Order, see storeCM. __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered); __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);
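Both G1 write-barrier paths, the SATB pre-barrier two hunks above and the dirty-card enqueue here, emit the same queue push: the index counts down in bytes, a non-zero index means there is room, and the slot sits at buffer + index - sizeof(intptr_t). A standalone analogue of that push (the struct is illustrative, not the real PtrQueue layout):

    #include <cstddef>
    #include <cstdint>

    struct QueueSketch {
      intptr_t* buffer;
      size_t    index;                // byte offset of next free slot, counting down

      bool try_enqueue(intptr_t value) {
        if (index == 0) return false; // full: the IR takes the runtime leaf call instead
        index -= sizeof(intptr_t);    // the SubXNode above
        *(intptr_t*)((char*)buffer + index) = value;   // the AddP + store
        return true;                  // index_adr is then updated with next_index
      }
    };

    int main() {
      intptr_t storage[4] = {0, 0, 0, 0};
      QueueSketch q = { storage, sizeof(storage) };
      q.try_enqueue(0x1234);          // lands in storage[3]
      return storage[3] == 0x1234 ? 0 : 1;
    }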
*** 4211,4217 **** } Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) { // Reify the property as a CastPP node in Ideal graph to comply with monotonicity // assumption of CCP analysis. ! return _gvn.transform(new(C) CastPPNode(ary, ary_type->cast_to_stable(true))); } --- 4209,4215 ---- } Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) { // Reify the property as a CastPP node in Ideal graph to comply with monotonicity // assumption of CCP analysis. ! return _gvn.transform(new CastPPNode(ary, ary_type->cast_to_stable(true))); }