src/share/vm/opto/macro.cpp
*** old/src/share/vm/opto/macro.cpp	Thu May 15 17:09:30 2014
--- new/src/share/vm/opto/macro.cpp	Thu May 15 17:09:30 2014
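NOTE: every hunk below makes the same mechanical substitution: the explicit
compile-arena placement argument is dropped from IR-node allocation, so
"new (C) FooNode(...)" becomes "new FooNode(...)". A minimal sketch of the
idea, assuming a simplified form of the Node::operator new overloads in
opto/node.hpp (the real definitions differ in detail):

    class Node {
     public:
      // Old form: the caller names the owning Compile explicitly and the
      // node is carved out of that compilation's node arena.
      void* operator new(size_t size, Compile* C) throw() {
        return C->node_arena()->Amalloc_D(size);
      }
      // New form: the current compilation is looked up implicitly, so every
      // call site can drop the "(C)" placement argument.
      void* operator new(size_t size) throw() {
        return Compile::current()->node_arena()->Amalloc_D(size);
      }
    };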

*** 106,129 ****
--- 106,129 ----
  }
  
  Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path) {
    Node* cmp;
    if (mask != 0) {
-     Node* and_node = transform_later(new (C) AndXNode(word, MakeConX(mask)));
-     cmp = transform_later(new (C) CmpXNode(and_node, MakeConX(bits)));
    } else {
      cmp = word;
    }
-   Node* bol = transform_later(new (C) BoolNode(cmp, BoolTest::ne));
-   IfNode* iff = new (C) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN );
    transform_later(iff);
  
    // Fast path taken.
!   Node *fast_taken = transform_later( new (C) IfFalseNode(iff) );
!   Node *fast_taken = transform_later(new IfFalseNode(iff));
  
    // Fast path not-taken, i.e. slow path
!   Node *slow_taken = transform_later( new (C) IfTrueNode(iff) );
!   Node *slow_taken = transform_later(new IfTrueNode(iff));
  
    if (return_fast_path) {
      region->init_req(edge, slow_taken); // Capture slow-control
      return fast_taken;
    } else {
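NOTE: opt_bits_test is reused by the lock and unlock expansions later in this
file, so its shape matters. What the nodes above compute, as an illustrative
pseudocode sketch (not HotSpot API):

    // cmp  = (mask != 0) ? (word & mask) : word
    // test = (cmp != bits), branched with PROB_MIN (slow side unlikely)
    // the unlikely control projection is wired into region->in(edge);
    // the other projection is returned, so callers chain the straight-line
    // fast-path code directly off the result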
*** 145,156 ****
--- 145,156 ----
  //------------------------------make_slow_call---------------------------------
  CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type, address slow_call, const char* leaf_name, Node* slow_path, Node* parm0, Node* parm1) {
  
    // Slow-path call
    CallNode *call = leaf_name
-     ? (CallNode*)new (C) CallLeafNode      ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM )
-     : (CallNode*)new (C) CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), oldcall->jvms()->bci(), TypeRawPtr::BOTTOM );
  
    // Slow path call has no side-effects, uses few values
    copy_predefined_input_for_runtime_call(slow_path, oldcall, call );
    if (parm0 != NULL)  call->init_req(TypeFunc::Parms+0, parm0);
    if (parm1 != NULL)  call->init_req(TypeFunc::Parms+1, parm1);
*** 421,431 ****
--- 421,431 ----
        uint length = mem->req();
        GrowableArray <Node *> values(length, length, NULL, false);
  
        // create a new Phi for the value
-       PhiNode *phi = new (C) PhiNode(mem->in(0), phi_type, NULL, instance_id, alias_idx, offset);
        transform_later(phi);
        value_phis->push(phi, mem->_idx);
  
        for (uint j = 1; j < length; j++) {
          Node *in = mem->in(j);
*** 733,743 ****
--- 733,743 ----
      assert(sfpt->jvms() != NULL, "missed JVMS");
      // Fields of scalar objs are referenced only at the end
      // of regular debuginfo at the last (youngest) JVMS.
      // Record relative start index.
      uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
-     SafePointScalarObjectNode* sobj = new (C) SafePointScalarObjectNode(res_type,
  #ifdef ASSERT
                                                                          alloc,
  #endif
                                                                          first_ind, nfields);
      sobj->init_req(0, C->root());
*** 841,851 ****
--- 841,851 ----
            // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
            // to be able scalar replace the allocation.
            if (field_val->is_EncodeP()) {
              field_val = field_val->in(1);
            } else {
-             field_val = transform_later(new (C) DecodeNNode(field_val, field_val->get_ptr_type()));
            }
          }
          sfpt->add_req(field_val);
        }
        JVMState *jvms = sfpt->jvms();
*** 1067,1077 ****
--- 1067,1077 ----
  }
  
  //---------------------------set_eden_pointers-------------------------
  void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) {
    if (UseTLAB) {                 // Private allocation: load from TLS
-     Node* thread = transform_later(new (C) ThreadLocalNode());
      int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset());
      int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset());
      eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset);
      eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset);
    } else {                       // Shared allocation: load from globals
*** 1203,1224 ****
--- 1203,1224 ----
    Node *toobig_false = ctrl;
  
    assert (initial_slow_test == NULL || !always_slow, "arguments must be consistent");
    // generate the initial test if necessary
    if (initial_slow_test != NULL ) {
-     slow_region = new (C) RegionNode(3);
  
      // Now make the initial failure test.  Usually a too-big test but
      // might be a TRUE for finalizers or a fancy class check for
      // newInstance0.
-     IfNode *toobig_iff = new (C) IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
      transform_later(toobig_iff);
      // Plug the failing-too-big test into the slow-path region
-     Node *toobig_true = new (C) IfTrueNode( toobig_iff );
      transform_later(toobig_true);
      slow_region ->init_req( too_big_or_final_path, toobig_true );
-     toobig_false = new (C) IfFalseNode( toobig_iff );
      transform_later(toobig_false);
    } else {         // No initial test, just fall into next case
      toobig_false = ctrl;
      debug_only(slow_region = NodeSentinel);
    }
*** 1247,1271 ****
--- 1247,1271 ----
      // prevent a degradation of the optimization.
      // See comment in memnode.hpp, around line 227 in class LoadPNode.
      Node *eden_end = make_load(ctrl, mem, eden_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
  
      // allocate the Region and Phi nodes for the result
-     result_region = new (C) RegionNode(3);
-     result_phi_rawmem = new (C) PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM);
-     result_phi_rawoop = new (C) PhiNode(result_region, TypeRawPtr::BOTTOM);
-     result_phi_i_o    = new (C) PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch
  
      // We need a Region for the loop-back contended case.
      enum { fall_in_path = 1, contended_loopback_path = 2 };
      Node *contended_region;
      Node *contended_phi_rawmem;
      if (UseTLAB) {
        contended_region = toobig_false;
        contended_phi_rawmem = mem;
      } else {
-       contended_region = new (C) RegionNode(3);
-       contended_phi_rawmem = new (C) PhiNode(contended_region, Type::MEMORY, TypeRawPtr::BOTTOM);
        // Now handle the passing-too-big test. We fall into the contended
        // loop-back merge point.
        contended_region    ->init_req(fall_in_path, toobig_false);
        contended_phi_rawmem->init_req(fall_in_path, mem);
        transform_later(contended_region);
*** 1273,1310 ****
--- 1273,1310 ----
      }
  
      // Load(-locked) the heap top.
      // See note above concerning the control input when using a TLAB
      Node *old_eden_top = UseTLAB
-       ? new (C) LoadPNode      (ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered)
-       : new (C) LoadPLockedNode(contended_region, contended_phi_rawmem, eden_top_adr, MemNode::acquire);
  
      transform_later(old_eden_top);
      // Add to heap top to get a new heap top
-     Node *new_eden_top = new (C) AddPNode(top(), old_eden_top, size_in_bytes);
      transform_later(new_eden_top);
      // Check for needing a GC; compare against heap end
-     Node *needgc_cmp = new (C) CmpPNode(new_eden_top, eden_end);
      transform_later(needgc_cmp);
-     Node *needgc_bol = new (C) BoolNode(needgc_cmp, BoolTest::ge);
      transform_later(needgc_bol);
-     IfNode *needgc_iff = new (C) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
      transform_later(needgc_iff);
  
      // Plug the failing-heap-space-need-gc test into the slow-path region
-     Node *needgc_true = new (C) IfTrueNode(needgc_iff);
      transform_later(needgc_true);
      if (initial_slow_test) {
        slow_region->init_req(need_gc_path, needgc_true);
        // This completes all paths into the slow merge point
        transform_later(slow_region);
      } else {                      // No initial slow path needed!
        // Just fall from the need-GC path straight into the VM call.
        slow_region = needgc_true;
      }
      // No need for a GC.  Setup for the Store-Conditional
-     Node *needgc_false = new (C) IfFalseNode(needgc_iff);
      transform_later(needgc_false);
  
      // Grab regular I/O before optional prefetch may change it.
      // Slow-path does no I/O so just set it to the original I/O.
      result_phi_i_o->init_req(slow_result_path, i_o);
*** 1320,1372 ****
--- 1320,1372 ----
      // Store (-conditional) the modified eden top back down.
      // StorePConditional produces flags for a test PLUS a modified raw
      // memory state.
      if (UseTLAB) {
        Node* store_eden_top =
-         new (C) StorePNode(needgc_false, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, new_eden_top, MemNode::unordered);
        transform_later(store_eden_top);
        fast_oop_ctrl = needgc_false; // No contention, so this is the fast path
        fast_oop_rawmem = store_eden_top;
      } else {
        Node* store_eden_top =
-         new (C) StorePConditionalNode(needgc_false, contended_phi_rawmem, eden_top_adr, new_eden_top, fast_oop/*old_eden_top*/);
        transform_later(store_eden_top);
-       Node *contention_check = new (C) BoolNode(store_eden_top, BoolTest::ne);
        transform_later(contention_check);
-       store_eden_top = new (C) SCMemProjNode(store_eden_top);
        transform_later(store_eden_top);
  
        // If not using TLABs, check to see if there was contention.
-       IfNode *contention_iff = new (C) IfNode (needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN);
        transform_later(contention_iff);
-       Node *contention_true = new (C) IfTrueNode(contention_iff);
        transform_later(contention_true);
        // If contention, loopback and try again.
        contended_region->init_req(contended_loopback_path, contention_true);
        contended_phi_rawmem->init_req(contended_loopback_path, store_eden_top);
  
        // Fast-path succeeded with no contention!
-       Node *contention_false = new (C) IfFalseNode(contention_iff);
        transform_later(contention_false);
        fast_oop_ctrl = contention_false;
  
        // Bump total allocated bytes for this thread
-       Node* thread = new (C) ThreadLocalNode();
        transform_later(thread);
        Node* alloc_bytes_adr = basic_plus_adr(top()/*not oop*/, thread, in_bytes(JavaThread::allocated_bytes_offset()));
        Node* alloc_bytes = make_load(fast_oop_ctrl, store_eden_top, alloc_bytes_adr, 0, TypeLong::LONG, T_LONG);
  #ifdef _LP64
        Node* alloc_size = size_in_bytes;
  #else
-       Node* alloc_size = new (C) ConvI2LNode(size_in_bytes);
        transform_later(alloc_size);
  #endif
-       Node* new_alloc_bytes = new (C) AddLNode(alloc_bytes, alloc_size);
        transform_later(new_alloc_bytes);
        fast_oop_rawmem = make_store(fast_oop_ctrl, store_eden_top, alloc_bytes_adr, 0, new_alloc_bytes, T_LONG);
      }
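NOTE: the hunks above are the heart of the expanded allocation fast path, a
classic bump-the-pointer scheme. Roughly, in illustrative pseudocode (names
are not HotSpot API):

    retry:
      char* old_top = *eden_top_adr;            // LoadP / LoadPLocked
      char* new_top = old_top + size_in_bytes;  // AddP
      if (new_top >= *eden_end_adr)             // CmpP + Bool(ge) + If
        goto slow_path;                         // not enough space: call into the VM
      if (UseTLAB) {
        *eden_top_adr = new_top;                // plain StoreP, TLAB is thread-private
      } else {
        if (!CAS(eden_top_adr, old_top, new_top)) // StorePConditional on shared eden
          goto retry;                             // contention: loop via contended_region
      }
      // new object occupies [old_top, new_top); then bump thread->allocated_bytes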
*** 1389,1401 ****
--- 1389,1401 ----
          MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
          transform_later(mb);
  
          mb->init_req(TypeFunc::Memory, fast_oop_rawmem);
          mb->init_req(TypeFunc::Control, fast_oop_ctrl);
-         fast_oop_ctrl = new (C) ProjNode(mb,TypeFunc::Control);
          transform_later(fast_oop_ctrl);
-         fast_oop_rawmem = new (C) ProjNode(mb,TypeFunc::Memory);
          transform_later(fast_oop_rawmem);
        } else {
          // Add the MemBarStoreStore after the InitializeNode so that
          // all stores performing the initialization that were moved
          // before the InitializeNode happen before the storestore
*** 1405,1427 ****
--- 1405,1427 ----
          Node* init_mem = init->proj_out(TypeFunc::Memory);
  
          MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
          transform_later(mb);
  
-         Node* ctrl = new (C) ProjNode(init,TypeFunc::Control);
          transform_later(ctrl);
-         Node* mem = new (C) ProjNode(init,TypeFunc::Memory);
          transform_later(mem);
  
          // The MemBarStoreStore depends on control and memory coming
          // from the InitializeNode
          mb->init_req(TypeFunc::Memory, mem);
          mb->init_req(TypeFunc::Control, ctrl);
  
-         ctrl = new (C) ProjNode(mb,TypeFunc::Control);
          transform_later(ctrl);
-         mem = new (C) ProjNode(mb,TypeFunc::Memory);
          transform_later(mem);
  
          // All nodes that depended on the InitializeNode for control
          // and memory must now depend on the MemBarNode that itself
          // depends on the InitializeNode
*** 1431,1460 ****
--- 1431,1460 ----
      }
  
      if (C->env()->dtrace_extended_probes()) {
        // Slow-path call
        int size = TypeFunc::Parms + 2;
-       CallLeafNode *call = new (C) CallLeafNode(OptoRuntime::dtrace_object_alloc_Type(),
                                                  CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc_base),
                                                  "dtrace_object_alloc",
                                                  TypeRawPtr::BOTTOM);
  
        // Get base of thread-local storage area
-       Node* thread = new (C) ThreadLocalNode();
        transform_later(thread);
  
        call->init_req(TypeFunc::Parms+0, thread);
        call->init_req(TypeFunc::Parms+1, fast_oop);
        call->init_req(TypeFunc::Control, fast_oop_ctrl);
        call->init_req(TypeFunc::I_O    , top()); // does no i/o
        call->init_req(TypeFunc::Memory , fast_oop_rawmem);
        call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
        call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
        transform_later(call);
-       fast_oop_ctrl = new (C) ProjNode(call,TypeFunc::Control);
        transform_later(fast_oop_ctrl);
-       fast_oop_rawmem = new (C) ProjNode(call,TypeFunc::Memory);
        transform_later(fast_oop_rawmem);
      }
  
      // Plug in the successful fast-path into the result merge point
      result_region    ->init_req(fast_result_path, fast_oop_ctrl);
*** 1465,1475 ****
--- 1465,1475 ----
      slow_region = ctrl;
      result_phi_i_o = i_o; // Rename it to use in the following code.
    }
  
    // Generate slow-path call
-   CallNode *call = new (C) CallStaticJavaNode(slow_call_type, slow_call_address,
                                                OptoRuntime::stub_name(slow_call_address),
                                                alloc->jvms()->bci(),
                                                TypePtr::BOTTOM);
    call->init_req( TypeFunc::Control, slow_region );
    call->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
*** 1522,1532 ****
--- 1522,1532 ----
    }
  
    // Now change uses of _memproj_catchall to use _memproj_fallthrough and delete
    // _memproj_catchall so we end up with a call that has only 1 memory projection.
    if (_memproj_catchall != NULL ) {
      if (_memproj_fallthrough == NULL) {
-       _memproj_fallthrough = new (C) ProjNode(call, TypeFunc::Memory);
        transform_later(_memproj_fallthrough);
      }
      for (DUIterator_Fast imax, i = _memproj_catchall->fast_outs(imax); i < imax; i++) {
        Node *use = _memproj_catchall->fast_out(i);
        _igvn.rehash_node_delayed(use);
*** 1554,1564 ****
--- 1554,1564 ----
    }
  
    // Now change uses of _ioproj_catchall to use _ioproj_fallthrough and delete
    // _ioproj_catchall so we end up with a call that has only 1 i_o projection.
    if (_ioproj_catchall != NULL ) {
      if (_ioproj_fallthrough == NULL) {
-       _ioproj_fallthrough = new (C) ProjNode(call, TypeFunc::I_O);
        transform_later(_ioproj_fallthrough);
      }
      for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) {
        Node *use = _ioproj_catchall->fast_out(i);
        _igvn.rehash_node_delayed(use);
*** 1688,1738 ****
--- 1688,1738 ----
     if( UseTLAB && AllocatePrefetchStyle == 2 ) {
        // Generate prefetch allocation with watermark check.
        // As an allocation hits the watermark, we will prefetch starting
        // at a "distance" away from watermark.
-       Node *pf_region = new (C) RegionNode(3);
-       Node *pf_phi_rawmem = new (C) PhiNode( pf_region, Type::MEMORY, TypeRawPtr::BOTTOM );
        // I/O is used for Prefetch
-       Node *pf_phi_abio = new (C) PhiNode( pf_region, Type::ABIO );
  
-       Node *thread = new (C) ThreadLocalNode();
        transform_later(thread);
  
-       Node *eden_pf_adr = new (C) AddPNode( top()/*not oop*/, thread, _igvn.MakeConX(in_bytes(JavaThread::tlab_pf_top_offset())) );
        transform_later(eden_pf_adr);
  
-       Node *old_pf_wm = new (C) LoadPNode(needgc_false, contended_phi_rawmem, eden_pf_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered);
        transform_later(old_pf_wm);
  
        // check against new_eden_top
-       Node *need_pf_cmp = new (C) CmpPNode( new_eden_top, old_pf_wm );
        transform_later(need_pf_cmp);
-       Node *need_pf_bol = new (C) BoolNode( need_pf_cmp, BoolTest::ge );
        transform_later(need_pf_bol);
-       IfNode *need_pf_iff = new (C) IfNode( needgc_false, need_pf_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN );
        transform_later(need_pf_iff);
  
        // true node, add prefetchdistance
-       Node *need_pf_true = new (C) IfTrueNode( need_pf_iff );
        transform_later(need_pf_true);
  
-       Node *need_pf_false = new (C) IfFalseNode( need_pf_iff );
        transform_later(need_pf_false);
  
-       Node *new_pf_wmt = new (C) AddPNode( top(), old_pf_wm, _igvn.MakeConX(AllocatePrefetchDistance) );
        transform_later(new_pf_wmt );
        new_pf_wmt->set_req(0, need_pf_true);
  
-       Node *store_new_wmt = new (C) StorePNode(need_pf_true, contended_phi_rawmem, eden_pf_adr, TypeRawPtr::BOTTOM, new_pf_wmt, MemNode::unordered);
        transform_later(store_new_wmt);
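NOTE: AllocatePrefetchStyle == 2 implements a TLAB watermark scheme: prefetch
instructions are only re-issued when an allocation crosses the stored
watermark (tlab_pf_top), which is then advanced by AllocatePrefetchDistance.
An illustrative sketch of what this hunk and the loop in the next hunk
compute (not HotSpot API):

    if (new_eden_top >= pf_watermark) {         // crossed the watermark?
      char* new_wm = pf_watermark + AllocatePrefetchDistance;
      pf_watermark = new_wm;                    // stored back via the StorePNode
      for (uint i = 0; i < lines; i++)          // lines = distance / step_size
        prefetch(new_wm + i * AllocatePrefetchStepSize);
    }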
*** 1744,1757 ****
--- 1744,1757 ----
        uint lines = AllocatePrefetchDistance / AllocatePrefetchStepSize;
        uint step_size = AllocatePrefetchStepSize;
        uint distance = 0;
  
        for ( uint i = 0; i < lines; i++ ) {
-         prefetch_adr = new (C) AddPNode( old_pf_wm, new_pf_wmt, _igvn.MakeConX(distance) );
          transform_later(prefetch_adr);
-         prefetch = new (C) PrefetchAllocationNode( i_o, prefetch_adr );
          transform_later(prefetch);
          distance += step_size;
          i_o = prefetch;
        }
        pf_phi_abio->set_req( pf_path, i_o );
*** 1770,1812 ****
--- 1770,1812 ----
        contended_phi_rawmem = pf_phi_rawmem;
        i_o = pf_phi_abio;
     } else if( UseTLAB && AllocatePrefetchStyle == 3 ) {
        // Insert a prefetch for each allocation.
        // This code is used for Sparc with BIS.
-       Node *pf_region = new (C) RegionNode(3);
-       Node *pf_phi_rawmem = new (C) PhiNode( pf_region, Type::MEMORY, TypeRawPtr::BOTTOM );
  
        // Generate several prefetch instructions.
        uint lines = (length != NULL) ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
        uint step_size = AllocatePrefetchStepSize;
        uint distance = AllocatePrefetchDistance;
  
        // Next cache address.
-       Node *cache_adr = new (C) AddPNode(old_eden_top, old_eden_top, _igvn.MakeConX(distance));
        transform_later(cache_adr);
-       cache_adr = new (C) CastP2XNode(needgc_false, cache_adr);
        transform_later(cache_adr);
        Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1));
-       cache_adr = new (C) AndXNode(cache_adr, mask);
        transform_later(cache_adr);
-       cache_adr = new (C) CastX2PNode(cache_adr);
        transform_later(cache_adr);
  
        // Prefetch
-       Node *prefetch = new (C) PrefetchAllocationNode( contended_phi_rawmem, cache_adr );
        prefetch->set_req(0, needgc_false);
        transform_later(prefetch);
        contended_phi_rawmem = prefetch;
        Node *prefetch_adr;
        distance = step_size;
        for ( uint i = 1; i < lines; i++ ) {
-         prefetch_adr = new (C) AddPNode( cache_adr, cache_adr, _igvn.MakeConX(distance) );
          transform_later(prefetch_adr);
-         prefetch = new (C) PrefetchAllocationNode( contended_phi_rawmem, prefetch_adr );
          transform_later(prefetch);
          distance += step_size;
          contended_phi_rawmem = prefetch;
        }
     } else if( AllocatePrefetchStyle > 0 ) {
*** 1816,1829 ****
--- 1816,1829 ----
        // Generate several prefetch instructions.
        uint lines = (length != NULL) ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
        uint step_size = AllocatePrefetchStepSize;
        uint distance = AllocatePrefetchDistance;
        for ( uint i = 0; i < lines; i++ ) {
-         prefetch_adr = new (C) AddPNode( old_eden_top, new_eden_top, _igvn.MakeConX(distance) );
          transform_later(prefetch_adr);
-         prefetch = new (C) PrefetchAllocationNode( i_o, prefetch_adr );
          // Do not let it float too high, since if eden_top == eden_end,
          // both might be null.
          if( i == 0 ) { // Set control for first prefetch, next follows it
            prefetch->init_req(0, needgc_false);
          }
*** 2168,2183 ****
--- 2168,2183 ----
     *       OptoRuntime::complete_monitor_locking_Java(obj);
     *     }
     *   }
     */
  
-     region  = new (C) RegionNode(5);
      // create a Phi for the memory state
-     mem_phi = new (C) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
  
-     Node* fast_lock_region  = new (C) RegionNode(3);
-     Node* fast_lock_mem_phi = new (C) PhiNode( fast_lock_region, Type::MEMORY, TypeRawPtr::BOTTOM);
  
      // First, check mark word for the biased lock pattern.
      Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
  
      // Get fast path - mark word has the biased lock pattern.
*** 2203,2216 ****
--- 2203,2216 ----
  #endif
        klass_node->init_req(0, ctrl);
      }
      Node *proto_node = make_load(ctrl, mem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeX_X, TypeX_X->basic_type());
  
-     Node* thread = transform_later(new (C) ThreadLocalNode());
-     Node* cast_thread = transform_later(new (C) CastP2XNode(ctrl, thread));
-     Node* o_node = transform_later(new (C) OrXNode(cast_thread, proto_node));
-     Node* x_node = transform_later(new (C) XorXNode(o_node, mark_node));
  
      // Get slow path - mark word does NOT match the value.
      Node* not_biased_ctrl = opt_bits_test(ctrl, region, 3, x_node,
                                            (~markOopDesc::age_mask_in_place), 0);
      // region->in(3) is set to fast path - the object is biased to the current thread.
*** 2229,2249 ****
--- 2229,2249 ----
        // fast_lock_region->in(2) - the prototype header is no longer biased
        // and we have to revoke the bias on this object.
        // We are going to try to reset the mark of this object to the prototype
        // value and fall through to the CAS-based locking scheme.
        Node* adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
-       Node* cas = new (C) StoreXConditionalNode(not_biased_ctrl, mem, adr, proto_node, mark_node);
        transform_later(cas);
!       Node* proj = transform_later( new (C) SCMemProjNode(cas));
        fast_lock_mem_phi->init_req(2, proj);
  
        // Second, check epoch bits.
-       Node* rebiased_region  = new (C) RegionNode(3);
-       Node* old_phi = new (C) PhiNode( rebiased_region, TypeX_X);
-       Node* new_phi = new (C) PhiNode( rebiased_region, TypeX_X);
  
        // Get slow path - mark word does NOT match epoch bits.
        Node* epoch_ctrl = opt_bits_test(ctrl, rebiased_region, 1, x_node,
                                         markOopDesc::epoch_mask_in_place, 0);
        // The epoch of the current bias is not valid, attempt to rebias the object
*** 2256,2290 ****
--- 2256,2289 ----
        // The epoch of the current bias is still valid but we know
        // nothing about the owner; it might be set or it might be clear.
        Node* cmask = MakeConX(markOopDesc::biased_lock_mask_in_place |
                               markOopDesc::age_mask_in_place |
                               markOopDesc::epoch_mask_in_place);
-       Node* old = transform_later(new (C) AndXNode(mark_node, cmask));
-       cast_thread = transform_later(new (C) CastP2XNode(ctrl, thread));
-       Node* new_mark = transform_later(new (C) OrXNode(cast_thread, old));
        old_phi->init_req(1, old);
        new_phi->init_req(1, new_mark);
  
        transform_later(rebiased_region);
        transform_later(old_phi);
        transform_later(new_phi);
  
        // Try to acquire the bias of the object using an atomic operation.
        // If this fails we will go in to the runtime to revoke the object's bias.
!       cas = new (C) StoreXConditionalNode(rebiased_region, mem, adr, new_phi, old_phi);
!       cas = new StoreXConditionalNode(rebiased_region, mem, adr, new_phi, old_phi);
        transform_later(cas);
!       proj = transform_later( new (C) SCMemProjNode(cas));
  
        // Get slow path - Failed to CAS.
        not_biased_ctrl = opt_bits_test(rebiased_region, region, 4, cas, 0, 0);
        mem_phi->init_req(4, proj);
        // region->in(4) is set to fast path - the object is rebiased to the current thread.
  
        // Failed to CAS.
-       slow_path  = new (C) RegionNode(3);
-       Node *slow_mem = new (C) PhiNode( slow_path, Type::MEMORY, TypeRawPtr::BOTTOM);
  
        slow_path->init_req(1, not_biased_ctrl); // Capture slow-control
        slow_mem->init_req(1, proj);
  
        // Call CAS-based locking scheme (FastLock node).
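NOTE: the rebias attempt above mirrors the assembler-level biased-locking
logic: keep the bias, age and epoch bits of the current mark word and try to
install the current thread as the bias owner with a single CAS. Illustrative
pseudocode (names are not HotSpot API):

    intptr_t old      = mark & (biased_lock_mask | age_mask | epoch_mask);
    intptr_t new_mark = (intptr_t)current_thread | old;
    if (!CAS(mark_adr, old, new_mark))
      goto slow_path;   // lost the race: revoke or rebias in the runtime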
*** 2304,2316 ****
--- 2303,2315 ----
      transform_later(slow_mem);
      // Reset lock's memory edge.
      lock->set_req(TypeFunc::Memory, slow_mem);
  
    } else {
-     region  = new (C) RegionNode(3);
      // create a Phi for the memory state
-     mem_phi = new (C) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
  
      // Optimize test; set region slot 2
      slow_path = opt_bits_test(ctrl, region, 2, flock, 0, 0);
      mem_phi->init_req(2, mem);
    }
*** 2337,2347 ****
--- 2336,2346 ----
    region->init_req(1, slow_ctrl);
    // region inputs are now complete
    transform_later(region);
    _igvn.replace_node(_fallthroughproj, region);
  
!   Node *memproj = transform_later( new(C) ProjNode(call, TypeFunc::Memory) );
!   Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory));
    mem_phi->init_req(1, memproj );
    transform_later(mem_phi);
    _igvn.replace_node(_memproj_fallthrough, mem_phi);
  }
*** 2362,2387 ****
--- 2361,2386 ----
    Node *mem_phi;
  
    if (UseOptoBiasInlining) {
      // Check for biased locking unlock case, which is a no-op.
      // See the full description in MacroAssembler::biased_locking_exit().
-     region  = new (C) RegionNode(4);
      // create a Phi for the memory state
-     mem_phi = new (C) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
      mem_phi->init_req(3, mem);
  
      Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
      ctrl = opt_bits_test(ctrl, region, 3, mark_node,
                           markOopDesc::biased_lock_mask_in_place,
                           markOopDesc::biased_lock_pattern);
    } else {
-     region  = new (C) RegionNode(3);
      // create a Phi for the memory state
-     mem_phi = new (C) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
    }
  
-   FastUnlockNode *funlock = new (C) FastUnlockNode( ctrl, obj, box );
    funlock = transform_later( funlock )->as_FastUnlock();
    // Optimize test; set region slot 2
    Node *slow_path = opt_bits_test(ctrl, region, 2, funlock, 0, 0);
  
    CallNode *call = make_slow_call( (CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), "complete_monitor_unlocking_C", slow_path, obj, box );
*** 2402,2412 ****
--- 2401,2411 ----
    region->init_req(1, slow_ctrl);
    // region inputs are now complete
    transform_later(region);
    _igvn.replace_node(_fallthroughproj, region);
  
!   Node *memproj = transform_later( new(C) ProjNode(call, TypeFunc::Memory) );
    mem_phi->init_req(1, memproj );
    mem_phi->init_req(2, mem);
    transform_later(mem_phi);
    _igvn.replace_node(_memproj_fallthrough, mem_phi);
  }
