--- old/src/share/vm/opto/library_call.cpp 2016-02-11 15:30:00.576567836 +0100
+++ new/src/share/vm/opto/library_call.cpp 2016-02-11 15:30:00.250270576 +0100
@@ -271,8 +271,12 @@
   bool inline_arraycopy();
   AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
                                                 RegionNode* slow_region);
-  JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
-  void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp);
+  JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc,
+                                          int& saved_reexecute_sp);
+  void arraycopy_move_allocation_here(AllocateArrayNode* alloc,
+                                      Node* dest,
+                                      JVMState* saved_jvms,
+                                      int saved_reexecute_sp);
   typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
   bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind);
@@ -4551,67 +4555,60 @@
 // the arraycopy to the state before the allocation so, in case of
 // deoptimization, we'll reexecute the allocation and the
 // initialization.
-JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp) {
-  if (alloc != NULL) {
-    ciMethod* trap_method = alloc->jvms()->method();
-    int trap_bci = alloc->jvms()->bci();
-
-    if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &
-        !C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_null_check)) {
-      // Make sure there's no store between the allocation and the
-      // arraycopy otherwise visible side effects could be rexecuted
-      // in case of deoptimization and cause incorrect execution.
-      bool no_interfering_store = true;
-      Node* mem = alloc->in(TypeFunc::Memory);
-      if (mem->is_MergeMem()) {
-        for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
-          Node* n = mms.memory();
-          if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
-            assert(n->is_Store(), "what else?");
-            no_interfering_store = false;
-            break;
-          }
-        }
-      } else {
-        for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
-          Node* n = mms.memory();
-          if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
-            assert(n->is_Store(), "what else?");
-            no_interfering_store = false;
-            break;
-          }
-        }
+JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc,
+                                                        int& saved_reexecute_sp) {
+  // Make sure there's no store between the allocation and the
+  // arraycopy otherwise visible side effects could be rexecuted
+  // in case of deoptimization and cause incorrect execution.
+  bool no_interfering_store = true;
+  Node* mem = alloc->in(TypeFunc::Memory);
+  if (mem->is_MergeMem()) {
+    for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
+      Node* n = mms.memory();
+      if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
+        assert(n->is_Store(), "what else?");
+        no_interfering_store = false;
+        break;
       }
+    }
+  } else {
+    for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
+      Node* n = mms.memory();
+      if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
+        assert(n->is_Store(), "what else?");
+        no_interfering_store = false;
+        break;
+      }
+    }
+  }
 
-      if (no_interfering_store) {
-        JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
-        uint size = alloc->req();
-        SafePointNode* sfpt = new SafePointNode(size, old_jvms);
-        old_jvms->set_map(sfpt);
-        for (uint i = 0; i < size; i++) {
-          sfpt->init_req(i, alloc->in(i));
-        }
-        // re-push array length for deoptimization
-        sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
-        old_jvms->set_sp(old_jvms->sp()+1);
-        old_jvms->set_monoff(old_jvms->monoff()+1);
-        old_jvms->set_scloff(old_jvms->scloff()+1);
-        old_jvms->set_endoff(old_jvms->endoff()+1);
-        old_jvms->set_should_reexecute(true);
-
-        sfpt->set_i_o(map()->i_o());
-        sfpt->set_memory(map()->memory());
-        sfpt->set_control(map()->control());
+  if (no_interfering_store) {
+    JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
+    uint size = alloc->req();
+    SafePointNode* sfpt = new SafePointNode(size, old_jvms);
+    old_jvms->set_map(sfpt);
+    for (uint i = 0; i < size; i++) {
+      sfpt->init_req(i, alloc->in(i));
+    }
+    // re-push array length for deoptimization
+    sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
+    old_jvms->set_sp(old_jvms->sp()+1);
+    old_jvms->set_monoff(old_jvms->monoff()+1);
+    old_jvms->set_scloff(old_jvms->scloff()+1);
+    old_jvms->set_endoff(old_jvms->endoff()+1);
+    old_jvms->set_should_reexecute(true);
+
+    sfpt->set_i_o(map()->i_o());
+    sfpt->set_memory(map()->memory());
+    sfpt->set_control(map()->control());
 
-        JVMState* saved_jvms = jvms();
-        saved_reexecute_sp = _reexecute_sp;
+    JVMState* saved_jvms = jvms();
+    saved_reexecute_sp = _reexecute_sp;
 
-        set_jvms(sfpt->jvms());
-        _reexecute_sp = jvms()->sp();
+    set_jvms(sfpt->jvms());
+    _reexecute_sp = jvms()->sp();
 
-        return saved_jvms;
-      }
-    }
+    return saved_jvms;
   }
   return NULL;
 }
@@ -4623,7 +4620,10 @@
 // deoptimize. This is possible because tightly_coupled_allocation()
 // guarantees there's no observer of the allocated array at this point
 // and the control flow is simple enough.
-void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp) {
+void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc,
+                                                    Node* dest,
+                                                    JVMState* saved_jvms,
+                                                    int saved_reexecute_sp) {
   if (saved_jvms != NULL && !stopped()) {
     assert(alloc != NULL, "only with a tightly coupled allocation");
     // restore JVM state to the state at the arraycopy
@@ -4694,14 +4694,18 @@
   // Check for allocation before we add nodes that would confuse
   // tightly_coupled_allocation()
   AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
+  ciMethod* alloc_method = NULL;
+  int alloc_bci = -1;
+  if (alloc != NULL) {
+    alloc_method = alloc->jvms()->method();
+    alloc_bci = alloc->jvms()->bci();
+  }
 
   int saved_reexecute_sp = -1;
-  JVMState* saved_jvms = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
-  // See arraycopy_restore_alloc_state() comment
-  // if alloc == NULL we don't have to worry about a tightly coupled allocation so we can emit all needed guards
-  // if saved_jvms != NULL (then alloc != NULL) then we can handle guards and a tightly coupled allocation
-  // if saved_jvms == NULL and alloc != NULL, we can’t emit any guards
-  bool can_emit_guards = (alloc == NULL || saved_jvms != NULL);
+  JVMState* saved_jvms = NULL;
+  if (alloc != NULL && !C->too_many_traps(alloc_method, alloc_bci, Deoptimization::Reason_null_check)) {
+    saved_jvms = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
+  }
 
   // The following tests must be performed
   // (1) src and dest are arrays.
@@ -4721,17 +4725,20 @@
   assert(null_ctl->is_top(), "no null control here");
   dest = null_check(dest, T_ARRAY);
 
-  if (!can_emit_guards) {
-    // if saved_jvms == NULL and alloc != NULL, we don't emit any
-    // guards but the arraycopy node could still take advantage of a
-    // tightly allocated allocation. tightly_coupled_allocation() is
-    // called again to make sure it takes the null check above into
-    // account: the null check is mandatory and if it caused an
-    // uncommon trap to be emitted then the allocation can't be
-    // considered tightly coupled in this context.
+  if (saved_jvms == NULL) {
+    // See if the null check above was optimized out (alloc not null)
     alloc = tightly_coupled_allocation(dest, NULL);
+    if (alloc != NULL && !C->too_many_traps(alloc_method, alloc_bci, Deoptimization::Reason_intrinsic)) {
+      saved_jvms = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
+    }
   }
 
+  // See arraycopy_restore_alloc_state() comment
+  // if alloc == NULL we don't have to worry about a tightly coupled allocation so we can emit all needed guards
+  // if saved_jvms != NULL (then alloc != NULL) then we can handle guards and a tightly coupled allocation
+  // if saved_jvms == NULL and alloc != NULL, we can’t emit any guards
+  bool can_emit_guards = (alloc == NULL || (saved_jvms != NULL && !C->too_many_traps(alloc_method, alloc_bci, Deoptimization::Reason_intrinsic)));
+
   bool validated = false;
 
   const Type* src_type = _gvn.type(src);
@@ -4834,14 +4841,7 @@
     }
   }
 
-  ciMethod* trap_method = method();
-  int trap_bci = bci();
-  if (saved_jvms != NULL) {
-    trap_method = alloc->jvms()->method();
-    trap_bci = alloc->jvms()->bci();
-  }
-
-  if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
+  if ((alloc != NULL || !C->too_many_traps(method(), bci(), Deoptimization::Reason_intrinsic)) &&
       can_emit_guards &&
      !src->is_top() && !dest->is_top()) {
    // validate arguments: enables transformation the ArrayCopyNode
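The notes below are not part of the patch; they illustrate, in hypothetical Java (all class and method names invented), the situations the change above reasons about.

A "tightly coupled allocation" is a destination array allocated immediately before the copy, so nothing can observe it between the allocation and the arraycopy. That is why, when the compiled arraycopy deoptimizes, it is safe to re-execute from the allocation, which is what the saved JVM state above arranges. A minimal sketch of that shape:

    // Hypothetical example: the destination is allocated right before the copy,
    // so no other code can see 'dest' between the allocation and the arraycopy.
    class TightlyCoupledCopy {
        static int[] copyPrefix(int[] src, int len) {
            int[] dest = new int[len];               // tightly coupled allocation
            System.arraycopy(src, 0, dest, 0, len);  // intrinsified copy
            return dest;
        }

        public static void main(String[] args) {
            int[] dst = copyPrefix(new int[] {1, 2, 3, 4, 5}, 3);
            System.out.println(java.util.Arrays.toString(dst)); // [1, 2, 3]
        }
    }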
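Still by way of illustration (hypothetical code, not from the patch): the no_interfering_store scan refuses to move the re-execution point when a visible store sits between the allocation and the copy, since replaying the allocation in the interpreter would also replay that store:

    // Hypothetical example: counter[0]++ is a visible side effect between the
    // allocation and the copy. If the arraycopy deoptimized with its state reset
    // to before the allocation, the interpreter would run the increment a second
    // time, so the allocation state must not be restored in this situation.
    class InterferingStore {
        static int[] copyAndCount(int[] src, int len, int[] counter) {
            int[] dest = new int[len];
            counter[0]++;                            // interfering store
            System.arraycopy(src, 0, dest, 0, len);
            return dest;
        }

        public static void main(String[] args) {
            int[] counter = new int[1];
            int[] dst = copyAndCount(new int[] {7, 8, 9}, 2, counter);
            System.out.println(java.util.Arrays.toString(dst) + " copies=" + counter[0]);
        }
    }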
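Finally, a sketch of what the "re-push array length for deoptimization" step refers to (again hypothetical code; the bytecode in the comment is only approximate). An array allocation pops its length from the operand stack, so a JVM state that re-executes the allocation has to put the length back on the expression stack; that is what the sfpt->ins_req(..., AllocateNode::ALength) call and the sp/monoff/scloff/endoff adjustments arrange:

    // Hypothetical example. For a method like this, javac emits roughly:
    //   iload_0        // push len
    //   newarray int   // pop len, push the new array
    // Re-executing the allocation in the interpreter therefore needs 'len'
    // back on the operand stack, hence the extra stack slot in the saved state.
    class ReexecuteLength {
        static int[] allocate(int len) {
            return new int[len];
        }

        public static void main(String[] args) {
            System.out.println(allocate(4).length); // 4
        }
    }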