src/share/vm/opto/library_call.cpp
Index
Unified diffs
Context diffs
Sdiffs
Patch
New
Old
Previous File
Next File
hotspot Cdiff src/share/vm/opto/library_call.cpp
src/share/vm/opto/library_call.cpp
Print this page
rev 10219 : 8146828: Subsequent arraycopy does not always eliminate array zeroing
Summary: null check before allocation and arraycopy prevents zeroing elimination
Reviewed-by:
rev 10220 : comments
*** 269,280 ****
// Helper functions for inlining arraycopy
bool inline_arraycopy();
AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
RegionNode* slow_region);
! JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
! void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp);
typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind);
bool inline_unsafe_ordered_store(BasicType type);
bool inline_unsafe_fence(vmIntrinsics::ID id);
--- 269,284 ----
// Helper functions for inlining arraycopy
bool inline_arraycopy();
AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
RegionNode* slow_region);
! JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc,
! int& saved_reexecute_sp);
! void arraycopy_move_allocation_here(AllocateArrayNode* alloc,
! Node* dest,
! JVMState* saved_jvms,
! int saved_reexecute_sp);
typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind);
bool inline_unsafe_ordered_store(BasicType type);
bool inline_unsafe_fence(vmIntrinsics::ID id);
*** 4549,4565 ****
// uninitialized array will escape the compiled method. To prevent that
// we set the JVM state for uncommon traps between the allocation and
// the arraycopy to the state before the allocation so, in case of
// deoptimization, we'll reexecute the allocation and the
// initialization.
! JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp) {
! if (alloc != NULL) {
! ciMethod* trap_method = alloc->jvms()->method();
! int trap_bci = alloc->jvms()->bci();
!
! if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &
! !C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_null_check)) {
// Make sure there's no store between the allocation and the
// arraycopy otherwise visible side effects could be reexecuted
// in case of deoptimization and cause incorrect execution.
bool no_interfering_store = true;
Node* mem = alloc->in(TypeFunc::Memory);
--- 4553,4564 ----
// uninitialized array will escape the compiled method. To prevent that
// we set the JVM state for uncommon traps between the allocation and
// the arraycopy to the state before the allocation so, in case of
// deoptimization, we'll reexecute the allocation and the
// initialization.
! JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc,
! int& saved_reexecute_sp) {
// Make sure there's no store between the allocation and the
// arraycopy otherwise visible side effects could be reexecuted
// in case of deoptimization and cause incorrect execution.
bool no_interfering_store = true;
Node* mem = alloc->in(TypeFunc::Memory);
*** 4609,4631 ****
set_jvms(sfpt->jvms());
_reexecute_sp = jvms()->sp();
return saved_jvms;
}
- }
- }
return NULL;
}
! // In case of a deoptimization, we restart execution at the
! // allocation, allocating a new array. We would leave an uninitialized
! // array in the heap that GCs wouldn't expect. Move the allocation
! // after the traps so we don't allocate the array if we
! // deoptimize. This is possible because tightly_coupled_allocation()
// guarantees there's no observer of the allocated array at this point
! // and the control flow is simple enough.
! void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp) {
if (saved_jvms != NULL && !stopped()) {
assert(alloc != NULL, "only with a tightly coupled allocation");
// restore JVM state to the state at the arraycopy
saved_jvms->map()->set_control(map()->control());
assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
--- 4608,4632 ----
set_jvms(sfpt->jvms());
_reexecute_sp = jvms()->sp();
return saved_jvms;
}
return NULL;
}
! // Because of zeroing elimination, a trap between an arraycopy and its
! // tightly coupled allocation would leave an uninitialized array in
! // the heap that GCs wouldn't expect. Moving the allocation after the
! // traps so we don't allocate the array if we deoptimize solves that
! // problem. This is possible because tightly_coupled_allocation()
// guarantees there's no observer of the allocated array at this point
! // and the control flow is simple enough. In case of a deoptimization,
! // we restart execution before the allocation, allocating a new array.
! void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc,
! Node* dest,
! JVMState* saved_jvms,
! int saved_reexecute_sp) {
if (saved_jvms != NULL && !stopped()) {
assert(alloc != NULL, "only with a tightly coupled allocation");
// restore JVM state to the state at the arraycopy
saved_jvms->map()->set_control(map()->control());
assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
*** 4693,4709 ****
// Check for allocation before we add nodes that would confuse
// tightly_coupled_allocation()
AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
int saved_reexecute_sp = -1;
! JVMState* saved_jvms = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
! // See arraycopy_restore_alloc_state() comment
! // if alloc == NULL we don't have to worry about a tightly coupled allocation so we can emit all needed guards
! // if saved_jvms != NULL (then alloc != NULL) then we can handle guards and a tightly coupled allocation
! // if saved_jvms == NULL and alloc != NULL, we can't emit any guards
! bool can_emit_guards = (alloc == NULL || saved_jvms != NULL);
// The following tests must be performed
// (1) src and dest are arrays.
// (2) src and dest arrays must have elements of the same BasicType
// (3) src and dest must not be null.
--- 4694,4731 ----
// Check for allocation before we add nodes that would confuse
// tightly_coupled_allocation()
AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
+ // If the allocation is moved below the guards then we deopt before
+ // the allocation and traps are recorded (and must be checked) at
+ // the method/bci of the allocation. Otherwise, we record/check for
+ // traps at the arraycopy call.
+ ciMethod* alloc_method = NULL;
+ int alloc_bci = -1;
+ if (alloc != NULL) {
+ alloc_method = alloc->jvms()->method();
+ alloc_bci = alloc->jvms()->bci();
+ }
+
int saved_reexecute_sp = -1;
! JVMState* saved_jvms = NULL;
! // In case of a tightly coupled allocation, the allocation needs to
! // be moved after the null check for src below so in case we deopt
! // at the null check, the array is not left uninitialized (because
! // of zeroing elimination).
! // If guards are emitted, the allocation is moved all the way after
! // the guards. If guards are not emitted (because some guards
! // trapped too much in the past), the allocation is moved between
! // the null check for src and the array copy code. Once expanded the
! // array copy code doesn't include safepoints before the copy itself
! // so there's no risk that an uninitialized array escapes the
! // method.
! if (alloc != NULL && !C->too_many_traps(alloc_method, alloc_bci, Deoptimization::Reason_null_check) &&
! !C->too_many_traps(alloc_method, alloc_bci, Deoptimization::Reason_speculate_null_check)) {
! saved_jvms = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
! }
// The following tests must be performed
// (1) src and dest are arrays.
// (2) src and dest arrays must have elements of the same BasicType
// (3) src and dest must not be null.
*** 4715,4739 ****
// (9) each element of an oop array must be assignable
// (3) src and dest must not be null.
// always do this here because we need the JVM state for uncommon traps
Node* null_ctl = top();
! src = saved_jvms != NULL ? null_check_oop(src, &null_ctl, true, true) : null_check(src, T_ARRAY);
assert(null_ctl->is_top(), "no null control here");
dest = null_check(dest, T_ARRAY);
! if (!can_emit_guards) {
! // if saved_jvms == NULL and alloc != NULL, we don't emit any
! // guards but the arraycopy node could still take advantage of a
! // tightly allocated allocation. tightly_coupled_allocation() is
! // called again to make sure it takes the null check above into
! // account: the null check is mandatory and if it caused an
! // uncommon trap to be emitted then the allocation can't be
! // considered tightly coupled in this context.
alloc = tightly_coupled_allocation(dest, NULL);
}
bool validated = false;
const Type* src_type = _gvn.type(src);
const Type* dest_type = _gvn.type(dest);
const TypeAryPtr* top_src = src_type->isa_aryptr();
--- 4737,4776 ----
// (9) each element of an oop array must be assignable
// (3) src and dest must not be null.
// always do this here because we need the JVM state for uncommon traps
Node* null_ctl = top();
! src = saved_jvms != NULL ? null_check_oop(src, &null_ctl, true, true, !_gvn.type(src)->speculative_maybe_null()) : null_check(src, T_ARRAY);
assert(null_ctl->is_top(), "no null control here");
dest = null_check(dest, T_ARRAY);
! // We emitted a null check for src.
! // In case of a tightly coupled allocation:
! // - the null check for dest is optimized out
! // - if saved_jvms is not NULL, we decided to move the allocation
! // below the src null check and are free to move the allocation
! // below the guards (if we emit them) as well.
! // - if saved_jvms is NULL, the src null check could be a witness of
! // the allocation and so prevent zeroing elimination. We need to
! // call tightly_coupled_allocation() again to check if it's the case
! // or not. If we still have a tightly coupled allocation, then it
! // can be moved below the guards (if we emit them) and we call
! // arraycopy_restore_alloc_state() to prepare the move of the
! // allocation.
! if (saved_jvms == NULL && alloc != NULL) {
alloc = tightly_coupled_allocation(dest, NULL);
+ if (alloc != NULL && !C->too_many_traps(alloc_method, alloc_bci, Deoptimization::Reason_intrinsic)) {
+ saved_jvms = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
+ }
}
+ // See arraycopy_restore_alloc_state() comment
+ // if alloc == NULL we don't have to worry about a tightly coupled allocation so we can emit all needed guards
+ // if saved_jvms != NULL (then alloc != NULL) then we can handle guards and a tightly coupled allocation
+ // if saved_jvms == NULL and alloc != NULL, we can't emit any guards
+ bool can_emit_guards = (alloc == NULL || (saved_jvms != NULL && !C->too_many_traps(alloc_method, alloc_bci, Deoptimization::Reason_intrinsic)));
+
bool validated = false;
const Type* src_type = _gvn.type(src);
const Type* dest_type = _gvn.type(dest);
const TypeAryPtr* top_src = src_type->isa_aryptr();
*** 4832,4849 ****
}
}
}
}
! ciMethod* trap_method = method();
! int trap_bci = bci();
! if (saved_jvms != NULL) {
! trap_method = alloc->jvms()->method();
! trap_bci = alloc->jvms()->bci();
! }
!
! if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
can_emit_guards &&
!src->is_top() && !dest->is_top()) {
// validate arguments: enables transformation of the ArrayCopyNode
validated = true;
--- 4869,4879 ----
}
}
}
}
! if ((alloc != NULL || !C->too_many_traps(method(), bci(), Deoptimization::Reason_intrinsic)) &&
can_emit_guards &&
!src->is_top() && !dest->is_top()) {
// validate arguments: enables transformation of the ArrayCopyNode
validated = true;
src/share/vm/opto/library_call.cpp
Index
Unified diffs
Context diffs
Sdiffs
Patch
New
Old
Previous File
Next File