< prev index next >
src/hotspot/share/opto/escape.cpp
Print this page
rev 52560 : 8213615: GC/C2 abstraction for escape analysis
*** 23,32 ****
--- 23,33 ----
*/
#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
+ #include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
*** 37,52 ****
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"
- #if INCLUDE_G1GC
- #include "gc/g1/g1ThreadLocalData.hpp"
- #endif // INCLUDE_G1GC
- #if INCLUDE_ZGC
- #include "gc/z/c2/zBarrierSetC2.hpp"
- #endif
ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
_nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
_in_worklist(C->comp_arena()),
_next_pidx(0),
--- 38,47 ----
*** 386,395 ****
--- 381,394 ----
// point to phantom_obj.
if (n_ptn == phantom_obj || n_ptn == null_obj)
return; // Skip predefined nodes.
int opcode = n->Opcode();
+ bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
+ if (gc_handled) {
+ return; // Ignore node if already handled by GC.
+ }
switch (opcode) {
case Op_AddP: {
Node* base = get_addp_base(n);
PointsToNode* ptn_base = ptnode_adr(base->_idx);
// Field nodes are created for all field types. They are used in
*** 451,464 ****
// Unknown class is loaded
map_ideal_node(n, phantom_obj);
break;
}
case Op_LoadP:
- #if INCLUDE_ZGC
- case Op_LoadBarrierSlowReg:
- case Op_LoadBarrierWeakSlowReg:
- #endif
case Op_LoadN:
case Op_LoadPLocked: {
add_objload_to_connection_graph(n, delayed_worklist);
break;
}
--- 450,459 ----
*** 489,505 ****
if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
n->in(0)->as_Call()->returns_pointer()) {
add_local_var_and_edge(n, PointsToNode::NoEscape,
n->in(0), delayed_worklist);
}
- #if INCLUDE_ZGC
- else if (UseZGC) {
- if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
- add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), delayed_worklist);
- }
- }
- #endif
break;
}
case Op_Rethrow: // Exception object escapes
case Op_Return: {
if (n->req() > TypeFunc::Parms &&
--- 484,493 ----
*** 523,588 ****
case Op_StorePConditional:
case Op_WeakCompareAndSwapP:
case Op_WeakCompareAndSwapN:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN: {
! Node* adr = n->in(MemNode::Address);
! const Type *adr_type = igvn->type(adr);
! adr_type = adr_type->make_ptr();
! if (adr_type == NULL) {
! break; // skip dead nodes
! }
! if ( adr_type->isa_oopptr()
! || ( (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
! && adr_type == TypeRawPtr::NOTNULL
! && adr->in(AddPNode::Address)->is_Proj()
! && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
! delayed_worklist->push(n); // Process it later.
! #ifdef ASSERT
! assert(adr->is_AddP(), "expecting an AddP");
! if (adr_type == TypeRawPtr::NOTNULL) {
! // Verify a raw address for a store captured by Initialize node.
! int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
! assert(offs != Type::OffsetBot, "offset must be a constant");
! }
! #endif
! } else {
! // Ignore copy the displaced header to the BoxNode (OSR compilation).
! if (adr->is_BoxLock())
! break;
! // Stored value escapes in unsafe access.
! if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
! // Pointer stores in G1 barriers looks like unsafe access.
! // Ignore such stores to be able scalar replace non-escaping
! // allocations.
! #if INCLUDE_G1GC
! if (UseG1GC && adr->is_AddP()) {
! Node* base = get_addp_base(adr);
! if (base->Opcode() == Op_LoadP &&
! base->in(MemNode::Address)->is_AddP()) {
! adr = base->in(MemNode::Address);
! Node* tls = get_addp_base(adr);
! if (tls->Opcode() == Op_ThreadLocal) {
! int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
! if (offs == in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset())) {
! break; // G1 pre barrier previous oop value store.
! }
! if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
! break; // G1 post barrier card address store.
! }
! }
! }
! }
! #endif
! delayed_worklist->push(n); // Process unsafe access later.
! break;
! }
! #ifdef ASSERT
! n->dump(1);
! assert(false, "not unsafe or G1 barrier raw StoreP");
! #endif
! }
break;
}
case Op_AryEq:
case Op_HasNegatives:
case Op_StrComp:
--- 511,521 ----
case Op_StorePConditional:
case Op_WeakCompareAndSwapP:
case Op_WeakCompareAndSwapN:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN: {
! add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
break;
}
case Op_AryEq:
case Op_HasNegatives:
case Op_StrComp:
*** 631,640 ****
--- 564,577 ----
}
assert(n->is_Store() || n->is_LoadStore() ||
(n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
"node should be registered already");
int opcode = n->Opcode();
+ bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
+ if (gc_handled) {
+ return; // Ignore node if already handled by GC.
+ }
switch (opcode) {
case Op_AddP: {
Node* base = get_addp_base(n);
PointsToNode* ptn_base = ptnode_adr(base->_idx);
assert(ptn_base != NULL, "field's base should be registered");
*** 664,677 ****
add_edge(n_ptn, ptn);
}
break;
}
case Op_LoadP:
- #if INCLUDE_ZGC
- case Op_LoadBarrierSlowReg:
- case Op_LoadBarrierWeakSlowReg:
- #endif
case Op_LoadN:
case Op_LoadPLocked: {
// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
// ThreadLocal has RawPtr type.
const Type* t = _igvn->type(n);
--- 601,610 ----
*** 707,724 ****
if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
n->in(0)->as_Call()->returns_pointer()) {
add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
break;
}
- #if INCLUDE_ZGC
- else if (UseZGC) {
- if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
- add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), NULL);
- break;
- }
- }
- #endif
ELSE_FAIL("Op_Proj");
}
case Op_Rethrow: // Exception object escapes
case Op_Return: {
if (n->req() > TypeFunc::Parms &&
--- 640,649 ----
*** 740,790 ****
case Op_CompareAndSwapN:
case Op_WeakCompareAndSwapP:
case Op_WeakCompareAndSwapN:
case Op_GetAndSetP:
case Op_GetAndSetN: {
! Node* adr = n->in(MemNode::Address);
! const Type *adr_type = _igvn->type(adr);
! adr_type = adr_type->make_ptr();
! #ifdef ASSERT
! if (adr_type == NULL) {
! n->dump(1);
! assert(adr_type != NULL, "dead node should not be on list");
! break;
! }
! #endif
! if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
! opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
! add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
! }
! if ( adr_type->isa_oopptr()
! || ( (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
! && adr_type == TypeRawPtr::NOTNULL
! && adr->in(AddPNode::Address)->is_Proj()
! && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
! // Point Address to Value
! PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
! assert(adr_ptn != NULL &&
! adr_ptn->as_Field()->is_oop(), "node should be registered");
! Node *val = n->in(MemNode::ValueIn);
! PointsToNode* ptn = ptnode_adr(val->_idx);
! assert(ptn != NULL, "node should be registered");
! add_edge(adr_ptn, ptn);
! break;
! } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
! // Stored value escapes in unsafe access.
! Node *val = n->in(MemNode::ValueIn);
! PointsToNode* ptn = ptnode_adr(val->_idx);
! assert(ptn != NULL, "node should be registered");
! set_escape_state(ptn, PointsToNode::GlobalEscape);
! // Add edge to object for unsafe access with offset.
! PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
! assert(adr_ptn != NULL, "node should be registered");
! if (adr_ptn->is_Field()) {
! assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
! add_edge(adr_ptn, ptn);
! }
break;
}
ELSE_FAIL("Op_StoreP");
}
case Op_AryEq:
--- 665,675 ----
case Op_CompareAndSwapN:
case Op_WeakCompareAndSwapP:
case Op_WeakCompareAndSwapN:
case Op_GetAndSetP:
case Op_GetAndSetN: {
! if (add_final_edges_unsafe_access(n, opcode)) {
break;
}
ELSE_FAIL("Op_StoreP");
}
case Op_AryEq:
*** 825,834 ****
--- 710,806 ----
}
}
return;
}
+ void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
+ Node* adr = n->in(MemNode::Address);
+ const Type* adr_type = _igvn->type(adr);
+ adr_type = adr_type->make_ptr();
+ if (adr_type == NULL) {
+ return; // skip dead nodes
+ }
+ if (adr_type->isa_oopptr()
+ || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
+ && adr_type == TypeRawPtr::NOTNULL
+ && adr->in(AddPNode::Address)->is_Proj()
+ && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
+ delayed_worklist->push(n); // Process it later.
+ #ifdef ASSERT
+ assert(adr->is_AddP(), "expecting an AddP");
+ if (adr_type == TypeRawPtr::NOTNULL) {
+ // Verify a raw address for a store captured by Initialize node.
+ int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
+ assert(offs != Type::OffsetBot, "offset must be a constant");
+ }
+ #endif
+ } else {
+ // Ignore copying of the displaced header to the BoxNode (OSR compilation).
+ if (adr->is_BoxLock()) {
+ return;
+ }
+ // Stored value escapes in unsafe access.
+ if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
+ delayed_worklist->push(n); // Process unsafe access later.
+ return;
+ }
+ #ifdef ASSERT
+ n->dump(1);
+ assert(false, "not unsafe");
+ #endif
+ }
+ }
+
+ bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
+ Node* adr = n->in(MemNode::Address);
+ const Type *adr_type = _igvn->type(adr);
+ adr_type = adr_type->make_ptr();
+ #ifdef ASSERT
+ if (adr_type == NULL) {
+ n->dump(1);
+ assert(adr_type != NULL, "dead node should not be on list");
+ return true;
+ }
+ #endif
+
+ if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
+ opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
+ add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
+ }
+
+ if (adr_type->isa_oopptr()
+ || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
+ && adr_type == TypeRawPtr::NOTNULL
+ && adr->in(AddPNode::Address)->is_Proj()
+ && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
+ // Point Address to Value
+ PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
+ assert(adr_ptn != NULL &&
+ adr_ptn->as_Field()->is_oop(), "node should be registered");
+ Node* val = n->in(MemNode::ValueIn);
+ PointsToNode* ptn = ptnode_adr(val->_idx);
+ assert(ptn != NULL, "node should be registered");
+ add_edge(adr_ptn, ptn);
+ return true;
+ } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
+ // Stored value escapes in unsafe access.
+ Node* val = n->in(MemNode::ValueIn);
+ PointsToNode* ptn = ptnode_adr(val->_idx);
+ assert(ptn != NULL, "node should be registered");
+ set_escape_state(ptn, PointsToNode::GlobalEscape);
+ // Add edge to object for unsafe access with offset.
+ PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
+ assert(adr_ptn != NULL, "node should be registered");
+ if (adr_ptn->is_Field()) {
+ assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
+ add_edge(adr_ptn, ptn);
+ }
+ return true;
+ }
+ return false;
+ }
+
void ConnectionGraph::add_call_node(CallNode* call) {
assert(call->returns_pointer(), "only for call which returns pointer");
uint call_idx = call->_idx;
if (call->is_Allocate()) {
Node* k = call->in(AllocateNode::KlassNode);
*** 2098,2108 ****
bt = field->layout_type();
} else {
// Check for unsafe oop field access
if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
! n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
bt = T_OBJECT;
(*unsafe) = true;
}
}
} else if (adr_type->isa_aryptr()) {
--- 2070,2081 ----
bt = field->layout_type();
} else {
// Check for unsafe oop field access
if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
! n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
! BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
bt = T_OBJECT;
(*unsafe) = true;
}
}
} else if (adr_type->isa_aryptr()) {
*** 2116,2126 ****
}
} else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
// Allocation initialization, ThreadLocal field access, unsafe access
if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
! n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
bt = T_OBJECT;
}
}
}
return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
--- 2089,2100 ----
}
} else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
// Allocation initialization, ThreadLocal field access, unsafe access
if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
! n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
! BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
bt = T_OBJECT;
}
}
}
return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
*** 2357,2367 ****
Node* uncast_base = base->uncast();
int opcode = uncast_base->Opcode();
assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
(uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
! (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
}
}
return base;
}
--- 2331,2342 ----
Node* uncast_base = base->uncast();
int opcode = uncast_base->Opcode();
assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
(uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
! (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()) ||
! BarrierSet::barrier_set()->barrier_set_c2()->escape_is_barrier_node(uncast_base), "sanity");
}
}
return base;
}
*** 3090,3099 ****
--- 3065,3075 ----
if (!split_AddP(n, base)) continue; // wrong type from dead path
} else if (n->is_Phi() ||
n->is_CheckCastPP() ||
n->is_EncodeP() ||
n->is_DecodeN() ||
+ BarrierSet::barrier_set()->barrier_set_c2()->escape_is_barrier_node(n) ||
(n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
if (visited.test_set(n->_idx)) {
assert(n->is_Phi(), "loops only through Phi's");
continue; // already processed
}
*** 3160,3169 ****
--- 3136,3146 ----
alloc_worklist.append_if_missing(use);
} else if (use->is_Phi() ||
use->is_CheckCastPP() ||
use->is_EncodeNarrowPtr() ||
use->is_DecodeNarrowPtr() ||
+ BarrierSet::barrier_set()->barrier_set_c2()->escape_is_barrier_node(use) ||
(use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
alloc_worklist.append_if_missing(use);
#ifdef ASSERT
} else if (use->is_Mem()) {
assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
*** 3562,3566 ****
--- 3539,3548 ----
tty->cr();
}
}
}
#endif
+
+ void ConnectionGraph::record_for_optimizer(Node *n) {
+ _igvn->_worklist.push(n);
+ _igvn->add_users_to_worklist(n);
+ }
< prev index next >