--- old/src/share/vm/opto/c2_globals.hpp Thu Feb 23 12:08:42 2012 +++ new/src/share/vm/opto/c2_globals.hpp Thu Feb 23 12:08:42 2012 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -465,6 +465,9 @@ notproduct(bool, PrintOptimizePtrCompare, false, \ "Print information about optimized pointers compare") \ \ + notproduct(bool, VerifyConnectionGraph , true, \ + "Verify Connection Graph construction in Escape Analysis") \ + \ product(bool, UseOptoBiasInlining, true, \ "Generate biased locking code in C2 ideal graph") \ \ --- old/src/share/vm/opto/callnode.cpp Thu Feb 23 12:08:43 2012 +++ new/src/share/vm/opto/callnode.cpp Thu Feb 23 12:08:43 2012 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1538,10 +1538,7 @@ // If we are locking an unescaped object, the lock/unlock is unnecessary // ConnectionGraph *cgr = phase->C->congraph(); - PointsToNode::EscapeState es = PointsToNode::GlobalEscape; - if (cgr != NULL) - es = cgr->escape_state(obj_node()); - if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) { + if (cgr != NULL && cgr->not_global_escape(obj_node())) { assert(!is_eliminated() || is_coarsened(), "sanity"); // The lock could be marked eliminated by lock coarsening // code during first IGVN before EA. Replace coarsened flag @@ -1680,10 +1677,7 @@ // If we are unlocking an unescaped object, the lock/unlock is unnecessary. // ConnectionGraph *cgr = phase->C->congraph(); - PointsToNode::EscapeState es = PointsToNode::GlobalEscape; - if (cgr != NULL) - es = cgr->escape_state(obj_node()); - if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) { + if (cgr != NULL && cgr->not_global_escape(obj_node())) { assert(!is_eliminated() || is_coarsened(), "sanity"); // The lock could be marked eliminated by lock coarsening // code during first IGVN before EA. Replace coarsened flag --- old/src/share/vm/opto/callnode.hpp Thu Feb 23 12:08:43 2012 +++ new/src/share/vm/opto/callnode.hpp Thu Feb 23 12:08:43 2012 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -546,6 +546,12 @@ // or result projection is there are several CheckCastPP // or returns NULL if there is no one. Node *result_cast(); + // Does this node returns pointer? + bool returns_pointer() const { + const TypeTuple *r = tf()->range(); + return (r->cnt() > TypeFunc::Parms && + r->field_at(TypeFunc::Parms)->isa_ptr()); + } // Collect all the interesting edges from a call for use in // replacing the call by something else. Used by macro expansion --- old/src/share/vm/opto/compile.cpp Thu Feb 23 12:08:44 2012 +++ new/src/share/vm/opto/compile.cpp Thu Feb 23 12:08:44 2012 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 
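The callnode.hpp hunk above adds CallNode::returns_pointer() so escape analysis can ask a call directly whether its result is an oop instead of re-deriving that from the TypeTuple at every call site, and the callnode.cpp hunks collapse the old escape-state comparison into a single ConnectionGraph::not_global_escape() query. A small usage sketch of the new helper follows; the add_call_node() name is an assumption for illustration, not code shown in this section:

  // Sketch: during Connection Graph construction, only calls that produce a
  // pointer result and actually have a result projection need a PointsTo
  // node for their return value.
  CallNode* call = n->as_Call();
  if (call->returns_pointer() && call->proj_out(TypeFunc::Parms) != NULL) {
    add_call_node(call);   // assumed helper registering the call's result
  }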
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1707,7 +1707,6 @@ if (major_progress()) print_method("PhaseIdealLoop before EA", 2); if (failing()) return; } - TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true); ConnectionGraph::do_analysis(this, &igvn); if (failing()) return; @@ -1719,6 +1718,7 @@ if (failing()) return; if (congraph() != NULL && macro_count() > 0) { + NOT_PRODUCT( TracePhase t2("macroEliminate", &_t_macroEliminate, TimeCompiler); ) PhaseMacroExpand mexp(igvn); mexp.eliminate_macro_nodes(); igvn.set_delay_transform(false); --- old/src/share/vm/opto/escape.cpp Thu Feb 23 12:08:45 2012 +++ new/src/share/vm/opto/escape.cpp Thu Feb 23 12:08:45 2012 @@ -34,423 +34,449 @@ #include "opto/phaseX.hpp" #include "opto/rootnode.hpp" -void PointsToNode::add_edge(uint targIdx, PointsToNode::EdgeType et) { - uint v = (targIdx << EdgeShift) + ((uint) et); - if (_edges == NULL) { - Arena *a = Compile::current()->comp_arena(); - _edges = new(a) GrowableArray(a, INITIAL_EDGE_COUNT, 0, 0); - } - _edges->append_if_missing(v); -} - -void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) { - uint v = (targIdx << EdgeShift) + ((uint) et); - - _edges->remove(v); -} - -#ifndef PRODUCT -static const char *node_type_names[] = { - "UnknownType", - "JavaObject", - "LocalVar", - "Field" -}; - -static const char *esc_names[] = { - "UnknownEscape", - "NoEscape", - "ArgEscape", - "GlobalEscape" -}; - -static const char *edge_type_suffix[] = { - "?", // UnknownEdge - "P", // PointsToEdge - "D", // DeferredEdge - "F" // FieldEdge -}; - -void PointsToNode::dump(bool print_state) const { - NodeType nt = node_type(); - tty->print("%s ", node_type_names[(int) nt]); - if (print_state) { - EscapeState es = escape_state(); - tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR"); - } - tty->print("[["); - for (uint i = 0; i < edge_count(); i++) { - tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]); - } - tty->print("]] "); - if (_node == NULL) - tty->print_cr(""); - else - _node->dump(); -} -#endif - ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) : - _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()), - _processed(C->comp_arena()), - pt_ptset(C->comp_arena()), - pt_visited(C->comp_arena()), - pt_worklist(C->comp_arena(), 4, 0, 0), + _nodes(C->comp_arena(), C->unique(), C->unique(), NULL), _collecting(true), - _progress(false), + _verify(false), _compile(C), _igvn(igvn), _node_map(C->comp_arena()) { - _phantom_object = C->top()->_idx, - add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape,true); + add_java_object(C->top(), PointsToNode::GlobalEscape); + phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject(); // Add ConP(#NULL) and ConN(#NULL) nodes. 
Node* oop_null = igvn->zerocon(T_OBJECT); - _oop_null = oop_null->_idx; - assert(_oop_null < nodes_size(), "should be created already"); - add_node(oop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true); + assert(oop_null->_idx < nodes_size(), "should be created already"); + add_java_object(oop_null, PointsToNode::NoEscape); + null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject(); if (UseCompressedOops) { Node* noop_null = igvn->zerocon(T_NARROWOOP); - _noop_null = noop_null->_idx; - assert(_noop_null < nodes_size(), "should be created already"); - add_node(noop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true); - } else { - _noop_null = _oop_null; // Should be initialized + assert(noop_null->_idx < nodes_size(), "should be created already"); + map_ideal_node(noop_null, null_obj); } _pcmp_neq = NULL; // Should be initialized _pcmp_eq = NULL; } -void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) { - PointsToNode *f = ptnode_adr(from_i); - PointsToNode *t = ptnode_adr(to_i); +// Connection Graph constuction functions. - assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set"); - assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of PointsTo edge"); - assert(t->node_type() == PointsToNode::JavaObject, "invalid destination of PointsTo edge"); - if (to_i == _phantom_object) { // Quick test for most common object - if (f->has_unknown_ptr()) { - return; - } else { - f->set_has_unknown_ptr(); - } +void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { + PointsToNode* ptadr = _nodes.at(n->_idx); + if (ptadr != NULL) { + assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); + return; } - add_edge(f, to_i, PointsToNode::PointsToEdge); + ptadr = new LocalVarNode(n, es); + _nodes.at_put(n->_idx, ptadr); } -void ConnectionGraph::add_deferred_edge(uint from_i, uint to_i) { - PointsToNode *f = ptnode_adr(from_i); - PointsToNode *t = ptnode_adr(to_i); - - assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set"); - assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of Deferred edge"); - assert(t->node_type() == PointsToNode::LocalVar || t->node_type() == PointsToNode::Field, "invalid destination of Deferred edge"); - // don't add a self-referential edge, this can occur during removal of - // deferred edges - if (from_i != to_i) - add_edge(f, to_i, PointsToNode::DeferredEdge); -} - -int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { - const Type *adr_type = phase->type(adr); - if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && - adr->in(AddPNode::Address)->is_Proj() && - adr->in(AddPNode::Address)->in(0)->is_Allocate()) { - // We are computing a raw address for a store captured by an Initialize - // compute an appropriate address type. AddP cases #3 and #5 (see below). 
- int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); - assert(offs != Type::OffsetBot || - adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), - "offset must be a constant or it is initialization of array"); - return offs; +void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { + PointsToNode* ptadr = _nodes.at(n->_idx); + if (ptadr != NULL) { + assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); + return; } - const TypePtr *t_ptr = adr_type->isa_ptr(); - assert(t_ptr != NULL, "must be a pointer type"); - return t_ptr->offset(); + ptadr = new JavaObjectNode(n, es); + _nodes.at_put(n->_idx, ptadr); } -void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) { - // Don't add fields to NULL pointer. - if (is_null_ptr(from_i)) +void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { + PointsToNode* ptadr = _nodes.at(n->_idx); + if (ptadr != NULL) { + assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); return; - PointsToNode *f = ptnode_adr(from_i); - PointsToNode *t = ptnode_adr(to_i); - - assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set"); - assert(f->node_type() == PointsToNode::JavaObject, "invalid destination of Field edge"); - assert(t->node_type() == PointsToNode::Field, "invalid destination of Field edge"); - assert (t->offset() == -1 || t->offset() == offset, "conflicting field offsets"); - t->set_offset(offset); - - add_edge(f, to_i, PointsToNode::FieldEdge); + } + bool is_oop = is_oop_field(n, offset); + FieldNode* field = new FieldNode(n, es, offset, is_oop); + _nodes.at_put(n->_idx, field); } -void ConnectionGraph::set_escape_state(uint ni, PointsToNode::EscapeState es) { - // Don't change non-escaping state of NULL pointer. - if (is_null_ptr(ni)) +void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, + PointsToNode* src, PointsToNode* dst) { + assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); + assert((src != null_obj) && (dst != null_obj), "not for ConP NULL"); + PointsToNode* ptadr = _nodes.at(n->_idx); + if (ptadr != NULL) { + assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); return; - PointsToNode *npt = ptnode_adr(ni); - PointsToNode::EscapeState old_es = npt->escape_state(); - if (es > old_es) - npt->set_escape_state(es); + } + ptadr = new ArraycopyNode(n, es); + _nodes.at_put(n->_idx, ptadr); + // Add edge from arraycopy node to source object. + (void)add_edge(ptadr, src); + src->set_arraycopy_src(); + // Add edge from destination object to arraycopy node. + (void)add_edge(dst, ptadr); + dst->set_arraycopy_dst(); } -void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt, - PointsToNode::EscapeState es, bool done) { - PointsToNode* ptadr = ptnode_adr(n->_idx); - ptadr->_node = n; - ptadr->set_node_type(nt); - - // inline set_escape_state(idx, es); - PointsToNode::EscapeState old_es = ptadr->escape_state(); - if (es > old_es) - ptadr->set_escape_state(es); - - if (done) - _processed.set(n->_idx); +bool ConnectionGraph::is_oop_field(Node* n, int offset) { + const Type* adr_type = n->as_AddP()->bottom_type(); + BasicType bt = T_INT; + if (offset == Type::OffsetBot) { + // Check only oop fields. + if (!adr_type->isa_aryptr() || + (adr_type->isa_aryptr()->klass() == NULL) || + adr_type->isa_aryptr()->klass()->is_obj_array_klass()) { + // OffsetBot is used to reference array's element. 
Ignore first AddP. + if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) { + bt = T_OBJECT; + } + } + } else if (offset != oopDesc::klass_offset_in_bytes()) { + if (adr_type->isa_instptr()) { + ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); + if (field != NULL) { + bt = field->layout_type(); + } else { + // Ignore non field load (for example, klass load) + } + } else if (adr_type->isa_aryptr()) { + if (offset == arrayOopDesc::length_offset_in_bytes()) { + // Ignore array length load. + } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) { + // Ignore first AddP. + } else { + const Type* elemtype = adr_type->isa_aryptr()->elem(); + bt = elemtype->array_element_basic_type(); + } + } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { + // Allocation initialization, ThreadLocal field access, unsafe access + for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { + int opcode = n->fast_out(i)->Opcode(); + if (opcode == Op_StoreP || opcode == Op_LoadP || + opcode == Op_StoreN || opcode == Op_LoadN) { + bt = T_OBJECT; + } + } + } + } + return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY); } -PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n) { - uint idx = n->_idx; - PointsToNode::EscapeState es; +// Add all references to JavaObject node by walking over all uses. +int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, + bool populate_worklist) { + int new_edges = 0; + if (populate_worklist) { + // Populate _worklist by uses of jobj's uses. + uint count = jobj->use_count(); + for (uint i = 0; i < count; i++) { + PointsToNode* use = jobj->use(i); + if (use->is_Arraycopy()) + continue; - // If we are still collecting or there were no non-escaping allocations - // we don't know the answer yet - if (_collecting) - return PointsToNode::UnknownEscape; + add_uses_to_worklist(use); + if (use->is_Field() && use->as_Field()->is_oop()) { + // Put on worklist all field's uses (loads) and + // related field nodes (same base and offset). + add_field_uses_to_worklist(use->as_Field()); + } + } + } - // if the node was created after the escape computation, return - // UnknownEscape - if (idx >= nodes_size()) - return PointsToNode::UnknownEscape; + while(_worklist.length() > 0) { + PointsToNode* use = _worklist.pop(); - es = ptnode_adr(idx)->escape_state(); + if (PointsToNode::is_base_use(use)) { + // Add reference from jobj to field and from field to jobj (field's base). + use = PointsToNode::get_use_node(use)->as_Field(); + if (add_base(use->as_Field(), jobj)) + new_edges++; + continue; + } + assert(!use->is_JavaObject(), "sanity"); - // if we have already computed a value, return it - if (es != PointsToNode::UnknownEscape && - ptnode_adr(idx)->node_type() == PointsToNode::JavaObject) - return es; + if (use->is_Arraycopy()) { + if (jobj == null_obj) // NULL object does not have field edges + continue; - // PointsTo() calls n->uncast() which can return a new ideal node. - if (n->uncast()->_idx >= nodes_size()) - return PointsToNode::UnknownEscape; + // Added edge from Arraycopy node to arraycopy's source java object + if (add_edge(use, jobj)) { + jobj->set_arraycopy_src(); + new_edges++; + } + // and stop here. + continue; + } - PointsToNode::EscapeState orig_es = es; + if (!add_edge(use, jobj)) + continue; // No new edge added, there was such edge already. 
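The add_local_var()/add_java_object()/add_field()/add_arraycopy() helpers above, and the as_LocalVar()/as_JavaObject()/as_Field()/as_Arraycopy() casts used throughout, assume a small PointsToNode class hierarchy declared in escape.hpp, which is not part of this section. A rough sketch of the assumed shape; members other than those exercised in these hunks are illustrative:

  class Node;
  class PointsToNode {
   public:
    typedef enum { UnknownEscape, NoEscape, ArgEscape, GlobalEscape } EscapeState;

    Node*       ideal_node()   const;   // associated ideal graph node
    EscapeState escape_state() const;

    bool is_JavaObject() const;         // allocation, call result, constant oop
    bool is_LocalVar()   const;         // Phi, CastPP, Proj, ...
    bool is_Field()      const;         // AddP-based field reference
    bool is_Arraycopy()  const;         // arraycopy source/destination marker

    // "edges" are what this node points to, "uses" are nodes pointing to it.
    uint edge_count() const;   PointsToNode* edge(uint i) const;
    uint use_count()  const;   PointsToNode* use(uint i)  const;
  };

  class LocalVarNode   : public PointsToNode { };  // created by add_local_var()
  class JavaObjectNode : public PointsToNode { };  // created by add_java_object()
  class FieldNode      : public PointsToNode { };  // offset(), is_oop(), base(i); created by add_field()
  class ArraycopyNode  : public PointsToNode { };  // created by add_arraycopy()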
- // compute max escape state of anything this node could point to - for(VectorSetI i(PointsTo(n)); i.test() && es != PointsToNode::GlobalEscape; ++i) { - uint pt = i.elem; - PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state(); - if (pes > es) - es = pes; + new_edges++; + + if (use->is_LocalVar()) { + add_uses_to_worklist(use); + if (use->arraycopy_dst()) { + uint ecnt = use->edge_count(); + for (uint j = 0; j < ecnt; j++) { + PointsToNode* e = use->edge(j); + if (e->is_Arraycopy()) { + if (jobj == null_obj) // NULL object does not have field edges + continue; + // Add edge from arraycopy's destination java object to Arraycopy node. + if (add_edge(jobj, e)) { + new_edges++; + jobj->set_arraycopy_dst(); + } + } + } + } + } else { + // Added new edge to stored in field values. + // Put on worklist all field's uses (loads) and + // related field nodes (same base and offset). + add_field_uses_to_worklist(use->as_Field()); + } } - if (orig_es != es) { - // cache the computed escape state - assert(es > orig_es, "should have computed an escape state"); - set_escape_state(idx, es); - } // orig_es could be PointsToNode::UnknownEscape - return es; + return new_edges; } -VectorSet* ConnectionGraph::PointsTo(Node * n) { - pt_ptset.Reset(); - pt_visited.Reset(); - pt_worklist.clear(); +// Put on worklist all related field nodes. +void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) { + assert(field->is_oop(), "sanity"); + int offset = field->offset(); + add_uses_to_worklist(field); -#ifdef ASSERT - Node *orig_n = n; -#endif + // Loop over all bases of this field and push on worklist Field nodes + // with the same offset and base (since they may reference the same field). + uint bcnt = field->base_count(); + for (uint i = 0; i < bcnt; i++) { + PointsToNode* base = field->base(i); + add_fields_to_worklist(field, base); - n = n->uncast(); - PointsToNode* npt = ptnode_adr(n->_idx); - - // If we have a JavaObject, return just that object - if (npt->node_type() == PointsToNode::JavaObject) { - pt_ptset.set(n->_idx); - return &pt_ptset; + // Check if the base was source object of arraycopy and go over arraycopy's + // destination objects since values stored to a field of source object are + // accessable by uses (loads) of fields of destination objects. + if (base->arraycopy_src()) { + uint ucnt = base->use_count(); + for (uint j = 0; j < ucnt; j++) { + PointsToNode* arycp = base->use(j); + if (arycp->is_Arraycopy()) { + uint acnt = arycp->use_count(); + for (uint j = 0; j < acnt; j++) { + PointsToNode* abase = arycp->use(j); + if (abase->arraycopy_dst() && abase != base) { + // Look for the same arracopy reference. 
+ add_fields_to_worklist(field, abase); + } + } + } + } + } } -#ifdef ASSERT - if (npt->_node == NULL) { - if (orig_n != n) - orig_n->dump(); - n->dump(); - assert(npt->_node != NULL, "unregistered node"); - } -#endif - pt_worklist.push(n->_idx); - while(pt_worklist.length() > 0) { - int ni = pt_worklist.pop(); - if (pt_visited.test_set(ni)) - continue; +} - PointsToNode* pn = ptnode_adr(ni); - // ensure that all inputs of a Phi have been processed - assert(!_collecting || !pn->_node->is_Phi() || _processed.test(ni),""); - - int edges_processed = 0; - uint e_cnt = pn->edge_count(); - for (uint e = 0; e < e_cnt; e++) { - uint etgt = pn->edge_target(e); - PointsToNode::EdgeType et = pn->edge_type(e); - if (et == PointsToNode::PointsToEdge) { - pt_ptset.set(etgt); - edges_processed++; - } else if (et == PointsToNode::DeferredEdge) { - pt_worklist.push(etgt); - edges_processed++; - } else { - assert(false,"neither PointsToEdge or DeferredEdge"); +// Put on worklist all related field nodes. +void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) { + int offset = field->offset(); + if (base->is_LocalVar()) { + uint fcnt = base->use_count(); + for (uint j = 0; j < fcnt; j++) { + PointsToNode* f = base->use(j); + if (PointsToNode::is_base_use(f)) { // Field + f = PointsToNode::get_use_node(f); + if (f == field || !f->as_Field()->is_oop()) + continue; + int offs = f->as_Field()->offset(); + if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { + add_to_worklist(f); + } } } - if (edges_processed == 0) { - // no deferred or pointsto edges found. Assume the value was set - // outside this method. Add the phantom object to the pointsto set. - pt_ptset.set(_phantom_object); + } else { + assert(base->is_JavaObject(), "sanity"); + if (// Skip phantom_object since it is only used to indicate that + // this field's content globally escapes. + (base != phantom_obj) && + // NULL object node does not have fields. + (base != null_obj)) { + uint fcnt = base->edge_count(); + for (uint j = 0; j < fcnt; j++) { + PointsToNode* f = base->edge(j); + // Skip arraycopy edge since store to destination object field + // does not update value in source object field. + if (f->is_Arraycopy()) { + assert(base->arraycopy_dst(), "sanity"); + continue; + } + if (f == field || !f->as_Field()->is_oop()) + continue; + int offs = f->as_Field()->offset(); + if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { + add_to_worklist(f); + } + } } } - return &pt_ptset; } -void ConnectionGraph::remove_deferred(uint ni, GrowableArray* deferred_edges, VectorSet* visited) { - // This method is most expensive during ConnectionGraph construction. - // Reuse vectorSet and an additional growable array for deferred edges. - deferred_edges->clear(); - visited->Reset(); +// Returns unique pointed java object or NULL. +JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) { + assert(!_collecting, "should not call when contructed graph"); - visited->set(ni); - PointsToNode *ptn = ptnode_adr(ni); - assert(ptn->node_type() == PointsToNode::LocalVar || - ptn->node_type() == PointsToNode::Field, "sanity"); - assert(ptn->edge_count() != 0, "should have at least phantom_object"); + // If the node was created after the escape computation we can't answer. + uint idx = n->_idx; + if (idx >= nodes_size()) + return NULL; - // Mark current edges as visited and move deferred edges to separate array. 
- for (uint i = 0; i < ptn->edge_count(); ) { - uint t = ptn->edge_target(i); -#ifdef ASSERT - assert(!visited->test_set(t), "expecting no duplications"); -#else - visited->set(t); -#endif - if (ptn->edge_type(i) == PointsToNode::DeferredEdge) { - ptn->remove_edge(t, PointsToNode::DeferredEdge); - deferred_edges->append(t); - } else { - i++; - } + PointsToNode* ptn = ptnode_adr(idx); + if (ptn->is_JavaObject()) { + return ptn->as_JavaObject(); } - for (int next = 0; next < deferred_edges->length(); ++next) { - uint t = deferred_edges->at(next); - PointsToNode *ptt = ptnode_adr(t); - uint e_cnt = ptt->edge_count(); - assert(e_cnt != 0, "should have at least phantom_object"); - for (uint e = 0; e < e_cnt; e++) { - uint etgt = ptt->edge_target(e); - if (visited->test_set(etgt)) - continue; - PointsToNode::EdgeType et = ptt->edge_type(e); - if (et == PointsToNode::PointsToEdge) { - add_pointsto_edge(ni, etgt); - } else if (et == PointsToNode::DeferredEdge) { - deferred_edges->append(etgt); - } else { - assert(false,"invalid connection graph"); + assert(ptn->is_LocalVar(), "sanity"); + // Check all java objects it points to. + uint count = ptn->edge_count(); + JavaObjectNode* jobj = NULL; + for (uint i = 0; i < count; i++) { + PointsToNode* e = ptn->edge(i); + if (e->is_JavaObject()) { + if (jobj == NULL) { + jobj = e->as_JavaObject(); + } else if (jobj != e) { + return NULL; } } } - if (ptn->edge_count() == 0) { - // No pointsto edges found after deferred edges are removed. - // For example, in the next case where call is replaced - // with uncommon trap and as result array's load references - // itself through deferred edges: - // - // A a = b[i]; - // if (c!=null) a = c.foo(); - // b[i] = a; - // - // Assume the value was set outside this method and - // add edge to phantom object. - add_pointsto_edge(ni, _phantom_object); + return jobj; +} + +// Return true if nodes points only to non-escaped allocations. +bool PointsToNode::not_escaped_allocation() { + if (is_JavaObject()) { + Node* n = ideal_node(); + if (n->is_Allocate() || n->is_CallStaticJava()) { + return (escape_state() == PointsToNode::NoEscape); + } else { + return false; + } } + assert(is_LocalVar(), "sanity"); + // Check all java objects it points to. + uint count = edge_count(); + for (uint i = 0; i < count; i++) { + PointsToNode* e = edge(i); + if (e->is_JavaObject()) { + Node* n = e->ideal_node(); + if ((e->escape_state() != PointsToNode::NoEscape) || + !(n->is_Allocate() || n->is_CallStaticJava())) { + return false; + } + } + } + return true; } +// Return true if we know the node does not escape globally. +bool ConnectionGraph::not_global_escape(Node *n) { + assert(!_collecting, "should not call when contructed graph"); -// Add an edge to node given by "to_i" from any field of adr_i whose offset -// matches "offset" A deferred edge is added if to_i is a LocalVar, and -// a pointsto edge is added if it is a JavaObject + // If the node was created after the escape computation we can't answer. + uint idx = n->_idx; + if (idx >= nodes_size()) + return false; -void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) { - // No fields for NULL pointer. - if (is_null_ptr(adr_i)) { - return; + PointsToNode* ptn = ptnode_adr(idx); + PointsToNode::EscapeState es = ptn->escape_state(); + + // If we have already computed a value, return it. 
+ if (es >= PointsToNode::GlobalEscape) + return false; + + if (ptn->is_JavaObject()) { + return (es < PointsToNode::GlobalEscape); } - PointsToNode* an = ptnode_adr(adr_i); - PointsToNode* to = ptnode_adr(to_i); - bool deferred = (to->node_type() == PointsToNode::LocalVar); - bool escaped = (to_i == _phantom_object) && (offs == Type::OffsetTop); - if (escaped) { - // Values in fields escaped during call. - assert(an->escape_state() >= PointsToNode::ArgEscape, "sanity"); - offs = Type::OffsetBot; + + assert(ptn->is_LocalVar(), "sanity"); + // Check all java objects it points to. + uint count = ptn->edge_count(); + for (uint i = 0; i < count; i++) { + if (ptn->edge(i)->escape_state() >= PointsToNode::GlobalEscape) + return false; } - for (uint fe = 0; fe < an->edge_count(); fe++) { - assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge"); - int fi = an->edge_target(fe); - if (escaped) { - set_escape_state(fi, PointsToNode::GlobalEscape); - } - PointsToNode* pf = ptnode_adr(fi); - int po = pf->offset(); - if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) { - if (deferred) - add_deferred_edge(fi, to_i); - else - add_pointsto_edge(fi, to_i); - } - } + return true; } -// Add a deferred edge from node given by "from_i" to any field of adr_i -// whose offset matches "offset". -void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) { - // No fields for NULL pointer. - if (is_null_ptr(adr_i)) { - return; + +// Helper functions + +// Return true if this node points to specified node or nodes it points to. +bool PointsToNode::points_to(JavaObjectNode* ptn) const { + if (this->is_JavaObject()) + return (this == ptn); + + assert(this->is_LocalVar(), "sanity"); + + uint count = this->edge_count(); + for (uint i = 0; i < count; i++) { + PointsToNode* e = this->edge(i); + if (e == ptn) + return true; } - if (adr_i == _phantom_object) { - // Add only one edge for unknown object. - add_pointsto_edge(from_i, _phantom_object); - return; - } - PointsToNode* an = ptnode_adr(adr_i); - bool is_alloc = an->_node->is_Allocate(); - for (uint fe = 0; fe < an->edge_count(); fe++) { - assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge"); - int fi = an->edge_target(fe); - PointsToNode* pf = ptnode_adr(fi); - int offset = pf->offset(); - if (!is_alloc) { - // Assume the field was set outside this method if it is not Allocation - add_pointsto_edge(fi, _phantom_object); + return false; +} + +// Return true if one node points to an other. +bool PointsToNode::meet(PointsToNode* ptn) { + if (this == ptn) + return true; + + if (ptn->is_JavaObject()) + return this->points_to(ptn->as_JavaObject()); + + if (this->is_JavaObject()) + return ptn->points_to(this->as_JavaObject()); + + assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity"); + + uint this_count = this->edge_count(); + uint ptn_count = ptn->edge_count(); + for (uint i = 0; i < this_count; i++) { + PointsToNode* this_e = this->edge(i); + for (uint j = 0; j < ptn_count; j++) { + PointsToNode* ptn_e = ptn->edge(j); + if (this_e == ptn_e) + return true; } - if (offset == offs || offset == Type::OffsetBot || offs == Type::OffsetBot) { - add_deferred_edge(from_i, fi); - } } - // Some fields references (AddP) may still be missing - // until Connection Graph construction is complete. - // For example, loads from RAW pointers with offset 0 - // which don't have AddP. 
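points_to() and meet() above give clients a may-alias style query over the finished graph; the _pcmp_eq/_pcmp_neq fields initialized in the constructor and the PrintOptimizePtrCompare flag suggest their use when folding pointer compares. A hedged sketch of such a client; the pointers_may_be_equal() member is hypothetical and not part of this patch:

  // Sketch: if neither points-to set intersects the other, a CmpP/CmpN of
  // the two pointers can be statically folded to "not equal".
  bool ConnectionGraph::pointers_may_be_equal(Node* a, Node* b) {
    PointsToNode* pa = ptnode_adr(a->_idx);
    PointsToNode* pb = ptnode_adr(b->_idx);
    if (pa == NULL || pb == NULL)
      return true;          // unknown node: conservatively assume aliasing
    return pa->meet(pb);    // true iff the two points-to sets intersect
  }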
- // A reference to phantom_object will be added if - // a field reference is still missing after completing - // Connection Graph (see remove_deferred()). + return false; } -// Helper functions +#ifdef ASSERT +// Return true if bases points to this java object. +bool FieldNode::has_base(JavaObjectNode* jobj) const { + uint count = this->base_count(); + for (uint i = 0; i < count; i++) { + PointsToNode* b = this->base(i); + if (b == jobj) + return true; + } + return false; +} +#endif -static Node* get_addp_base(Node *addp) { +int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { + const Type *adr_type = phase->type(adr); + if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && + adr->in(AddPNode::Address)->is_Proj() && + adr->in(AddPNode::Address)->in(0)->is_Allocate()) { + // We are computing a raw address for a store captured by an Initialize + // compute an appropriate address type. AddP cases #3 and #5 (see below). + int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); + assert(offs != Type::OffsetBot || + adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), + "offset must be a constant or it is initialization of array"); + return offs; + } + const TypePtr *t_ptr = adr_type->isa_ptr(); + assert(t_ptr != NULL, "must be a pointer type"); + return t_ptr->offset(); +} + +Node* ConnectionGraph::get_addp_base(Node *addp) { assert(addp->is_AddP(), "must be AddP"); // // AddP cases for Base and Address inputs: @@ -513,23 +539,25 @@ // | | // AddP ( base == address ) // - Node *base = addp->in(AddPNode::Base)->uncast(); - if (base->is_top()) { // The AddP case #3 and #6. - base = addp->in(AddPNode::Address)->uncast(); + Node *base = addp->in(AddPNode::Base); + if (base->uncast()->is_top()) { // The AddP case #3 and #6. + base = addp->in(AddPNode::Address); while (base->is_AddP()) { // Case #6 (unsafe access) may have several chained AddP nodes. - assert(base->in(AddPNode::Base)->is_top(), "expected unsafe access address only"); - base = base->in(AddPNode::Address)->uncast(); + assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only"); + base = base->in(AddPNode::Address); } - assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal || - base->Opcode() == Op_CastX2P || base->is_DecodeN() || - (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) || - (base->is_Proj() && base->in(0)->is_Allocate()), "sanity"); + Node* uncast_base = base->uncast(); + int opcode = uncast_base->Opcode(); + assert(opcode == Op_ConP || opcode == Op_ThreadLocal || + opcode == Op_CastX2P || uncast_base->is_DecodeN() || + (uncast_base->is_Mem() && uncast_base->bottom_type() == TypeRawPtr::NOTNULL) || + (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity"); } return base; } -static Node* find_second_addp(Node* addp, Node* n) { +Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) { assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes"); Node* addp2 = addp->raw_out(0); @@ -620,8 +648,7 @@ int alias_idx = _compile->get_alias_index(tinst); igvn->set_type(addp, tinst); // record the allocation in the node map - assert(ptnode_adr(addp->_idx)->_node != NULL, "should be registered"); - set_map(addp->_idx, get_map(base->_idx)); + set_map(addp, get_map(base->_idx)); // Set addp's Base and Address to 'base'. 
Node *abase = addp->in(AddPNode::Base); @@ -699,10 +726,7 @@ igvn->set_type(result, result->bottom_type()); record_for_optimizer(result); - debug_only(Node* pn = ptnode_adr(orig_phi->_idx)->_node;) - assert(pn == NULL || pn == orig_phi, "wrong node"); - set_map(orig_phi->_idx, result); - ptnode_adr(orig_phi->_idx)->_node = orig_phi; + set_map(orig_phi, result); new_created = true; return result; @@ -779,15 +803,15 @@ // // The next methods are derived from methods in MemNode. // -static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { +Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { Node *mem = mmem; // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally // means an array I have not precisely typed yet. Do not do any // alias stuff with it any time soon. - if( toop->base() != Type::AnyPtr && + if (toop->base() != Type::AnyPtr && !(toop->klass() != NULL && toop->klass()->is_java_lang_Object() && - toop->offset() == Type::OffsetBot) ) { + toop->offset() == Type::OffsetBot)) { mem = mmem->memory_at(alias_idx); // Update input if it is progress over what we have now } @@ -1077,7 +1101,10 @@ Arena* arena = Thread::current()->resource_area(); VectorSet visited(arena); + ideal_nodes.clear(); // Reset for use with set_map/get_map. + uint unique_old = _compile->unique(); + // Phase 1: Process possible allocations from alloc_worklist. // Create instance types for the CheckCastPP for allocations where possible. // @@ -1088,12 +1115,11 @@ while (alloc_worklist.length() != 0) { Node *n = alloc_worklist.pop(); uint ni = n->_idx; - const TypeOopPtr* tinst = NULL; if (n->is_Call()) { CallNode *alloc = n->as_Call(); // copy escape information to call node PointsToNode* ptn = ptnode_adr(alloc->_idx); - PointsToNode::EscapeState es = escape_state(alloc); + PointsToNode::EscapeState es = ptn->escape_state(); // We have an allocation or call which returns a Java object, // see if it is unescaped. if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) @@ -1145,20 +1171,18 @@ // so it could be eliminated. alloc->as_Allocate()->_is_scalar_replaceable = true; } - set_escape_state(n->_idx, es); // CheckCastPP escape state + set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state // in order for an object to be scalar-replaceable, it must be: // - a direct allocation (not a call returning an object) // - non-escaping // - eligible to be a unique type // - not determined to be ineligible by escape analysis - assert(ptnode_adr(alloc->_idx)->_node != NULL && - ptnode_adr(n->_idx)->_node != NULL, "should be registered"); - set_map(alloc->_idx, n); - set_map(n->_idx, alloc); + set_map(alloc, n); + set_map(n, alloc); const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); if (t == NULL) continue; // not a TypeOopPtr - tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni); + const TypeOopPtr* tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni); igvn->hash_delete(n); igvn->set_type(n, tinst); n->raise_bottom_type(tinst); @@ -1169,8 +1193,9 @@ // First, put on the worklist all Field edges from Connection Graph // which is more accurate then putting immediate users from Ideal Graph. 
for (uint e = 0; e < ptn->edge_count(); e++) { - Node *use = ptnode_adr(ptn->edge_target(e))->_node; - assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(), + PointsToNode* tgt = ptn->edge(e); + Node* use = tgt->ideal_node(); + assert(tgt->is_Field() && use->is_AddP(), "only AddP nodes are Field edges in CG"); if (use->outcnt() > 0) { // Don't process dead nodes Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); @@ -1202,16 +1227,18 @@ } } } else if (n->is_AddP()) { - VectorSet* ptset = PointsTo(get_addp_base(n)); - assert(ptset->Size() == 1, "AddP address is unique"); - uint elem = ptset->getelem(); // Allocation node's index - if (elem == _phantom_object) { - assert(false, "escaped allocation"); - continue; // Assume the value was set outside this method. + JavaObjectNode* jobj = unique_java_object(get_addp_base(n)); + if (jobj == NULL || jobj == phantom_obj) { +#ifdef ASSERT + ptnode_adr(get_addp_base(n)->_idx)->dump(); + ptnode_adr(n->_idx)->dump(); + assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); +#endif + _compile->record_failure(C2Compiler::retry_no_escape_analysis()); + return; } - Node *base = get_map(elem); // CheckCastPP node + Node *base = get_map(jobj->idx()); // CheckCastPP node if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path - tinst = igvn->type(base)->isa_oopptr(); } else if (n->is_Phi() || n->is_CheckCastPP() || n->is_EncodeP() || @@ -1221,18 +1248,20 @@ assert(n->is_Phi(), "loops only through Phi's"); continue; // already processed } - VectorSet* ptset = PointsTo(n); - if (ptset->Size() == 1) { - uint elem = ptset->getelem(); // Allocation node's index - if (elem == _phantom_object) { - assert(false, "escaped allocation"); - continue; // Assume the value was set outside this method. - } - Node *val = get_map(elem); // CheckCastPP node + JavaObjectNode* jobj = unique_java_object(n); + if (jobj == NULL || jobj == phantom_obj) { +#ifdef ASSERT + ptnode_adr(n->_idx)->dump(); + assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); +#endif + _compile->record_failure(C2Compiler::retry_no_escape_analysis()); + return; + } else { + Node *val = get_map(jobj->idx()); // CheckCastPP node TypeNode *tn = n->as_Type(); - tinst = igvn->type(val)->isa_oopptr(); + const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); assert(tinst != NULL && tinst->is_known_instance() && - (uint)tinst->instance_id() == elem , "instance type expected."); + tinst->instance_id() == jobj->idx() , "instance type expected."); const Type *tn_type = igvn->type(tn); const TypeOopPtr *tn_t; @@ -1315,6 +1344,8 @@ // New alias types were created in split_AddP(). uint new_index_end = (uint) _compile->num_alias_types(); + assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1"); + // Phase 2: Process MemNode's from memnode_worklist. compute new address type and // compute new values for Memory inputs (the Memory inputs are not // actually updated until phase 4.) @@ -1348,10 +1379,7 @@ if (mem != n->in(MemNode::Memory)) { // We delay the memory edge update since we need old one in // MergeMem code below when instances memory slices are separated. 
- debug_only(Node* pn = ptnode_adr(n->_idx)->_node;) - assert(pn == NULL || pn == n, "wrong node"); - set_map(n->_idx, mem); - ptnode_adr(n->_idx)->_node = n; + set_map(n, mem); } if (n->is_Load()) { continue; // don't push users @@ -1503,32 +1531,30 @@ visited.Reset(); Node_Stack old_mems(arena, _compile->unique() >> 2); #endif - for (uint i = 0; i < nodes_size(); i++) { - Node *nmem = get_map(i); - if (nmem != NULL) { - Node *n = ptnode_adr(i)->_node; - assert(n != NULL, "sanity"); - if (n->is_Mem()) { + for (uint i = 0; i < ideal_nodes.size(); i++) { + Node* n = ideal_nodes.at(i); + Node* nmem = get_map(n->_idx); + assert(nmem != NULL, "sanity"); + if (n->is_Mem()) { #if 0 // ifdef ASSERT - Node* old_mem = n->in(MemNode::Memory); - if (!visited.test_set(old_mem->_idx)) { - old_mems.push(old_mem, old_mem->outcnt()); - } + Node* old_mem = n->in(MemNode::Memory); + if (!visited.test_set(old_mem->_idx)) { + old_mems.push(old_mem, old_mem->outcnt()); + } #endif - assert(n->in(MemNode::Memory) != nmem, "sanity"); - if (!n->is_Load()) { - // Move memory users of a store first. - move_inst_mem(n, orig_phis, igvn); - } - // Now update memory input - igvn->hash_delete(n); - n->set_req(MemNode::Memory, nmem); - igvn->hash_insert(n); - record_for_optimizer(n); - } else { - assert(n->is_Allocate() || n->is_CheckCastPP() || - n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); + assert(n->in(MemNode::Memory) != nmem, "sanity"); + if (!n->is_Load()) { + // Move memory users of a store first. + move_inst_mem(n, orig_phis, igvn); } + // Now update memory input + igvn->hash_delete(n); + n->set_req(MemNode::Memory, nmem); + igvn->hash_insert(n); + record_for_optimizer(n); + } else { + assert(n->is_Allocate() || n->is_CheckCastPP() || + n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); } } #if 0 // ifdef ASSERT @@ -1560,6 +1586,8 @@ } void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) { + Compile::TracePhase t2("escapeAnalysis", &Phase::_t_escapeAnalysis, true); + // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction // to create space for them in ConnectionGraph::_nodes[]. Node* oop_null = igvn->zerocon(T_OBJECT); @@ -1581,37 +1609,53 @@ bool ConnectionGraph::compute_escape() { Compile* C = _compile; + PhaseGVN* igvn = _igvn; - // 1. Populate Connection Graph (CG) with Ideal nodes. + // 1. Populate Connection Graph (CG) with PointsTo nodes. - Unique_Node_List worklist_init; - worklist_init.map(C->unique(), NULL); // preallocate space + ideal_nodes.map(C->unique(), NULL); // preallocate space // Initialize worklist if (C->root() != NULL) { - worklist_init.push(C->root()); + ideal_nodes.push(C->root()); } + DEBUG_ONLY( GrowableArray addp_worklist; ) + + Unique_Node_List delayed_worklist; GrowableArray alloc_worklist; - GrowableArray addp_worklist; GrowableArray ptr_cmp_worklist; GrowableArray storestore_worklist; - PhaseGVN* igvn = _igvn; + GrowableArray ptnodes_worklist; + GrowableArray java_objects_worklist; + GrowableArray non_escaped_worklist; + GrowableArray oop_fields_worklist; + + { Compile::TracePhase t3("buildConnectionGraph", &Phase::_t_buildConnectionGraph, true); + // Push all useful nodes onto CG list and set their type. - for( uint next = 0; next < worklist_init.size(); ++next ) { - Node* n = worklist_init.at(next); - record_for_escape_analysis(n, igvn); - // Only allocations and java static calls results are checked - // for an escape status. See process_call_result() below. 
- if (n->is_Allocate() || n->is_CallStaticJava() && - ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) { - alloc_worklist.append(n); - } else if(n->is_AddP()) { - // Collect address nodes. Use them during stage 3 below - // to build initial connection graph field edges. - addp_worklist.append(n); - } else if (n->is_MergeMem()) { + for( uint next = 0; next < ideal_nodes.size(); ++next ) { + Node* n = ideal_nodes.at(next); + + // It is called only once per node since ideal_nodes is Unique_Node list. + build_connection_graph(n, &delayed_worklist, igvn); + + PointsToNode* ptn = ptnode_adr(n->_idx); + if (ptn != NULL) { + ptnodes_worklist.append(ptn); + if (ptn->is_JavaObject()) { + java_objects_worklist.append(ptn->as_JavaObject()); + if ((n->is_Allocate() || n->is_CallStaticJava()) && + (ptn->escape_state() < PointsToNode::GlobalEscape)) { + // Only allocations and java static calls results are interesting. + non_escaped_worklist.append(ptn->as_JavaObject()); + } + } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) { + oop_fields_worklist.append(ptn->as_Field()); + } + } + if (n->is_MergeMem()) { // Collect all MergeMem nodes to add memory slices for // scalar replaceable objects in split_unique_types(). _mergemem_worklist.append(n->as_MergeMem()); @@ -1624,64 +1668,51 @@ // escape status of the associated Allocate node some of them // may be eliminated. storestore_worklist.append(n); +#ifdef ASSERT + } else if(n->is_AddP()) { + // Collect address nodes. Use them during stage 3 below + // to build initial connection graph field edges. + addp_worklist.append(n); +#endif } for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { Node* m = n->fast_out(i); // Get user - worklist_init.push(m); + ideal_nodes.push(m); } } - if (alloc_worklist.length() == 0) { + if (non_escaped_worklist.length() == 0) { _collecting = false; return false; // Nothing to do. } - // 2. First pass to create simple CG edges (doesn't require to walk CG). - uint delayed_size = _delayed_worklist.size(); - for( uint next = 0; next < delayed_size; ++next ) { - Node* n = _delayed_worklist.at(next); - build_connection_graph(n, igvn); + // Add final simple edges. + while(delayed_worklist.size() > 0) { + Node* n = delayed_worklist.pop(); + build_connection_graph(n, NULL, igvn); } - // 3. Pass to create initial fields edges (JavaObject -F-> AddP) - // to reduce number of iterations during stage 4 below. - uint addp_length = addp_worklist.length(); - for( uint next = 0; next < addp_length; ++next ) { - Node* n = addp_worklist.at(next); - Node* base = get_addp_base(n); - if (base->is_Proj() && base->in(0)->is_Call()) - base = base->in(0); - PointsToNode::NodeType nt = ptnode_adr(base->_idx)->node_type(); - if (nt == PointsToNode::JavaObject) { - build_connection_graph(n, igvn); - } - } + uint ptnodes_length = ptnodes_worklist.length(); - GrowableArray cg_worklist; - cg_worklist.append(_phantom_object); - GrowableArray worklist; - - // 4. Build Connection Graph which need - // to walk the connection graph. 
- _progress = false; - for (uint ni = 0; ni < nodes_size(); ni++) { - PointsToNode* ptn = ptnode_adr(ni); - Node *n = ptn->_node; - if (n != NULL) { // Call, AddP, LoadP, StoreP - build_connection_graph(n, igvn); - if (ptn->node_type() != PointsToNode::UnknownType) - cg_worklist.append(n->_idx); // Collect CG nodes - if (!_processed.test(n->_idx)) - worklist.append(n->_idx); // Collect C/A/L/S nodes +#ifdef ASSERT + if (VerifyConnectionGraph) { + // Verify that no new simple edges could be created and all + // local vars has edges. + _verify = true; + for (uint next = 0; next < ptnodes_length; ++next) { + PointsToNode* ptn = ptnodes_worklist.at(next); + build_connection_graph(ptn->ideal_node(), NULL, igvn); + if (ptn->is_LocalVar() && ptn->edge_count() == 0) { + ptn->dump(); + assert(ptn->as_LocalVar()->edge_count() > 0, "sanity"); + } } + _verify = false; } +#endif - // After IGVN user nodes may have smaller _idx than - // their inputs so they will be processed first in - // previous loop. Because of that not all Graph - // edges will be created. Walk over interesting - // nodes again until no new edges are created. - // + // 2. Finish Graph construction. + // Normally only 1-3 passes needed to build // Connection Graph depending on graph complexity. // Observed 8 passes in jvm2008 compiler.compiler. @@ -1697,156 +1728,198 @@ #define CG_BUILD_TIME_LIMIT 30.0 #endif - uint length = worklist.length(); int iterations = 0; + + // Propagate GlobalEscape and ArgEscape escape states + // and check that we still have non escaped objects. + // The method pushs on _worklist Field nodes which reference phantom_object. + if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { + _collecting = false; + return false; // Nothing to do. + } + + // Propagate references to all JavaObject nodes. + uint java_objects_length = java_objects_worklist.length(); + elapsedTimer time; - while(_progress && + int new_edges = 1; + while ((new_edges > 0) && (iterations++ < CG_BUILD_ITER_LIMIT) && (time.seconds() < CG_BUILD_TIME_LIMIT)) { time.start(); - _progress = false; - for( uint next = 0; next < length; ++next ) { - int ni = worklist.at(next); - PointsToNode* ptn = ptnode_adr(ni); - Node* n = ptn->_node; - assert(n != NULL, "should be known node"); - build_connection_graph(n, igvn); + new_edges = 0; + // Propagate references to phantom_object for nodes pushed on _worklist + // by find_non_escaped_objects(). + new_edges += add_java_object_edges(phantom_obj, false); + for (uint next = 0; next < java_objects_length; ++next) { + JavaObjectNode* ptn = java_objects_worklist.at(next); + new_edges += add_java_object_edges(ptn, true); } + if (new_edges > 0) { + // Update escape states on each iteration if graph was updated. + if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { + _collecting = false; + return false; // Nothing to do. + } + } time.stop(); } + + time.start(); + // Find fields which have unknown value. + uint fields_length = oop_fields_worklist.length(); + for (uint next = 0; next < fields_length; next++) { + FieldNode* field = oop_fields_worklist.at(next); + if (field->edge_count() == 0) { + new_edges += find_field_value(field); + } + } + time.stop(); + + while ((new_edges > 0) && + (iterations++ < CG_BUILD_ITER_LIMIT) && + (time.seconds() < CG_BUILD_TIME_LIMIT)) { + time.start(); + new_edges = 0; + // Propagate references to phantom_object for nodes pushed on _worklist + // by previous code. 
+ new_edges += add_java_object_edges(phantom_obj, false); + if (new_edges > 0) { + // Update escape states on each iteration if graph was updated. + if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { + _collecting = false; + return false; // Nothing to do. + } + } + time.stop(); + } + if ((iterations >= CG_BUILD_ITER_LIMIT) || (time.seconds() >= CG_BUILD_TIME_LIMIT)) { assert(false, err_msg("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d", - time.seconds(), iterations, nodes_size(), length)); + time.seconds(), iterations, nodes_size(), ptnodes_length)); // Possible infinite build_connection_graph loop, // bailout (no changes to ideal graph were made). _collecting = false; return false; } + +#ifdef ASSERT + if (Verbose && PrintEscapeAnalysis) { + tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d", + iterations, nodes_size(), ptnodes_length); + } +#endif + #undef CG_BUILD_ITER_LIMIT #undef CG_BUILD_TIME_LIMIT - // 5. Propagate escaped states. - worklist.clear(); - // mark all nodes reachable from GlobalEscape nodes - (void)propagate_escape_state(&cg_worklist, &worklist, PointsToNode::GlobalEscape); + // 3. Find fields initialized by NULL for non escaped Allocations. - // mark all nodes reachable from ArgEscape nodes - bool has_non_escaping_obj = propagate_escape_state(&cg_worklist, &worklist, PointsToNode::ArgEscape); + uint non_escaped_length = non_escaped_worklist.length(); + for (uint next = 0; next < non_escaped_length; next++) { + JavaObjectNode* ptn = non_escaped_worklist.at(next); + PointsToNode::EscapeState es = ptn->escape_state(); + assert(es <= PointsToNode::ArgEscape, "sanity"); - Arena* arena = Thread::current()->resource_area(); - VectorSet visited(arena); - - // 6. Find fields initializing values for not escaped allocations - uint alloc_length = alloc_worklist.length(); - for (uint next = 0; next < alloc_length; ++next) { - Node* n = alloc_worklist.at(next); - PointsToNode::EscapeState es = ptnode_adr(n->_idx)->escape_state(); if (es == PointsToNode::NoEscape) { - has_non_escaping_obj = true; - if (n->is_Allocate()) { - find_init_values(n, &visited, igvn); - // The object allocated by this Allocate node will never be - // seen by an other thread. Mark it so that when it is - // expanded no MemBarStoreStore is added. - n->as_Allocate()->initialization()->set_does_not_escape(); + if (find_init_values(ptn, null_obj, igvn) > 0) { + // Adding references to NULL object does not change escape states + // since it does not escape. Also no fields are added to NULL object. + add_java_object_edges(null_obj, false); } - } else if ((es == PointsToNode::ArgEscape) && n->is_Allocate()) { - // Same as above. Mark this Allocate node so that when it is + } + Node* n = ptn->ideal_node(); + if (n->is_Allocate()) { + // The object allocated by this Allocate node will never be + // seen by an other thread. Mark it so that when it is // expanded no MemBarStoreStore is added. - n->as_Allocate()->initialization()->set_does_not_escape(); + InitializeNode* ini = n->as_Allocate()->initialization(); + if (ini != NULL) + ini->set_does_not_escape(); } } - uint cg_length = cg_worklist.length(); - - // Skip the rest of code if all objects escaped. 
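Stripped of its iteration and time guards, the propagation above is a plain worklist fixed point: push references out of every JavaObject node, re-derive escape states, and repeat until no new edge appears. A condensed sketch of that core loop, using the names from this patch with the control flow simplified:

  int new_edges = 1;
  while (new_edges > 0) {
    // First flush references to phantom_obj queued by escape propagation,
    // then push edges from every JavaObject node.
    new_edges = add_java_object_edges(phantom_obj, false);
    for (int next = 0; next < java_objects_worklist.length(); next++) {
      new_edges += add_java_object_edges(java_objects_worklist.at(next), true);
    }
    if (new_edges > 0 &&
        !find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
      return false;   // every object escapes globally, nothing left to optimize
    }
  }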
- if (!has_non_escaping_obj) { - cg_length = 0; - addp_length = 0; - } - - for (uint next = 0; next < cg_length; ++next) { - int ni = cg_worklist.at(next); - PointsToNode* ptn = ptnode_adr(ni); - PointsToNode::NodeType nt = ptn->node_type(); - if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) { - if (ptn->edge_count() == 0) { - // No values were found. Assume the value was set - // outside this method - add edge to phantom object. - add_pointsto_edge(ni, _phantom_object); +#ifdef ASSERT + if (VerifyConnectionGraph) { + uint addp_length = addp_worklist.length(); + for (uint next = 0; next < addp_length; ++next ) { + Node* n = addp_worklist.at(next); + FieldNode* field = ptnode_adr(n->_idx)->as_Field(); + if (field->is_oop()) { + // Verify that field has all bases + Node* base = get_addp_base(n); + PointsToNode* ptn = ptnode_adr(base->_idx); + if (ptn->is_JavaObject()) { + assert(field->has_base(ptn->as_JavaObject()), "sanity"); + } else { + assert(ptn->is_LocalVar(), "sanity"); + uint count = ptn->edge_count(); + for (uint i = 0; i < count; i++) { + PointsToNode* e = ptn->edge(i); + if (e->is_JavaObject()) { + assert(field->has_base(e->as_JavaObject()), "sanity"); + } + } + } + // Verify that all fields have initializing values. + if (field->edge_count() == 0) { + field->dump(); + assert(field->edge_count() > 0, "sanity"); + } } } } +#endif - // 7. Remove deferred edges from the graph. - for (uint next = 0; next < cg_length; ++next) { - int ni = cg_worklist.at(next); - PointsToNode* ptn = ptnode_adr(ni); - PointsToNode::NodeType nt = ptn->node_type(); - if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) { - remove_deferred(ni, &worklist, &visited); - } - } + // 4. Adjust scalar_replaceable state of nonescaping objects. - // 8. Adjust escape state of nonescaping objects. - for (uint next = 0; next < addp_length; ++next) { - Node* n = addp_worklist.at(next); - adjust_escape_state(n); - } + for (uint next = 0; next < non_escaped_length; next++) { + JavaObjectNode* ptn = non_escaped_worklist.at(next); + if (ptn->escape_state() == PointsToNode::NoEscape && + ptn->scalar_replaceable()) { + adjust_scalar_replaceable_state(ptn); - // push all NoEscape nodes on the worklist - worklist.clear(); - for( uint next = 0; next < cg_length; ++next ) { - int nk = cg_worklist.at(next); - if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape && - !is_null_ptr(nk)) - worklist.push(nk); + if (ptn->scalar_replaceable()) { + // Push scalar replaceable allocations on alloc_worklist + // for processing in split_unique_types(). + alloc_worklist.append(ptn->ideal_node()); + } + } } - alloc_worklist.clear(); - // Propagate scalar_replaceable value. - while(worklist.length() > 0) { - uint nk = worklist.pop(); - PointsToNode* ptn = ptnode_adr(nk); - Node* n = ptn->_node; - bool scalar_replaceable = ptn->scalar_replaceable(); - if (n->is_Allocate() && scalar_replaceable) { - // Push scalar replaceable allocations on alloc_worklist - // for processing in split_unique_types(). Note, - // following code may change scalar_replaceable value. - alloc_worklist.append(n); +#ifdef ASSERT + if (VerifyConnectionGraph) { + // Verify that graph is complete - no new edges could be added. 
+ new_edges = 0; + for (uint next = 0; next < java_objects_length; ++next) { + JavaObjectNode* ptn = java_objects_worklist.at(next); + new_edges += add_java_object_edges(ptn, true); } - uint e_cnt = ptn->edge_count(); - for (uint ei = 0; ei < e_cnt; ei++) { - uint npi = ptn->edge_target(ei); - if (is_null_ptr(npi)) - continue; - PointsToNode *np = ptnode_adr(npi); - if (np->escape_state() < PointsToNode::NoEscape) { - set_escape_state(npi, PointsToNode::NoEscape); - if (!scalar_replaceable) { - np->set_scalar_replaceable(false); - } - worklist.push(npi); - } else if (np->scalar_replaceable() && !scalar_replaceable) { - np->set_scalar_replaceable(false); - worklist.push(npi); - } - } + assert(new_edges == 0, "graph was not complete"); + + // Verify that escape state is final. + uint length = non_escaped_worklist.length(); + find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist); + assert((non_escaped_length == (uint)non_escaped_worklist.length()) && + (non_escaped_length == length) && + (_worklist.length() == 0), "escape state was not final"); } +#endif _collecting = false; assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build"); - assert(ptnode_adr(_oop_null)->escape_state() == PointsToNode::NoEscape && - ptnode_adr(_oop_null)->edge_count() == 0, "sanity"); - if (UseCompressedOops) { - assert(ptnode_adr(_noop_null)->escape_state() == PointsToNode::NoEscape && - ptnode_adr(_noop_null)->edge_count() == 0, "sanity"); - } + assert(null_obj->escape_state() == PointsToNode::NoEscape && + null_obj->edge_count() == 0 && + !null_obj->arraycopy_src() && + !null_obj->arraycopy_dst(), "sanity"); + } // TracePhase t3("buildConnectionGraph") + + bool has_non_escaping_obj = (non_escaped_worklist.length() > 0); + if (EliminateLocks && has_non_escaping_obj) { // Mark locks before changing ideal graph. int cnt = C->macro_count(); @@ -1855,9 +1928,7 @@ if (n->is_AbstractLock()) { // Lock and Unlock nodes AbstractLockNode* alock = n->as_AbstractLock(); if (!alock->is_non_esc_obj()) { - PointsToNode::EscapeState es = escape_state(alock->obj_node()); - assert(es != PointsToNode::UnknownEscape, "should know"); - if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) { + if (not_global_escape(alock->obj_node())) { assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); // The lock could be marked eliminated by lock coarsening // code during first IGVN before EA. 
Replace coarsened flag @@ -1917,24 +1988,24 @@ #ifndef PRODUCT if (PrintEscapeAnalysis) { - dump(); // Dump ConnectionGraph + dump(ptnodes_worklist); // Dump ConnectionGraph } #endif - bool has_scalar_replaceable_candidates = false; - alloc_length = alloc_worklist.length(); - for (uint next = 0; next < alloc_length; ++next) { - Node* n = alloc_worklist.at(next); - PointsToNode* ptn = ptnode_adr(n->_idx); - assert(ptn->escape_state() == PointsToNode::NoEscape, "sanity"); - if (ptn->scalar_replaceable()) { - has_scalar_replaceable_candidates = true; - break; + bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0); +#ifdef ASSERT + if (VerifyConnectionGraph) { + uint alloc_length = alloc_worklist.length(); + for (uint next = 0; next < alloc_length; ++next) { + Node* n = alloc_worklist.at(next); + PointsToNode* ptn = ptnode_adr(n->_idx); + assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity"); } } +#endif - if ( has_scalar_replaceable_candidates && - C->AliasLevel() >= 3 && EliminateAllocations ) { + if (has_scalar_replaceable_candidates && + C->AliasLevel() >= 3 && EliminateAllocations) { // Now use the escape information to create unique types for // scalar replaceable objects. @@ -1961,74 +2032,119 @@ return has_non_escaping_obj; } +// Find fields which have unknown value. +int ConnectionGraph::find_field_value(FieldNode* field) { + // Escaped fields should have init value already. + assert(field->escape_state() == PointsToNode::NoEscape, "sanity"); + + int new_edges = 0; + + uint bcnt = field->base_count(); + for (uint i = 0; i < bcnt; i++) { + PointsToNode* base = field->base(i); + if (base->is_JavaObject()) { + // Skip Allocate's fields which will be processed later. + if (base->ideal_node()->is_Allocate()) + return 0; + assert(base == null_obj, "only NULL ptr base expected here"); + } + } + if (add_edge(field, phantom_obj)) { + // New edge was added + new_edges++; + add_field_uses_to_worklist(field); + } + return new_edges; +} + // Find fields initializing values for allocations. -void ConnectionGraph::find_init_values(Node* alloc, VectorSet* visited, PhaseTransform* phase) { - assert(alloc->is_Allocate(), "Should be called for Allocate nodes only"); - PointsToNode* pta = ptnode_adr(alloc->_idx); +int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) { assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only"); + + int new_edges = 0; + + Node* alloc = pta->ideal_node(); + + if (init_val == phantom_obj) { + // Do nothing for Allocate nodes since its fields values are "known". + if (alloc->is_Allocate()) + return 0; + + assert(alloc->as_CallStaticJava(), "sanity"); +#ifdef ASSERT + if (alloc->as_CallStaticJava()->method() == NULL) { + const char* name = alloc->as_CallStaticJava()->_name; + assert(strncmp(name, "_multianewarray", 15) == 0, "sanity"); + } +#endif + + // Non-escaped allocation returned from Java or runtime call have + // unknown values in fields. + uint cnt = pta->edge_count(); + for (uint i = 0; i < cnt; i++) { + PointsToNode* ptn = pta->edge(i); + if (ptn->is_Field() && ptn->as_Field()->is_oop()) { + if (add_edge(ptn, phantom_obj)) { + // New edge was added + new_edges++; + add_field_uses_to_worklist(ptn->as_Field()); + } + } + } + return new_edges; + } + + assert(init_val == null_obj, "sanity"); + // Do nothing for Call nodes since its fields values are unknown. 
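A hypothetical Java shape for the case handled above, where a non-escaped object comes back from a runtime stub such as _multianewarray: even though the outer object does not escape, the values held in its fields are unknown to the analysis, which is what the phantom_obj edges model. The example below is a sketch under that assumption, not HotSpot code.

    // Illustrative only: a multi-dimensional allocation is typically lowered
    // to a _multianewarray runtime call, so even if 'a' does not escape, the
    // inner arrays referenced by its elements are unknown to EA and are
    // represented by edges to phantom_obj.
    final class MultiArrayExample {
        static int rowLengths() {
            int[][] a = new int[2][3];      // lowered to a _multianewarray call
            return a[0].length + a[1].length;
        }
    }
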
+ if (!alloc->is_Allocate()) + return 0; + InitializeNode* ini = alloc->as_Allocate()->initialization(); Compile* C = _compile; - visited->Reset(); + bool visited_bottom_offset = false; + GrowableArray offsets_worklist; + // Check if a oop field's initializing value is recorded and add - // a corresponding NULL field's value if it is not recorded. + // a corresponding NULL if field's value if it is not recorded. // Connection Graph does not record a default initialization by NULL // captured by Initialize node. // - uint null_idx = UseCompressedOops ? _noop_null : _oop_null; uint ae_cnt = pta->edge_count(); - bool visited_bottom_offset = false; for (uint ei = 0; ei < ae_cnt; ei++) { - uint nidx = pta->edge_target(ei); // Field (AddP) - PointsToNode* ptn = ptnode_adr(nidx); - assert(ptn->_node->is_AddP(), "Should be AddP nodes only"); - int offset = ptn->offset(); + + PointsToNode* ptn = pta->edge(ei); // Field (AddP) + if (!ptn->is_Field() || !ptn->as_Field()->is_oop()) + continue; // Not oop field + + int offset = ptn->as_Field()->offset(); if (offset == Type::OffsetBot) { if (!visited_bottom_offset) { - visited_bottom_offset = true; - // Check only oop fields. - const Type* adr_type = ptn->_node->as_AddP()->bottom_type(); - if (!adr_type->isa_aryptr() || - (adr_type->isa_aryptr()->klass() == NULL) || - adr_type->isa_aryptr()->klass()->is_obj_array_klass()) { - // OffsetBot is used to reference array's element, - // always add reference to NULL since we don't - // known which element is referenced. - add_edge_from_fields(alloc->_idx, null_idx, offset); + // OffsetBot is used to reference array's element, + // always add reference to NULL to all Field nodes since we don't + // known which element is referenced. + if (add_edge(ptn, null_obj)) { + // New edge was added + new_edges++; + add_field_uses_to_worklist(ptn->as_Field()); + visited_bottom_offset = true; } } - } else if (offset != oopDesc::klass_offset_in_bytes() && - !visited->test_set(offset)) { - + } else { // Check only oop fields. - const Type* adr_type = ptn->_node->as_AddP()->bottom_type(); - BasicType basic_field_type = T_INT; - if (adr_type->isa_instptr()) { - ciField* field = C->alias_type(adr_type->isa_instptr())->field(); - if (field != NULL) { - basic_field_type = field->layout_type(); - } else { - // Ignore non field load (for example, klass load) - } - } else if (adr_type->isa_aryptr()) { - if (offset != arrayOopDesc::length_offset_in_bytes()) { - const Type* elemtype = adr_type->isa_aryptr()->elem(); - basic_field_type = elemtype->array_element_basic_type(); - } else { - // Ignore array length load - } + const Type* adr_type = ptn->ideal_node()->as_AddP()->bottom_type(); + if (adr_type->isa_rawptr()) { #ifdef ASSERT - } else { // Raw pointers are used for initializing stores so skip it // since it should be recorded already - Node* base = get_addp_base(ptn->_node); + Node* base = get_addp_base(ptn->ideal_node()); assert(adr_type->isa_rawptr() && base->is_Proj() && (base->in(0) == alloc),"unexpected pointer type"); #endif + continue; } - if (basic_field_type == T_OBJECT || - basic_field_type == T_NARROWOOP || - basic_field_type == T_ARRAY) { + if (!offsets_worklist.contains(offset)) { + offsets_worklist.append(offset); Node* value = NULL; if (ini != NULL) { BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT; @@ -2045,165 +2161,262 @@ // that compare pointers optimization works correctly. 
} } - if (value == NULL || value != ptnode_adr(value->_idx)->_node) { + if (value == NULL) { // A field's initializing value was not recorded. Add NULL. - add_edge_from_fields(alloc->_idx, null_idx, offset); + if (add_edge(ptn, null_obj)) { + // New edge was added + new_edges++; + add_field_uses_to_worklist(ptn->as_Field()); + } } } } } + return new_edges; } -// Adjust escape state after Connection Graph is built. -void ConnectionGraph::adjust_escape_state(Node* n) { - PointsToNode* ptn = ptnode_adr(n->_idx); - assert(n->is_AddP(), "Should be called for AddP nodes only"); - // Search for objects which are not scalar replaceable +// Adjust scalar_replaceable state after Connection Graph is built. +void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) { + // Search for non escaped objects which are not scalar replaceable // and mark them to propagate the state to referenced objects. - // - int offset = ptn->offset(); - Node* base = get_addp_base(n); - VectorSet* ptset = PointsTo(base); - int ptset_size = ptset->Size(); - - // An object is not scalar replaceable if the field which may point - // to it has unknown offset (unknown element of an array of objects). + // 1. An object is not scalar replaceable if the field into which it is + // stored has unknown offset (stored into unknown element of an array). // + uint count = jobj->use_count(); + for (uint i = 0; i < count; i++) { + PointsToNode* use = jobj->use(i); + assert(!use->is_Arraycopy(), "sanity"); + if (use->is_Field()) { + FieldNode* field = use->as_Field(); + assert(field->is_oop() && field->scalar_replaceable() && + field->fields_escape_state() == PointsToNode::NoEscape, "sanity"); + if (field->offset() == Type::OffsetBot) { + jobj->set_scalar_replaceable(false); + return; + } + } + assert(use->is_Field() || use->is_LocalVar(), "sanity"); - if (offset == Type::OffsetBot) { - uint e_cnt = ptn->edge_count(); - for (uint ei = 0; ei < e_cnt; ei++) { - uint npi = ptn->edge_target(ei); - ptnode_adr(npi)->set_scalar_replaceable(false); + // 2. An object is not scalar replaceable if it is merged with other objects. + uint e_count = use->edge_count(); + for (uint j = 0; j < e_count; j++) { + PointsToNode* ptn = use->edge(j); + if (ptn->is_JavaObject() && ptn != jobj) { + // Mark all objects. + jobj->set_scalar_replaceable(false); + ptn->set_scalar_replaceable(false); + } } + if (!jobj->scalar_replaceable()) { + return; + } } - // Currently an object is not scalar replaceable if a LoadStore node - // access its field since the field value is unknown after it. - // - bool has_LoadStore = false; - for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { - Node *use = n->fast_out(i); - if (use->is_LoadStore()) { - has_LoadStore = true; - break; + count = jobj->edge_count(); + for (uint i = 0; i < count; i++) { + // Non escaped object should points only to fields. + FieldNode* field = jobj->edge(i)->as_Field(); + int offset = field->as_Field()->offset(); + + // 3. An object is not scalar replaceable if it has a field with unknown + // offset (array's element is accessed in loop). + if (offset == Type::OffsetBot) { + jobj->set_scalar_replaceable(false); + return; } - } - // An object is not scalar replaceable if the address points - // to unknown field (unknown element for arrays, offset is OffsetBot). - // - // Or the address may point to more then one object. 
This may produce - // the false positive result (set not scalar replaceable) - // since the flow-insensitive escape analysis can't separate - // the case when stores overwrite the field's value from the case - // when stores happened on different control branches. - // - // Note: it will disable scalar replacement in some cases: - // - // Point p[] = new Point[1]; - // p[0] = new Point(); // Will be not scalar replaced - // - // but it will save us from incorrect optimizations in next cases: - // - // Point p[] = new Point[1]; - // if ( x ) p[0] = new Point(); // Will be not scalar replaced - // - if (ptset_size > 1 || ptset_size != 0 && - (has_LoadStore || offset == Type::OffsetBot)) { - for( VectorSetI j(ptset); j.test(); ++j ) { - ptnode_adr(j.elem)->set_scalar_replaceable(false); + + // 4. Currently an object is not scalar replaceable if a LoadStore node + // access its field since the field value is unknown after it. + // + Node* n = field->ideal_node(); + for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { + if (n->fast_out(i)->is_LoadStore()) { + jobj->set_scalar_replaceable(false); + return; + } } + + // 5. Or the address may point to more then one object. This may produce + // the false positive result (set not scalar replaceable) + // since the flow-insensitive escape analysis can't separate + // the case when stores overwrite the field's value from the case + // when stores happened on different control branches. + // + // Note: it will disable scalar replacement in some cases: + // + // Point p[] = new Point[1]; + // p[0] = new Point(); // Will be not scalar replaced + // + // but it will save us from incorrect optimizations in next cases: + // + // Point p[] = new Point[1]; + // if ( x ) p[0] = new Point(); // Will be not scalar replaced + // + uint bcnt = field->base_count(); + if (bcnt > 1) { + for (uint i = 0; i < bcnt; i++) { + PointsToNode* base = field->base(i); + // Don't take into account LocalVar nodes which + // may point to only one object which should be also + // this field's base by now. + if (base->is_JavaObject() && base != jobj) { + // Mark all bases. + jobj->set_scalar_replaceable(false); + base->set_scalar_replaceable(false); + } + } + } } } -// Propagate escape states to referenced nodes. -bool ConnectionGraph::propagate_escape_state(GrowableArray* cg_worklist, - GrowableArray* worklist, - PointsToNode::EscapeState esc_state) { - bool has_java_obj = false; - // push all nodes with the same escape state on the worklist - uint cg_length = cg_worklist->length(); - for (uint next = 0; next < cg_length; ++next) { - int nk = cg_worklist->at(next); - if (ptnode_adr(nk)->escape_state() == esc_state) - worklist->push(nk); +// Propagate GlobalEscape and ArgEscape escape states to all nodes +// and check that we still have non escaped java objects. +bool ConnectionGraph::find_non_escaped_objects(GrowableArray& ptnodes_worklist, + GrowableArray& non_escaped_worklist) { + GrowableArray escape_worklist; + + // First, put all nodes with GlobalEscape and ArgEscape states on worklist. 
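In addition to the Point[] example in the comments above, a hedged sketch of the "merged with other objects" rule: two allocations that meet at a control-flow merge cannot be scalar replaced, because a single local may then refer to more than one object (Point as in the earlier sketch; illustrative only).

    // Illustrative only: 'p' may refer to either allocation after the merge,
    // so both allocations are marked not scalar replaceable even though
    // neither of them escapes the method.
    final class MergedAllocExample {
        static int pick(boolean cond) {
            Point p = cond ? new Point(1, 2) : new Point(3, 4);
            return p.x + p.y;
        }
    }
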
+ uint ptnodes_length = ptnodes_worklist.length(); + for (uint next = 0; next < ptnodes_length; ++next) { + PointsToNode* ptn = ptnodes_worklist.at(next); + if (ptn->escape_state() >= PointsToNode::ArgEscape || + ptn->fields_escape_state() >= PointsToNode::ArgEscape) { + escape_worklist.push(ptn); + } } - // mark all reachable nodes - while (worklist->length() > 0) { - int pt = worklist->pop(); - PointsToNode* ptn = ptnode_adr(pt); - if (ptn->node_type() == PointsToNode::JavaObject && - !is_null_ptr(pt)) { - has_java_obj = true; - if (esc_state > PointsToNode::NoEscape) { - // fields values are unknown if object escapes - add_edge_from_fields(pt, _phantom_object, Type::OffsetBot); + // Set escape states to referenced nodes (edges list). + while (escape_worklist.length() > 0) { + PointsToNode* ptn = escape_worklist.pop(); + PointsToNode::EscapeState es = ptn->escape_state(); + PointsToNode::EscapeState field_es = ptn->fields_escape_state(); + if (ptn->is_Field() && ptn->as_Field()->is_oop() && + es >= PointsToNode::ArgEscape) { + // GlobalEscape or ArgEscape state of field means it has unknown value. + if (add_edge(ptn, phantom_obj)) { + // New edge was added + add_field_uses_to_worklist(ptn->as_Field()); } } - uint e_cnt = ptn->edge_count(); - for (uint ei = 0; ei < e_cnt; ei++) { - uint npi = ptn->edge_target(ei); - if (is_null_ptr(npi)) - continue; - PointsToNode *np = ptnode_adr(npi); - if (np->escape_state() < esc_state) { - set_escape_state(npi, esc_state); - worklist->push(npi); + uint cnt = ptn->edge_count(); + for (uint i = 0; i < cnt; i++) { + PointsToNode* e = ptn->edge(i); + if (e->is_Arraycopy()) { + assert(ptn->arraycopy_dst(), "sanity"); + // Propagate only fields escape state through arraycopy edge. + if (e->fields_escape_state() < field_es) { + set_fields_escape_state(e, field_es); + escape_worklist.push(e); + } + } else if (es >= field_es) { + // fields_escape_state is also set to 'es' if it is less than 'es'. + if (e->escape_state() < es) { + set_escape_state(e, es); + escape_worklist.push(e); + } + } else { + // Propagate field escape state. + bool es_changed = false; + if (e->fields_escape_state() < field_es) { + set_fields_escape_state(e, field_es); + es_changed = true; + } + if ((e->escape_state() < field_es) && + e->is_Field() && ptn->is_JavaObject() && + e->as_Field()->is_oop()) { + // Change escape state of referenced fileds. + set_escape_state(e, field_es); + es_changed = true;; + } else if (e->escape_state() < es) { + set_escape_state(e, es); + es_changed = true;; + } + if (es_changed) { + escape_worklist.push(e); + } } } } - // Has not escaping java objects - return has_java_obj && (esc_state < PointsToNode::GlobalEscape); + + // Remove escaped objects from non_escaped list. + for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) { + JavaObjectNode* ptn = non_escaped_worklist.at(next); + if (ptn->escape_state() >= PointsToNode::GlobalEscape) { + non_escaped_worklist.delete_at(next); + } + if (ptn->escape_state() == PointsToNode::NoEscape) { + // Find fields in non-escaped allocations which have unknown value. + find_init_values(ptn, phantom_obj, NULL); + } + } + + return (non_escaped_worklist.length() > 0); } // Optimize objects compare. Node* ConnectionGraph::optimize_ptr_compare(Node* n) { assert(OptimizePtrCompare, "sanity"); - // Clone returned Set since PointsTo() returns pointer - // to the same structure ConnectionGraph.pt_ptset. 
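A hedged Java illustration of the propagation performed above: once an object is GlobalEscape, everything reachable from it through field edges must become at least as escaped. The Box class and field names are illustrative only.

    // Illustrative only: 'box' escapes through the static field, and the
    // propagation step then forces the Point stored into box.val to
    // GlobalEscape as well, even though 'p' is only stored locally here.
    final class Box { Object val; }
    final class PropagationExample {
        static Box shared;                  // hypothetical escape route
        static void publish() {
            Point p = new Point(1, 2);
            Box box = new Box();
            box.val = p;
            shared = box;                   // box escapes, so p escapes too
        }
    }
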
- VectorSet ptset1 = *PointsTo(n->in(1)); - VectorSet ptset2 = *PointsTo(n->in(2)); + PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); + PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); + assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); + assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); + + JavaObjectNode* jobj1 = unique_java_object(n->in(1)); + JavaObjectNode* jobj2 = unique_java_object(n->in(2)); + // Check simple cases first. - if (ptset1.Size() == 1) { - uint pt1 = ptset1.getelem(); - PointsToNode* ptn1 = ptnode_adr(pt1); - if (ptn1->escape_state() == PointsToNode::NoEscape) { - if (ptset2.Size() == 1 && ptset2.getelem() == pt1) { + if (jobj1 != NULL) { + if (jobj1->escape_state() == PointsToNode::NoEscape) { + if (jobj1 == jobj2) { // Comparing the same not escaping object. return _pcmp_eq; } - Node* obj = ptn1->_node; + Node* obj = jobj1->ideal_node(); // Comparing not escaping allocation. if ((obj->is_Allocate() || obj->is_CallStaticJava()) && - !ptset2.test(pt1)) { + !ptn2->points_to(jobj1)) { return _pcmp_neq; // This includes nullness check. } } - } else if (ptset2.Size() == 1) { - uint pt2 = ptset2.getelem(); - PointsToNode* ptn2 = ptnode_adr(pt2); - if (ptn2->escape_state() == PointsToNode::NoEscape) { - Node* obj = ptn2->_node; + } + if (jobj2 != NULL) { + if (jobj2->escape_state() == PointsToNode::NoEscape) { + Node* obj = jobj2->ideal_node(); // Comparing not escaping allocation. if ((obj->is_Allocate() || obj->is_CallStaticJava()) && - !ptset1.test(pt2)) { + !ptn1->points_to(jobj2)) { return _pcmp_neq; // This includes nullness check. } } } - if (!ptset1.disjoint(ptset2)) { + if (jobj1 != NULL && jobj1 != phantom_obj && + jobj2 != NULL && jobj2 != phantom_obj && + jobj1->ideal_node()->is_Con() && + jobj2->ideal_node()->is_Con()) { + // Klass or String constants compare. Need to be careful with + // compressed pointers - compare types of ConN and ConP instead of nodes. + const Type* t1 = jobj1->ideal_node()->bottom_type()->make_ptr(); + const Type* t2 = jobj2->ideal_node()->bottom_type()->make_ptr(); + assert(t1 != NULL && t2 != NULL, "sanity"); + if (t1->make_ptr() == t2->make_ptr()) { + return _pcmp_eq; + } else { + return _pcmp_neq; + } + } + + if (ptn1->meet(ptn2)) { return NULL; // Sets are not disjoint } // Sets are disjoint. - bool set1_has_unknown_ptr = ptset1.test(_phantom_object) != 0; - bool set2_has_unknown_ptr = ptset2.test(_phantom_object) != 0; - bool set1_has_null_ptr = (ptset1.test(_oop_null) | ptset1.test(_noop_null)) != 0; - bool set2_has_null_ptr = (ptset2.test(_oop_null) | ptset2.test(_noop_null)) != 0; + bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); + bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); + bool set1_has_null_ptr = ptn1->points_to(null_obj); + bool set2_has_null_ptr = ptn2->points_to(null_obj); if (set1_has_unknown_ptr && set2_has_null_ptr || set2_has_unknown_ptr && set1_has_null_ptr) { @@ -2214,36 +2427,14 @@ // Disjointness by itself is not sufficient since // alias analysis is not complete for escaped objects. // Disjoint sets are definitely unrelated only when - // at least one set has only not escaping objects. + // at least one set has only not escaping allocations. 
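A hedged Java-level view of what optimize_ptr_compare() enables: a reference comparison involving a non-escaping allocation can be folded, because that allocation cannot alias any unrelated value. The shape below is a sketch of a case expected to fold, not a guarantee.

    // Illustrative only: 'a' is a fresh non-escaping allocation, so the
    // comparison against an unrelated argument is expected to fold to false
    // (this also covers the nullness check), while a == a folds to true.
    final class PtrCompareExample {
        static boolean neverEqual(Object b) {
            Object a = new Object();
            return a == b;                  // expected to fold to false
        }
    }
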
if (!set1_has_unknown_ptr && !set1_has_null_ptr) { - bool has_only_non_escaping_alloc = true; - for (VectorSetI i(&ptset1); i.test(); ++i) { - uint pt = i.elem; - PointsToNode* ptn = ptnode_adr(pt); - Node* obj = ptn->_node; - if (ptn->escape_state() != PointsToNode::NoEscape || - !(obj->is_Allocate() || obj->is_CallStaticJava())) { - has_only_non_escaping_alloc = false; - break; - } - } - if (has_only_non_escaping_alloc) { + if (ptn1->not_escaped_allocation()) { return _pcmp_neq; } } if (!set2_has_unknown_ptr && !set2_has_null_ptr) { - bool has_only_non_escaping_alloc = true; - for (VectorSetI i(&ptset2); i.test(); ++i) { - uint pt = i.elem; - PointsToNode* ptn = ptnode_adr(pt); - Node* obj = ptn->_node; - if (ptn->escape_state() != PointsToNode::NoEscape || - !(obj->is_Allocate() || obj->is_CallStaticJava())) { - has_only_non_escaping_alloc = false; - break; - } - } - if (has_only_non_escaping_alloc) { + if (ptn2->not_escaped_allocation()) { return _pcmp_neq; } } @@ -2273,40 +2464,27 @@ bool src_has_oops = false; for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { const Type* at = d->field_at(i); - Node *arg = call->in(i)->uncast(); + Node *arg = call->in(i); const Type *aat = phase->type(arg); - PointsToNode::EscapeState arg_esc = ptnode_adr(arg->_idx)->escape_state(); - if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() && - (is_arraycopy || arg_esc < PointsToNode::ArgEscape)) { -#ifdef ASSERT + if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) + continue; + if (arg->is_AddP()) { + // + // The inline_native_clone() case when the arraycopy stub is called + // after the allocation before Initialize and CheckCastPP nodes. + // Or normal arraycopy for object arrays case. + // + // Set AddP's base (Allocate) as not scalar replaceable since + // pointer to the base (with offset) is passed as argument. + // + arg = get_addp_base(arg); + } + PointsToNode* arg_ptn = ptnode_adr(arg->_idx); + assert(arg_ptn != NULL, "should be registered"); + PointsToNode::EscapeState arg_esc = arg_ptn->escape_state(); + if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) { assert(aat == Type::TOP || aat == TypePtr::NULL_PTR || aat->isa_ptr() != NULL, "expecting an Ptr"); - if (!(is_arraycopy || - call->as_CallLeaf()->_name != NULL && - (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 || - strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 )) - ) { - call->dump(); - assert(false, "EA: unexpected CallLeaf"); - } -#endif - if (arg_esc < PointsToNode::ArgEscape) { - set_escape_state(arg->_idx, PointsToNode::ArgEscape); - Node* arg_base = arg; - if (arg->is_AddP()) { - // - // The inline_native_clone() case when the arraycopy stub is called - // after the allocation before Initialize and CheckCastPP nodes. - // Or normal arraycopy for object arrays case. - // - // Set AddP's base (Allocate) as not scalar replaceable since - // pointer to the base (with offset) is passed as argument. - // - arg_base = get_addp_base(arg); - set_escape_state(arg_base->_idx, PointsToNode::ArgEscape); - } - } - bool arg_has_oops = aat->isa_oopptr() && (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() || (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass())); @@ -2319,33 +2497,42 @@ // arraycopy(char[],0,Object*,0,size); // arraycopy(Object*,0,char[],0,size); // - // Do nothing special in such cases. + // Don't add edges in such cases. // - if (is_arraycopy && (i > TypeFunc::Parms) && - src_has_oops && arg_has_oops) { - // Destination object's fields reference an unknown object. 
- Node* arg_base = arg; - if (arg->is_AddP()) { - arg_base = get_addp_base(arg); - } - for (VectorSetI s(PointsTo(arg_base)); s.test(); ++s) { - uint ps = s.elem; - set_escape_state(ps, PointsToNode::ArgEscape); - add_edge_from_fields(ps, _phantom_object, Type::OffsetBot); - } - // Conservatively all values in source object fields globally escape - // since we don't know if values in destination object fields - // escape (it could be traced but it is too expensive). - Node* src = call->in(TypeFunc::Parms)->uncast(); - Node* src_base = src; + bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy && + arg_has_oops && (i > TypeFunc::Parms); +#ifdef ASSERT + if (!(is_arraycopy || + call->as_CallLeaf()->_name != NULL && + (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 || + strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 )) + ) { + call->dump(); + assert(false, "EA: unexpected CallLeaf"); + } +#endif + // Always process arraycopy's destination object since + // we need to add all possible edges to references in + // source object. + if (arg_esc >= PointsToNode::ArgEscape && + !arg_is_arraycopy_dest) { + continue; + } + set_escape_state(arg_ptn, PointsToNode::ArgEscape); + if (arg_is_arraycopy_dest) { + Node* src = call->in(TypeFunc::Parms); if (src->is_AddP()) { - src_base = get_addp_base(src); + src = get_addp_base(src); } - for (VectorSetI s(PointsTo(src_base)); s.test(); ++s) { - uint ps = s.elem; - set_escape_state(ps, PointsToNode::ArgEscape); - // Use OffsetTop to indicate fields global escape. - add_edge_from_fields(ps, _phantom_object, Type::OffsetTop); + PointsToNode* src_ptn = ptnode_adr(src->_idx); + assert(src_ptn != NULL, "should be registered"); + if (arg_ptn != src_ptn) { + // Special arraycopy edge: + // A destination object's field can't have the source object + // as base since objects escape states are not related. + // Only escape state of destination object's fields affects + // escape state of fields in source object. + add_arraycopy(call, PointsToNode::ArgEscape, src_ptn, arg_ptn); } } } @@ -2357,54 +2544,55 @@ // For a static call, we know exactly what method is being called. // Use bytecode estimator to record the call's escape affects { - ciMethod *meth = call->as_CallJava()->method(); - BCEscapeAnalyzer *call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL; +#ifdef ASSERT + const char* name = call->as_CallStaticJava()->_name; + assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only"); +#endif + + ciMethod* meth = call->as_CallJava()->method(); + BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL; // fall-through if not a Java method or no analyzer information if (call_analyzer != NULL) { - const TypeTuple * d = call->tf()->domain(); - bool copy_dependencies = false; + PointsToNode* call_ptn = ptnode_adr(call->_idx); + const TypeTuple* d = call->tf()->domain(); for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { const Type* at = d->field_at(i); int k = i - TypeFunc::Parms; - Node *arg = call->in(i)->uncast(); - + Node* arg = call->in(i); + PointsToNode* arg_ptn = ptnode_adr(arg->_idx); + if (at->isa_ptr() != NULL && + call_analyzer->is_arg_returned(k)) { + // The call returns arguments. + if (call_ptn != NULL) { // Is call's result used? 
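A hedged sketch of the situation the special Arraycopy edge above models: after the copy, the destination's elements may reference whatever the source's elements reference, while the two base arrays themselves stay unrelated (Point as in the earlier sketch; illustrative only).

    // Illustrative only: after the copy, dst's elements may point to the same
    // Point objects as src's elements, so escape information for dst's fields
    // has to take src's fields into account (the Arraycopy edge), even though
    // dst itself is a distinct, possibly non-escaping array.
    final class ArraycopyExample {
        static Point firstCopy(Point[] src) {
            Point[] dst = new Point[src.length];
            System.arraycopy(src, 0, dst, 0, src.length);
            return dst[0];
        }
    }
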
+ assert(call_ptn->is_LocalVar(), "node should be registered"); + assert(arg_ptn != NULL, "node should be registered"); + add_edge(call_ptn, arg_ptn); + } + } if (at->isa_oopptr() != NULL && - ptnode_adr(arg->_idx)->escape_state() < PointsToNode::GlobalEscape) { - + arg_ptn->escape_state() < PointsToNode::GlobalEscape) { bool global_escapes = false; bool fields_escapes = false; if (!call_analyzer->is_arg_stack(k)) { - // The argument global escapes, mark everything it could point to - set_escape_state(arg->_idx, PointsToNode::GlobalEscape); - global_escapes = true; + // The argument global escapes + set_escape_state(arg_ptn, PointsToNode::GlobalEscape); } else { + set_escape_state(arg_ptn, PointsToNode::ArgEscape); if (!call_analyzer->is_arg_local(k)) { // The argument itself doesn't escape, but any fields might - fields_escapes = true; + set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape); } - set_escape_state(arg->_idx, PointsToNode::ArgEscape); - copy_dependencies = true; } - - for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) { - uint pt = j.elem; - if (global_escapes) { - // The argument global escapes, mark everything it could point to - set_escape_state(pt, PointsToNode::GlobalEscape); - add_edge_from_fields(pt, _phantom_object, Type::OffsetBot); - } else { - set_escape_state(pt, PointsToNode::ArgEscape); - if (fields_escapes) { - // The argument itself doesn't escape, but any fields might. - // Use OffsetTop to indicate such case. - add_edge_from_fields(pt, _phantom_object, Type::OffsetTop); - } - } - } } } - if (copy_dependencies) - call_analyzer->copy_dependencies(_compile->dependencies()); + if (call_ptn != NULL && call_ptn->is_LocalVar()) { + // The call returns arguments. + assert(call_ptn->edge_count() > 0, "sanity"); + if (!call_analyzer->is_return_local()) { + // Returns also unknown object. + add_edge(call_ptn, phantom_obj); + } + } break; } } @@ -2414,229 +2602,213 @@ // or some other type of call, assume the worst case: all arguments // globally escape. 
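Hypothetical callees illustrating the bytecode-based argument classification used above (all names are illustrative): an argument stored into a static field escapes globally, an argument that is only read stays at ArgEscape, and a returned argument keeps being tracked at the call site.

    // Illustrative only.
    final class CalleeExamples {
        static Object sink;
        static void storesArg(Object o)     { sink = o; }        // o: GlobalEscape at the call site
        static int  readsArg(int[] a)       { return a.length; } // a: ArgEscape, fields untouched
        static Object returnsArg(Object o)  { return o; }        // returned argument: caller keeps tracking it
    }
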
{ - // adjust escape state for outgoing arguments - const TypeTuple * d = call->tf()->domain(); + const TypeTuple* d = call->tf()->domain(); for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { const Type* at = d->field_at(i); if (at->isa_oopptr() != NULL) { - Node *arg = call->in(i)->uncast(); - set_escape_state(arg->_idx, PointsToNode::GlobalEscape); - for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) { - uint pt = j.elem; - set_escape_state(pt, PointsToNode::GlobalEscape); - add_edge_from_fields(pt, _phantom_object, Type::OffsetBot); + Node* arg = call->in(i); + if (arg->is_AddP()) { + arg = get_addp_base(arg); } + assert(ptnode_adr(arg->_idx) != NULL, "should be defined already"); + set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape); } } } } } -void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) { - CallNode *call = resproj->in(0)->as_Call(); + +void ConnectionGraph::add_call_node(CallNode* call) { + assert(call->returns_pointer(), "only for call which returns pointer"); uint call_idx = call->_idx; - uint resproj_idx = resproj->_idx; + if (call->is_Allocate()) { + Node* k = call->in(AllocateNode::KlassNode); + const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr(); + assert(kt != NULL, "TypeKlassPtr required."); + ciKlass* cik = kt->klass(); - switch (call->Opcode()) { - case Op_Allocate: - { - Node *k = call->in(AllocateNode::KlassNode); - const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr(); - assert(kt != NULL, "TypeKlassPtr required."); - ciKlass* cik = kt->klass(); - - PointsToNode::EscapeState es; - uint edge_to; - if (cik->is_subclass_of(_compile->env()->Thread_klass()) || - !cik->is_instance_klass() || // StressReflectiveCode - cik->as_instance_klass()->has_finalizer()) { - es = PointsToNode::GlobalEscape; - edge_to = _phantom_object; // Could not be worse - } else { - es = PointsToNode::NoEscape; - edge_to = call_idx; - assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity"); - } - set_escape_state(call_idx, es); - add_pointsto_edge(resproj_idx, edge_to); - _processed.set(resproj_idx); - break; - } - - case Op_AllocateArray: - { - - Node *k = call->in(AllocateNode::KlassNode); - const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr(); - assert(kt != NULL, "TypeKlassPtr required."); - ciKlass* cik = kt->klass(); - - PointsToNode::EscapeState es; - uint edge_to; + PointsToNode::EscapeState es = PointsToNode::NoEscape; + bool scalar_replaceable = true; + if (call->is_AllocateArray()) { if (!cik->is_array_klass()) { // StressReflectiveCode es = PointsToNode::GlobalEscape; - edge_to = _phantom_object; } else { - es = PointsToNode::NoEscape; - edge_to = call_idx; - assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity"); int length = call->in(AllocateNode::ALength)->find_int_con(-1); if (length < 0 || length > EliminateAllocationArraySizeLimit) { // Not scalar replaceable if the length is not constant or too big. 
- ptnode_adr(call_idx)->set_scalar_replaceable(false); + scalar_replaceable = false; } } - set_escape_state(call_idx, es); - add_pointsto_edge(resproj_idx, edge_to); - _processed.set(resproj_idx); - break; + } else { // Allocate instance + if (cik->is_subclass_of(_compile->env()->Thread_klass()) || + !cik->is_instance_klass() || // StressReflectiveCode + cik->as_instance_klass()->has_finalizer()) { + es = PointsToNode::GlobalEscape; + } } + add_java_object(call, es); + PointsToNode* ptn = ptnode_adr(call_idx); + if (!scalar_replaceable && ptn->scalar_replaceable()) + ptn->set_scalar_replaceable(false); - case Op_CallStaticJava: - // For a static call, we know exactly what method is being called. - // Use bytecode estimator to record whether the call's return value escapes - { - bool done = true; - const TypeTuple *r = call->tf()->range(); - const Type* ret_type = NULL; + } else if (call->is_CallStaticJava()) { + // Call nodes could be different types: + // + // 1. CallDynamicJavaNode (what happened during call is unknown): + // + // - mapped to GlobalEscape JavaObject node if oop is returned; + // + // - all oop arguments are escaping globally; + // + // 2. CallStaticJavaNode (execute bytecode analysis if possible): + // + // - the same as CallDynamicJavaNode if can't do bytecode analysis; + // + // - mapped to GlobalEscape JavaObject node if unknown oop is returned; + // - mapped to NoEscape JavaObject node if non escaped object allocated + // during call is returned; + // - mapped to ArgEscape LocalVar node pointed to object arguments + // which are returned and does not escape during call; + // + // - oop arguments escaping status is defined by bytecode analysis; + // - if (r->cnt() > TypeFunc::Parms) - ret_type = r->field_at(TypeFunc::Parms); - - // Note: we use isa_ptr() instead of isa_oopptr() here because the - // _multianewarray functions return a TypeRawPtr. - if (ret_type == NULL || ret_type->isa_ptr() == NULL) { - _processed.set(resproj_idx); - break; // doesn't return a pointer type - } - ciMethod *meth = call->as_CallJava()->method(); - const TypeTuple * d = call->tf()->domain(); - if (meth == NULL) { - // not a Java method, assume global escape - set_escape_state(call_idx, PointsToNode::GlobalEscape); - add_pointsto_edge(resproj_idx, _phantom_object); + // For a static call, we know exactly what method is being called. + // Use bytecode estimator to record whether the call's return value escapes. + ciMethod* meth = call->as_CallJava()->method(); + if (meth == NULL) { + const char* name = call->as_CallStaticJava()->_name; + assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check"); + // Returns a newly allocated unescaped object. + add_java_object(call, PointsToNode::NoEscape); + ptnode_adr(call_idx)->set_scalar_replaceable(false); + } else { + BCEscapeAnalyzer* call_analyzer = meth->get_bcea(); + call_analyzer->copy_dependencies(_compile->dependencies()); + if (call_analyzer->is_return_allocated()) { + // Returns a newly allocated unescaped object, simply + // update dependency information. + // Mark it as NoEscape so that objects referenced by + // it's fields will be marked as NoEscape at least. + add_java_object(call, PointsToNode::NoEscape); + ptnode_adr(call_idx)->set_scalar_replaceable(false); } else { - BCEscapeAnalyzer *call_analyzer = meth->get_bcea(); - bool copy_dependencies = false; - - if (call_analyzer->is_return_allocated()) { - // Returns a newly allocated unescaped object, simply - // update dependency information. 
- // Mark it as NoEscape so that objects referenced by - // it's fields will be marked as NoEscape at least. - set_escape_state(call_idx, PointsToNode::NoEscape); - ptnode_adr(call_idx)->set_scalar_replaceable(false); - // Fields values are unknown - add_edge_from_fields(call_idx, _phantom_object, Type::OffsetBot); - add_pointsto_edge(resproj_idx, call_idx); - copy_dependencies = true; - } else { - // determine whether any arguments are returned - set_escape_state(call_idx, PointsToNode::ArgEscape); - bool ret_arg = false; - for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { - const Type* at = d->field_at(i); - if (at->isa_oopptr() != NULL) { - Node *arg = call->in(i)->uncast(); - - if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) { - ret_arg = true; - PointsToNode *arg_esp = ptnode_adr(arg->_idx); - if (arg_esp->node_type() == PointsToNode::UnknownType) - done = false; - else if (arg_esp->node_type() == PointsToNode::JavaObject) - add_pointsto_edge(resproj_idx, arg->_idx); - else - add_deferred_edge(resproj_idx, arg->_idx); - } - } + // Determine whether any arguments are returned. + const TypeTuple* d = call->tf()->domain(); + bool ret_arg = false; + for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { + if (d->field_at(i)->isa_ptr() != NULL && + call_analyzer->is_arg_returned(i - TypeFunc::Parms)) { + ret_arg = true; + break; } - if (done) { - copy_dependencies = true; - // is_return_local() is true when only arguments are returned. - if (!ret_arg || !call_analyzer->is_return_local()) { - // Returns unknown object. - add_pointsto_edge(resproj_idx, _phantom_object); - } - } } - if (copy_dependencies) - call_analyzer->copy_dependencies(_compile->dependencies()); - } - if (done) - _processed.set(resproj_idx); - break; - } - - default: - // Some other type of call, assume the worst case that the - // returned value, if any, globally escapes. - { - const TypeTuple *r = call->tf()->range(); - if (r->cnt() > TypeFunc::Parms) { - const Type* ret_type = r->field_at(TypeFunc::Parms); - - // Note: we use isa_ptr() instead of isa_oopptr() here because the - // _multianewarray functions return a TypeRawPtr. - if (ret_type->isa_ptr() != NULL) { - set_escape_state(call_idx, PointsToNode::GlobalEscape); - add_pointsto_edge(resproj_idx, _phantom_object); + if (ret_arg) { + add_local_var(call, PointsToNode::ArgEscape); + } else { + // Returns unknown object. + map_ideal_node(call, phantom_obj); } } - _processed.set(resproj_idx); } + } else { + // An other type of call, assume the worst case: + // returned value is unknown and globally escapes. + assert(call->Opcode() == Op_CallDynamicJava, "add failed case check"); + map_ideal_node(call, phantom_obj); } } -// Populate Connection Graph with Ideal nodes and create simple -// connection graph edges (do not need to check the node_type of inputs -// or to call PointsTo() to walk the connection graph). -void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) { - if (_processed.test(n->_idx)) - return; // No need to redefine node's state. +// Populate Connection Graph with PointsTo nodes and create simple +// connection graph edges. +void ConnectionGraph::build_connection_graph(Node *n, + Unique_Node_List *delayed_worklist, + PhaseTransform *phase) { + // During first iteration create PointsTo nodes and simple edges. + // During second iteration create the rest of edges. 
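Hedged Java shapes for the call classifications described above (all names illustrative): a static call whose bytecode analysis proves the result is freshly allocated maps to a NoEscape JavaObject, a call that may return one of its arguments maps to an ArgEscape LocalVar, and a call with an unknown target is treated as returning the unknown phantom object.

    // Illustrative only.
    final class CallShapes {
        static Point freshResult(int x, int y) { return new Point(x, y); }  // is_return_allocated()
        static Point passThrough(Point p)      { return p; }                // returns its argument
        // A virtual call with an unknown receiver is assumed to return an
        // unknown, globally escaping object; a class declaring a finalizer
        // makes its allocations GlobalEscape from the start.
    }
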
+ bool first_iteration = (delayed_worklist != NULL); + uint n_idx = n->_idx; + PointsToNode* n_ptn = ptnode_adr(n_idx); + if (first_iteration && n_ptn != NULL) + return; // No need to redefine PointsTo node during first iteration. + +#ifdef ASSERT + if (_verify && n_ptn->is_JavaObject()) + return; // Following code for JavaObject does not change graph. +#endif + if (n->is_Call()) { + if (!first_iteration) { + process_call_arguments(n->as_Call(), phase); + return; + } // Arguments to allocation and locking don't escape. - if (n->is_Allocate()) { - add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true); - record_for_optimizer(n); - } else if (n->is_Lock() || n->is_Unlock()) { + if (n->is_AbstractLock()) { // Put Lock and Unlock nodes on IGVN worklist to process them during - // the first IGVN optimization when escape information is still available. + // first IGVN optimization when escape information is still available. record_for_optimizer(n); - _processed.set(n->_idx); + } else if (n->is_Allocate()) { + add_call_node(n->as_Call()); + record_for_optimizer(n); } else { + if (n->is_CallStaticJava()) { + const char* name = n->as_CallStaticJava()->_name; + if (name != NULL && strcmp(name, "uncommon_trap") == 0) + return; // Skip uncommon traps + } // Don't mark as processed since call's arguments have to be processed. - PointsToNode::NodeType nt = PointsToNode::UnknownType; - PointsToNode::EscapeState es = PointsToNode::UnknownEscape; + delayed_worklist->push(n); // Check if a call returns an object. - const TypeTuple *r = n->as_Call()->tf()->range(); - if (r->cnt() > TypeFunc::Parms && - r->field_at(TypeFunc::Parms)->isa_ptr() && + if (n->as_Call()->returns_pointer() && n->as_Call()->proj_out(TypeFunc::Parms) != NULL) { - nt = PointsToNode::JavaObject; - if (!n->is_CallStaticJava()) { - // Since the called mathod is statically unknown assume - // the worst case that the returned value globally escapes. - es = PointsToNode::GlobalEscape; - } + add_call_node(n->as_Call()); } - add_node(n, nt, es, false); } return; } - // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because - // ThreadLocal has RawPrt type. - switch (n->Opcode()) { + // Put this check here to process call arguments since some call nodes + // point to phantom_obj. + if (n_ptn == phantom_obj || n_ptn == null_obj) + return; // Skip predefined nodes. + + assert(first_iteration || n->is_Store() || n->is_LoadStore() || + n_ptn != NULL && n_ptn->ideal_node() != NULL, + "node should be registered during second iteration"); + + int opcode = n->Opcode(); + switch (opcode) { case Op_AddP: { - add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false); + Node* base = get_addp_base(n); + PointsToNode* ptn = ptnode_adr(base->_idx); + if (first_iteration) { + // Field node is created for all field types. It will help in + // split_unique_types(). Note, there will be no uses of non oop fields + // in Connection Graph. + int offset = address_offset(n, phase); + add_field(n, PointsToNode::NoEscape, offset); + if (ptn == NULL) { + delayed_worklist->push(n); + return; // Process it later. + } + n_ptn = ptnode_adr(n_idx); + } else { + assert(ptn != NULL, "node should be registered"); + } + add_base(n_ptn->as_Field(), ptn); break; } case Op_CastX2P: - { // "Unsafe" memory access. - add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true); + { + if (first_iteration) { + // "Unsafe" memory access to unknown object. 
+ map_ideal_node(n, phantom_obj); + } else { + assert(false, "Op_CastX2P"); + } break; } case Op_CastPP: @@ -2644,392 +2816,183 @@ case Op_EncodeP: case Op_DecodeN: { - add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false); - int ti = n->in(1)->_idx; - PointsToNode::NodeType nt = ptnode_adr(ti)->node_type(); - if (nt == PointsToNode::UnknownType) { - _delayed_worklist.push(n); // Process it later. - break; - } else if (nt == PointsToNode::JavaObject) { - add_pointsto_edge(n->_idx, ti); - } else { - add_deferred_edge(n->_idx, ti); - } - _processed.set(n->_idx); + add_local_var_and_edge(n, PointsToNode::NoEscape, + n->in(1), delayed_worklist); break; } - case Op_ConP: + case Op_CMoveP: { - // assume all pointer constants globally escape except for null - PointsToNode::EscapeState es; - if (phase->type(n) == TypePtr::NULL_PTR) - es = PointsToNode::NoEscape; - else - es = PointsToNode::GlobalEscape; + if (first_iteration) { + add_local_var(n, PointsToNode::NoEscape); + // Do not add edges during first iteration because some could be + // not defined yet. + delayed_worklist->push(n); + return; // Process it later. + } else { + for (uint i = CMoveNode::IfFalse; i < n->req(); i++) { + Node* in = n->in(i); + if (in == NULL) + continue; // ignore NULL + Node* uncast_in = in->uncast(); + if (uncast_in->is_top() || uncast_in == n) + continue; // ignore top or inputs which go back this node - add_node(n, PointsToNode::JavaObject, es, true); - break; - } - case Op_ConN: - { - // assume all narrow oop constants globally escape except for null - PointsToNode::EscapeState es; - if (phase->type(n) == TypeNarrowOop::NULL_PTR) - es = PointsToNode::NoEscape; - else - es = PointsToNode::GlobalEscape; - - add_node(n, PointsToNode::JavaObject, es, true); - break; - } - case Op_CreateEx: - { - // assume that all exception objects globally escape - add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true); - break; - } - case Op_LoadKlass: - case Op_LoadNKlass: - { - add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true); - break; - } - case Op_LoadP: - case Op_LoadN: - { - const Type *t = phase->type(n); - if (t->make_ptr() == NULL) { - _processed.set(n->_idx); - return; - } - add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false); - break; - } - case Op_Parm: - { - _processed.set(n->_idx); // No need to redefine it state. - uint con = n->as_Proj()->_con; - if (con < TypeFunc::Parms) - return; - const Type *t = n->in(0)->as_Start()->_domain->field_at(con); - if (t->isa_ptr() == NULL) - return; - // We have to assume all input parameters globally escape - // (Note: passing 'false' since _processed is already set). - add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false); - break; - } - case Op_PartialSubtypeCheck: - { // Produces Null or notNull and is used in CmpP. 
- add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true); - break; - } - case Op_Phi: - { - const Type *t = n->as_Phi()->type(); - if (t->make_ptr() == NULL) { - // nothing to do if not an oop or narrow oop - _processed.set(n->_idx); - return; - } - add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false); - uint i; - for (i = 1; i < n->req() ; i++) { - Node* in = n->in(i); - if (in == NULL) - continue; // ignore NULL - in = in->uncast(); - if (in->is_top() || in == n) - continue; // ignore top or inputs which go back this node - int ti = in->_idx; - PointsToNode::NodeType nt = ptnode_adr(ti)->node_type(); - if (nt == PointsToNode::UnknownType) { - break; - } else if (nt == PointsToNode::JavaObject) { - add_pointsto_edge(n->_idx, ti); - } else { - add_deferred_edge(n->_idx, ti); + PointsToNode* ptn = ptnode_adr(in->_idx); + assert(ptn != NULL, "node should be registered"); + add_edge(n_ptn, ptn); } } - if (i >= n->req()) - _processed.set(n->_idx); - else - _delayed_worklist.push(n); break; } - case Op_Proj: + case Op_ConP: + case Op_ConN: { - // we are only interested in the oop result projection from a call - if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) { - const TypeTuple *r = n->in(0)->as_Call()->tf()->range(); - assert(r->cnt() > TypeFunc::Parms, "sanity"); - if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) { - add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false); - int ti = n->in(0)->_idx; - // The call may not be registered yet (since not all its inputs are registered) - // if this is the projection from backbranch edge of Phi. - if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) { - process_call_result(n->as_Proj(), phase); - } - if (!_processed.test(n->_idx)) { - // The call's result may need to be processed later if the call - // returns it's argument and the argument is not processed yet. - _delayed_worklist.push(n); - } - break; - } - } - _processed.set(n->_idx); - break; - } - case Op_Return: - { - if( n->req() > TypeFunc::Parms && - phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) { - // Treat Return value as LocalVar with GlobalEscape escape state. - add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false); - int ti = n->in(TypeFunc::Parms)->_idx; - PointsToNode::NodeType nt = ptnode_adr(ti)->node_type(); - if (nt == PointsToNode::UnknownType) { - _delayed_worklist.push(n); // Process it later. 
- break; - } else if (nt == PointsToNode::JavaObject) { - add_pointsto_edge(n->_idx, ti); + // assume all oop constants globally escape except for null + if (first_iteration) { + PointsToNode::EscapeState es; + if (phase->type(n) == TypePtr::NULL_PTR || + phase->type(n) == TypeNarrowOop::NULL_PTR) { + es = PointsToNode::NoEscape; } else { - add_deferred_edge(n->_idx, ti); + es = PointsToNode::GlobalEscape; } - } - _processed.set(n->_idx); - break; - } - case Op_StoreP: - case Op_StoreN: - { - const Type *adr_type = phase->type(n->in(MemNode::Address)); - adr_type = adr_type->make_ptr(); - if (adr_type->isa_oopptr()) { - add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false); + add_java_object(n, es); } else { - Node* adr = n->in(MemNode::Address); - if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL && - adr->in(AddPNode::Address)->is_Proj() && - adr->in(AddPNode::Address)->in(0)->is_Allocate()) { - add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false); - // We are computing a raw address for a store captured - // by an Initialize compute an appropriate address type. - int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); - assert(offs != Type::OffsetBot, "offset must be a constant"); - } else { - _processed.set(n->_idx); - return; - } + assert(false, "Op_ConP"); } break; } - case Op_StorePConditional: - case Op_CompareAndSwapP: - case Op_CompareAndSwapN: + case Op_CreateEx: { - const Type *adr_type = phase->type(n->in(MemNode::Address)); - adr_type = adr_type->make_ptr(); - if (adr_type->isa_oopptr()) { - add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false); + if (first_iteration) { + // assume that all exception objects globally escape + add_java_object(n, PointsToNode::GlobalEscape); } else { - _processed.set(n->_idx); - return; + assert(false, "Op_CreateEx"); } break; } - case Op_AryEq: - case Op_StrComp: - case Op_StrEquals: - case Op_StrIndexOf: + case Op_LoadKlass: + case Op_LoadNKlass: { - // char[] arrays passed to string intrinsics are not scalar replaceable. - add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false); - break; - } - case Op_ThreadLocal: - { - add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true); - break; - } - default: - ; - // nothing to do - } - return; -} - -void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) { - uint n_idx = n->_idx; - assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered"); - - // Don't set processed bit for AddP, LoadP, StoreP since - // they may need more then one pass to process. - // Also don't mark as processed Call nodes since their - // arguments may need more then one pass to process. - if (_processed.test(n_idx)) - return; // No need to redefine node's state. - - if (n->is_Call()) { - CallNode *call = n->as_Call(); - process_call_arguments(call, phase); - return; - } - - switch (n->Opcode()) { - case Op_AddP: - { - Node *base = get_addp_base(n); - int offset = address_offset(n, phase); - // Create a field edge to this node from everything base could point to. 
- for( VectorSetI i(PointsTo(base)); i.test(); ++i ) { - uint pt = i.elem; - add_field_edge(pt, n_idx, offset); - } - break; - } - case Op_CastX2P: - { - assert(false, "Op_CastX2P"); - break; - } - case Op_CastPP: - case Op_CheckCastPP: - case Op_EncodeP: - case Op_DecodeN: - { - int ti = n->in(1)->_idx; - assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered"); - if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) { - add_pointsto_edge(n_idx, ti); + if (first_iteration) { + // Unknown class is loaded + map_ideal_node(n, phantom_obj); } else { - add_deferred_edge(n_idx, ti); + assert(false, "Op_LoadKlass"); } - _processed.set(n_idx); break; } - case Op_ConP: - { - assert(false, "Op_ConP"); - break; - } - case Op_ConN: - { - assert(false, "Op_ConN"); - break; - } - case Op_CreateEx: - { - assert(false, "Op_CreateEx"); - break; - } - case Op_LoadKlass: - case Op_LoadNKlass: - { - assert(false, "Op_LoadKlass"); - break; - } case Op_LoadP: case Op_LoadN: + case Op_LoadPLocked: { - const Type *t = phase->type(n); + // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because + // ThreadLocal has RawPrt type. + const Type* t = phase->type(n); + if (t->make_ptr() != NULL) { + Node* adr = n->in(MemNode::Address); + if (!adr->is_AddP()) { + assert(phase->type(adr)->isa_rawptr(), "sanity"); + } else { + assert((ptnode_adr(adr->_idx) == NULL || + ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity"); + } + add_local_var_and_edge(n, PointsToNode::NoEscape, + adr, delayed_worklist); #ifdef ASSERT - if (t->make_ptr() == NULL) + } else if (!first_iteration) { + n->dump(1); assert(false, "Op_LoadP"); #endif - - Node* adr = n->in(MemNode::Address)->uncast(); - Node* adr_base; - if (adr->is_AddP()) { - adr_base = get_addp_base(adr); - } else { - adr_base = adr; } - - // For everything "adr_base" could point to, create a deferred edge from - // this node to each field with the same offset. - int offset = address_offset(adr, phase); - for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) { - uint pt = i.elem; - if (adr->is_AddP()) { - // Add field edge if it is missing. - add_field_edge(pt, adr->_idx, offset); - } - add_deferred_edge_to_fields(n_idx, pt, offset); - } break; } case Op_Parm: { - assert(false, "Op_Parm"); + if (first_iteration) { + // We have to assume all input parameters globally escape + map_ideal_node(n, phantom_obj); + } else { + assert(false, "Op_Parm"); + } break; } case Op_PartialSubtypeCheck: { - assert(false, "Op_PartialSubtypeCheck"); + if (first_iteration) { + // Produces Null or notNull and is used in only in CmpP so + // phantom_obj could be used. + map_ideal_node(n, phantom_obj); // Result is unknown + } else { + // Arguments are klasses which globally escape so do not need + // to point to them. + assert(false, "Op_PartialSubtypeCheck"); + } break; } case Op_Phi: { + // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because + // ThreadLocal has RawPrt type. + const Type* t = n->as_Phi()->type(); + if (t->make_ptr() != NULL) { + if (first_iteration) { + add_local_var(n, PointsToNode::NoEscape); + // Do not add edges during first iteration because some could be + // not defined yet. + delayed_worklist->push(n); + return; // Process it later. 
+ } else { + for (uint i = 1; i < n->req(); i++) { + Node* in = n->in(i); + if (in == NULL) + continue; // ignore NULL + Node* uncast_in = in->uncast(); + if (uncast_in->is_top() || uncast_in == n) + continue; // ignore top or inputs which go back this node + + PointsToNode* ptn = ptnode_adr(in->_idx); + assert(ptn != NULL, "node should be registered"); + add_edge(n_ptn, ptn); + } + } #ifdef ASSERT - const Type *t = n->as_Phi()->type(); - if (t->make_ptr() == NULL) + } else if (!first_iteration) { + n->dump(1); assert(false, "Op_Phi"); #endif - for (uint i = 1; i < n->req() ; i++) { - Node* in = n->in(i); - if (in == NULL) - continue; // ignore NULL - in = in->uncast(); - if (in->is_top() || in == n) - continue; // ignore top or inputs which go back this node - int ti = in->_idx; - PointsToNode::NodeType nt = ptnode_adr(ti)->node_type(); - assert(nt != PointsToNode::UnknownType, "all nodes should be known"); - if (nt == PointsToNode::JavaObject) { - add_pointsto_edge(n_idx, ti); - } else { - add_deferred_edge(n_idx, ti); - } } - _processed.set(n_idx); break; } case Op_Proj: { // we are only interested in the oop result projection from a call - if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) { - assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType, - "all nodes should be registered"); - const TypeTuple *r = n->in(0)->as_Call()->tf()->range(); - assert(r->cnt() > TypeFunc::Parms, "sanity"); - if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) { - process_call_result(n->as_Proj(), phase); - assert(_processed.test(n_idx), "all call results should be processed"); - break; - } + if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && + n->in(0)->as_Call()->returns_pointer()) { + add_local_var_and_edge(n, PointsToNode::NoEscape, + n->in(0), delayed_worklist); +#ifdef ASSERT + } else if (!first_iteration) { + n->dump(1); + assert(false, "Op_Proj"); +#endif } - assert(false, "Op_Proj"); break; } + case Op_Rethrow: // Exception object escapes case Op_Return: { + if (n->req() > TypeFunc::Parms && + phase->type(n->in(TypeFunc::Parms))->isa_oopptr()) { + // Treat Return value as LocalVar with GlobalEscape escape state. 
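A hedged example of the Return handling above: a value flowing into a Return is treated as GlobalEscape, so an allocation returned from a method is not scalar replaced in that method (an inlined caller may still be able to eliminate it).

    // Illustrative only: the allocation escapes through the return value.
    final class ReturnExample {
        static Point make(int x, int y) {
            return new Point(x, y);         // GlobalEscape via the Return node
        }
    }
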
+ add_local_var_and_edge(n, PointsToNode::GlobalEscape, + n->in(TypeFunc::Parms), delayed_worklist); #ifdef ASSERT - if( n->req() <= TypeFunc::Parms || - !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) { + } else if (!first_iteration) { assert(false, "Op_Return"); - } #endif - int ti = n->in(TypeFunc::Parms)->_idx; - assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered"); - if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) { - add_pointsto_edge(n_idx, ti); - } else { - add_deferred_edge(n_idx, ti); } - _processed.set(n_idx); break; } case Op_StoreP: @@ -3038,24 +3001,52 @@ case Op_CompareAndSwapP: case Op_CompareAndSwapN: { - Node *adr = n->in(MemNode::Address); - const Type *adr_type = phase->type(adr)->make_ptr(); + Node* adr = n->in(MemNode::Address); + const Type *adr_type = phase->type(adr); + adr_type = adr_type->make_ptr(); + if (adr_type->isa_oopptr() || + (opcode == Op_StoreP || opcode == Op_StoreN) && + (adr_type == TypeRawPtr::NOTNULL && + adr->in(AddPNode::Address)->is_Proj() && + adr->in(AddPNode::Address)->in(0)->is_Allocate())) { #ifdef ASSERT - if (!adr_type->isa_oopptr()) - assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP"); + assert(adr->is_AddP(), "expecting an AddP"); + if (adr_type == TypeRawPtr::NOTNULL) { + // Verify a raw address for a store captured by Initialize node. + int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); + assert(offs != Type::OffsetBot, "offset must be a constant"); + } #endif + if (first_iteration) { + delayed_worklist->push(n); + return; // Process it later. + } else { + // Point Address to Value + PointsToNode* adr_ptn = ptnode_adr(adr->_idx); + assert(adr_ptn != NULL && + adr_ptn->as_Field()->is_oop(), "node should be registered"); + Node *val = n->in(MemNode::ValueIn); + PointsToNode* ptn = ptnode_adr(val->_idx); + assert(ptn != NULL, "node should be registered"); + add_edge(adr_ptn, ptn); + } +#ifdef ASSERT + } else { + // Ignore copy the displaced header to the BoxNode (OSR compilation). + if (adr->is_BoxLock()) + break; - assert(adr->is_AddP(), "expecting an AddP"); - Node *adr_base = get_addp_base(adr); - Node *val = n->in(MemNode::ValueIn)->uncast(); - int offset = address_offset(adr, phase); - // For everything "adr_base" could point to, create a deferred edge - // to "val" from each field with the same offset. - for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) { - uint pt = i.elem; - // Add field edge if it is missing. - add_field_edge(pt, adr->_idx, offset); - add_edge_from_fields(pt, val->_idx, offset); + if (!adr->is_AddP()) { + n->dump(1); + assert(adr->is_AddP(), "expecting an AddP"); + } + // Ignore G1 barrier's stores. + if (!UseG1GC || (opcode != Op_StoreP) || + (adr_type != TypeRawPtr::BOTTOM)) { + n->dump(1); + assert(false, "not G1 barrier raw StoreP"); + } +#endif } break; } @@ -3064,49 +3055,118 @@ case Op_StrEquals: case Op_StrIndexOf: { - // char[] arrays passed to string intrinsic do not escape but - // they are not scalar replaceable. Adjust escape state for them. - // Start from in(2) edge since in(1) is memory edge. 
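A hedged sketch of the store/load handling above: a store creates a points-to edge from the field to the stored value, and a later load through the same field then refers to that value in the graph (Box and Point as in the earlier sketches; illustrative only).

    // Illustrative only: the store adds an edge from the field of 'b' to the
    // new Point, and the subsequent load through that field points to the
    // same object in the connection graph.
    final class StoreLoadExample {
        static int roundTrip() {
            Box b = new Box();
            b.val = new Point(5, 6);
            Point p = (Point) b.val;        // load points to the stored object
            return p.x + p.y;
        }
    }
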
- for (uint i = 2; i < n->req(); i++) { - Node* adr = n->in(i)->uncast(); - const Type *at = phase->type(adr); - if (!adr->is_top() && at->isa_ptr()) { - assert(at == Type::TOP || at == TypePtr::NULL_PTR || - at->isa_ptr() != NULL, "expecting an Ptr"); - if (adr->is_AddP()) { - adr = get_addp_base(adr); + if (first_iteration) { + add_local_var(n, PointsToNode::ArgEscape); + delayed_worklist->push(n); + return; // Process it later. + } else { + // char[] arrays passed to string intrinsic do not escape but + // they are not scalar replaceable. Adjust escape state for them. + // Start from in(2) edge since in(1) is memory edge. + for (uint i = 2; i < n->req(); i++) { + Node* adr = n->in(i); + const Type* at = phase->type(adr); + if (!adr->is_top() && at->isa_ptr()) { + assert(at == Type::TOP || at == TypePtr::NULL_PTR || + at->isa_ptr() != NULL, "expecting a pointer"); + if (adr->is_AddP()) { + adr = get_addp_base(adr); + } + PointsToNode* ptn = ptnode_adr(adr->_idx); + assert(ptn != NULL, "node should be registered"); + add_edge(n_ptn, ptn); } - // Mark as ArgEscape everything "adr" could point to. - set_escape_state(adr->_idx, PointsToNode::ArgEscape); } } - _processed.set(n_idx); break; } case Op_ThreadLocal: { - assert(false, "Op_ThreadLocal"); + if (first_iteration) { + add_java_object(n, PointsToNode::ArgEscape); + } else { + assert(false, "Op_ThreadLocal"); + } break; } default: - // This method should be called only for EA specific nodes. - ShouldNotReachHere(); + if (first_iteration) { + ; // nothing to do + } else { + // This method should be called only for EA specific nodes during second iteration. + ShouldNotReachHere(); + } } + return; } #ifndef PRODUCT -void ConnectionGraph::dump() { - bool first = true; +static const char *node_type_names[] = { + "UnknownType", + "JavaObject", + "LocalVar", + "Field", + "Arraycopy" +}; - uint size = nodes_size(); - for (uint ni = 0; ni < size; ni++) { - PointsToNode *ptn = ptnode_adr(ni); - PointsToNode::NodeType ptn_type = ptn->node_type(); +static const char *esc_names[] = { + "UnknownEscape", + "NoEscape", + "ArgEscape", + "GlobalEscape" +}; - if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL) +void PointsToNode::dump(bool print_state) const { + NodeType nt = node_type(); + tty->print("%s ", node_type_names[(int) nt]); + if (print_state) { + EscapeState es = escape_state(); + EscapeState fields_es = fields_escape_state(); + tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]); + if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) + tty->print("NSR"); + } + if (is_Field()) { + FieldNode* f = (FieldNode*)this; + tty->print("("); + for (uint i = 0; i < f->base_count(); i++) { + PointsToNode* b = f->base(i); + tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : "")); + } + tty->print(" )"); + } + tty->print("["); + for (uint i = 0; i < edge_count(); i++) { + PointsToNode* e = edge(i); + tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : ""); + } + tty->print(" ["); + for (uint i = 0; i < use_count(); i++) { + PointsToNode* u = use(i); + bool is_base = false; + if (PointsToNode::is_base_use(u)) { + is_base = true; + u = PointsToNode::get_use_node(u)->as_Field(); + } + tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? 
"cp" : ""); + } + tty->print(" ]] "); + if (_node == NULL) + tty->print_cr(""); + else + _node->dump(); +} + +void ConnectionGraph::dump(GrowableArray& ptnodes_worklist) { + bool first = true; + + uint ptnodes_length = ptnodes_worklist.length(); + for (uint i = 0; i < ptnodes_length; i++) { + PointsToNode *ptn = ptnodes_worklist.at(i); + if (ptn == NULL || !ptn->is_JavaObject()) continue; - PointsToNode::EscapeState es = escape_state(ptn->_node); - if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) { + PointsToNode::EscapeState es = ptn->escape_state(); + if (ptn->ideal_node()->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) { if (first) { tty->cr(); tty->print("======== Connection graph for "); @@ -3114,24 +3174,17 @@ tty->cr(); first = false; } - tty->print("%6d ", ni); ptn->dump(); - // Print all locals which reference this allocation - for (uint li = ni; li < size; li++) { - PointsToNode *ptn_loc = ptnode_adr(li); - PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type(); - if ( ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL && - ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni ) { - ptnode_adr(li)->dump(false); + // Print all locals and fields which reference this allocation + uint count = ptn->use_count(); + for (uint i = 0; i < count; i++) { + PointsToNode* use = ptn->use(i); + if (use->is_LocalVar()) { + use->dump(Verbose); + } else if (Verbose) { + use->dump(); } } - if (Verbose) { - // Print all fields which reference this allocation - for (uint i = 0; i < ptn->edge_count(); i++) { - uint ei = ptn->edge_target(i); - ptnode_adr(ei)->dump(false); - } - } tty->cr(); } } --- old/src/share/vm/opto/escape.hpp Thu Feb 23 12:08:46 2012 +++ new/src/share/vm/opto/escape.hpp Thu Feb 23 12:08:46 2012 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -115,18 +115,52 @@ class CallNode; class PhiNode; class PhaseTransform; +class PointsToNode; class Type; class TypePtr; class VectorSet; -class PointsToNode { -friend class ConnectionGraph; +// Wrapper for GrowableArray +class PointsToList { + GrowableArray _list; // List of nodes this node points to public: + uint count() const { return _list.length(); } + PointsToNode* element(uint e) const { return _list.at(e); } + + bool add(PointsToNode* elem) { + // Returns TRUE if elem is added. + bool missed = !_list.contains(elem); + if (missed) + _list.append(elem); // Add new element + return missed; + } +}; + +class JavaObjectNode; +class LocalVarNode; +class FieldNode; +class ArraycopyNode; + +// ConnectionGraph nodes +class PointsToNode : public ResourceObj { + PointsToList _edges; // List of nodes this node points to + PointsToList _uses; // List of nodes which point to this node + + const u1 _type; // NodeType + u1 _flags; // NodeFlags + u1 _escape; // EscapeState of object + u1 _fields_escape; // EscapeState of object's fields + + const Node* _node; // Ideal node corresponding to this PointsTo node. 
+ const uint _idx; // Cached ideal node's _idx + +public: typedef enum { UnknownType = 0, JavaObject = 1, LocalVar = 2, - Field = 3 + Field = 3, + Arraycopy = 4 } NodeType; typedef enum { @@ -140,179 +174,334 @@ } EscapeState; typedef enum { - UnknownEdge = 0, - PointsToEdge = 1, - DeferredEdge = 2, - FieldEdge = 3 - } EdgeType; + ScalarReplaceable = 1, // Not escaped object could be replaced with scalar + PointsToUnknown = 2, // Has edge to phantom_object + ArraycopySrc = 4, // Has edge from Arraycopy node + ArraycopyDst = 8 // Has edge to Arraycopy node + } NodeFlags; -private: - enum { - EdgeMask = 3, - EdgeShift = 2, - INITIAL_EDGE_COUNT = 4 - }; + PointsToNode(Node* n, EscapeState es, NodeType type): + _node(n), + _idx(n->_idx), + _type((u1)type), + _escape((u1)es), + _fields_escape((u1)es), + _flags(ScalarReplaceable) { + assert(n != NULL && es != UnknownEscape, "sanity"); + } - NodeType _type; - EscapeState _escape; - GrowableArray* _edges; // outgoing edges - Node* _node; // Ideal node corresponding to this PointsTo node. - int _offset; // Object fields offsets. - bool _scalar_replaceable; // Not escaped object could be replaced with scalar - bool _has_unknown_ptr; // Has edge to phantom_object + Node* ideal_node() const { return (Node*)_node; } + int idx() const { return _idx; } -public: - PointsToNode(): - _type(UnknownType), - _escape(UnknownEscape), - _edges(NULL), - _node(NULL), - _offset(-1), - _has_unknown_ptr(false), - _scalar_replaceable(true) {} + bool is_JavaObject() const { return _type == (u1)JavaObject; } + bool is_LocalVar() const { return _type == (u1)LocalVar; } + bool is_Field() const { return _type == (u1)Field; } + bool is_Arraycopy() const { return _type == (u1)Arraycopy; } + JavaObjectNode* as_JavaObject() { assert(is_JavaObject(),""); return (JavaObjectNode*)this; } + LocalVarNode* as_LocalVar() { assert(is_LocalVar(),""); return (LocalVarNode*)this; } + FieldNode* as_Field() { assert(is_Field(),""); return (FieldNode*)this; } + ArraycopyNode* as_Arraycopy() { assert(is_Arraycopy(),""); return (ArraycopyNode*)this; } - EscapeState escape_state() const { return _escape; } - NodeType node_type() const { return _type;} - int offset() { return _offset;} - bool scalar_replaceable() { return _scalar_replaceable;} - bool has_unknown_ptr() { return _has_unknown_ptr;} + EscapeState escape_state() const { return (EscapeState)_escape; } + void set_escape_state(EscapeState state) { _escape = (u1)state; } - void set_offset(int offs) { _offset = offs;} - void set_escape_state(EscapeState state) { _escape = state; } - void set_node_type(NodeType ntype) { - assert(_type == UnknownType || _type == ntype, "Can't change node type"); - _type = ntype; - } - void set_scalar_replaceable(bool v) { _scalar_replaceable = v; } - void set_has_unknown_ptr() { _has_unknown_ptr = true; } + EscapeState fields_escape_state() const { return (EscapeState)_fields_escape; } + void set_fields_escape_state(EscapeState state) { _fields_escape = (u1)state; } - // count of outgoing edges - uint edge_count() const { return (_edges == NULL) ? 
0 : _edges->length(); } + bool has_unknown_ptr() const { return (_flags & PointsToUnknown) != 0; } + void set_has_unknown_ptr() { _flags |= PointsToUnknown; } - // node index of target of outgoing edge "e" - uint edge_target(uint e) const { - assert(_edges != NULL, "valid edge index"); - return (_edges->at(e) >> EdgeShift); + bool arraycopy_src() const { return (_flags & ArraycopySrc) != 0; } + void set_arraycopy_src() { _flags |= ArraycopySrc; } + bool arraycopy_dst() const { return (_flags & ArraycopyDst) != 0; } + void set_arraycopy_dst() { _flags |= ArraycopyDst; } + + bool scalar_replaceable() const { return (_flags & ScalarReplaceable) != 0;} + void set_scalar_replaceable(bool v) { + if (v) + _flags |= ScalarReplaceable; + else + _flags &= ~ScalarReplaceable; } - // type of outgoing edge "e" - EdgeType edge_type(uint e) const { - assert(_edges != NULL, "valid edge index"); - return (EdgeType) (_edges->at(e) & EdgeMask); - } - // add a edge of the specified type pointing to the specified target - void add_edge(uint targIdx, EdgeType et); + uint edge_count() const { return _edges.count(); } + PointsToNode* edge(uint e) const { return _edges.element(e); } + bool add_edge(PointsToNode* edge) { return _edges.add(edge); } - // remove an edge of the specified type pointing to the specified target - void remove_edge(uint targIdx, EdgeType et); + uint use_count() const { return _uses.count(); } + PointsToNode* use(uint e) const { return _uses.element(e); } + bool add_use(PointsToNode* use) { return _uses.add(use); } + // Mark base edge use to distinguish from stored value edge. + bool add_base_use(FieldNode* use) { return _uses.add((PointsToNode*)((intptr_t)use + 1)); } + static bool is_base_use(PointsToNode* use) { return (((intptr_t)use) & 1); } + static PointsToNode* get_use_node(PointsToNode* use) { return (PointsToNode*)(((intptr_t)use) & ~1); } + + // Return true if this node points to specified node or nodes it points to. + bool points_to(JavaObjectNode* ptn) const; + + // Return true if nodes points only to non-escaped allocations. + bool not_escaped_allocation(); + + // Return true if one node points to an other. + bool meet(PointsToNode* ptn); + #ifndef PRODUCT + NodeType node_type() const { return (NodeType)_type;} void dump(bool print_state=true) const; #endif }; +class LocalVarNode: public PointsToNode { +public: + LocalVarNode(Node* n, EscapeState es): + PointsToNode(n, es, LocalVar) {} +}; + +class JavaObjectNode: public PointsToNode { +public: + JavaObjectNode(Node* n, EscapeState es): + PointsToNode(n, es, JavaObject) { + if (es > NoEscape) + set_scalar_replaceable(false); + } +}; + +class FieldNode: public PointsToNode { + PointsToList _bases; // List of JavaObject nodes which point to this node + const int _offset; // Field's offset. + const bool _is_oop; // Field points to object + bool _has_unknown_base; // Has phantom_object base +public: + FieldNode(Node* n, EscapeState es, int offs, bool is_oop): + PointsToNode(n, es, Field), + _offset(offs), _is_oop(is_oop), + _has_unknown_base(false) {} + + int offset() const { return _offset;} + bool is_oop() const { return _is_oop;} + bool has_unknown_base() const { return _has_unknown_base; } + void set_has_unknown_base() { _has_unknown_base = true; } + + uint base_count() const { return _bases.count(); } + PointsToNode* base(uint e) const { return _bases.element(e); } + bool add_base(PointsToNode* base) { return _bases.add(base); } +#ifdef ASSERT + // Return true if bases points to this java object. 
+  bool has_base(JavaObjectNode* ptn) const;
+#endif
+
+};
+
+class ArraycopyNode: public PointsToNode {
+public:
+  ArraycopyNode(Node* n, EscapeState es):
+    PointsToNode(n, es, Arraycopy) {}
+};
+
 class ConnectionGraph: public ResourceObj {
 private:
-  GrowableArray<PointsToNode>   _nodes; // Connection graph nodes indexed
-                                        // by ideal node index.
+  GrowableArray<PointsToNode*>  _nodes; // Map from ideal nodes to
+                                        // ConnectionGraph nodes.
-  Unique_Node_List  _delayed_worklist; // Nodes to be processed before
-                                       // the call build_connection_graph().
+  GrowableArray<PointsToNode*>  _worklist; // Nodes to be processed
-  GrowableArray  _mergemem_worklist; // List of all MergeMem nodes
+  bool            _collecting; // Indicates whether escape information
+                               // is still being collected. If false,
+                               // no new nodes will be processed.
-  VectorSet       _processed; // Records which nodes have been
-                              // processed.
+  bool               _verify; // verify graph
-  bool            _collecting; // Indicates whether escape information
-                               // is still being collected. If false,
-                               // no new nodes will be processed.
+  JavaObjectNode* phantom_obj; // Unknown object
+  JavaObjectNode*    null_obj;
+  Node*             _pcmp_neq; // ConI(#CC_GT)
+  Node*              _pcmp_eq; // ConI(#CC_EQ)
-  bool               _progress; // Indicates whether new Graph's edges
-                                // were created.
+  Compile*           _compile; // Compile object for current compilation
+  PhaseIterGVN*         _igvn; // Value numbering
-  uint                _phantom_object; // Index of globally escaping object
-                                       // that pointer values loaded from
-                                       // a field which has not been set
-                                       // are assumed to point to.
-  uint                      _oop_null; // ConP(#NULL)->_idx
-  uint                     _noop_null; // ConN(#NULL)->_idx
-  Node*                     _pcmp_neq; // ConI(#CC_GT)
-  Node*                      _pcmp_eq; // ConI(#CC_EQ)
+  Unique_Node_List ideal_nodes; // Used by CG construction and types splitting.
-  Compile *                  _compile; // Compile object for current compilation
-  PhaseIterGVN *                _igvn; // Value numbering
-  // Address of an element in _nodes.  Used when the element is to be modified
-  PointsToNode *ptnode_adr(uint idx) const {
+  PointsToNode* ptnode_adr(uint idx) const {
     // There should be no new ideal nodes during ConnectionGraph build,
-    // growableArray::adr_at() will throw assert otherwise.
-    return _nodes.adr_at(idx);
+    // growableArray::at() will throw assert otherwise.
+    return _nodes.at(idx);
   }
   uint nodes_size() const { return _nodes.length(); }
-  bool is_null_ptr(uint idx) const { return (idx == _noop_null || idx == _oop_null); }
+  // Add nodes to ConnectionGraph.
+  void add_local_var(Node* n, PointsToNode::EscapeState es);
+  void add_java_object(Node* n, PointsToNode::EscapeState es);
+  void add_field(Node* n, PointsToNode::EscapeState es, int offset);
+  void add_arraycopy(Node* n, PointsToNode::EscapeState es, PointsToNode* src, PointsToNode* dst);
-  // Add node to ConnectionGraph.
-  void add_node(Node *n, PointsToNode::NodeType nt, PointsToNode::EscapeState es, bool done);
+  // Map ideal node to existing PointsTo node (usually phantom_object).
+  void map_ideal_node(Node *n, PointsToNode* ptn) {
+    assert(ptn != NULL, "only existing PointsTo node");
+    _nodes.at_put(n->_idx, ptn);
+  }
-  // offset of a field reference
-  int address_offset(Node* adr, PhaseTransform *phase);
-
   // compute the escape state for arguments to a call
   void process_call_arguments(CallNode *call, PhaseTransform *phase);
-  // compute the escape state for the return value of a call
-  void process_call_result(ProjNode *resproj, PhaseTransform *phase);
+  // add PointsToNode node corresponding to a call
+  void add_call_node(CallNode* call);
-  // Populate Connection Graph with Ideal nodes.
-  void record_for_escape_analysis(Node *n, PhaseTransform *phase);
-  // Build Connection Graph and set nodes escape state.
-  void build_connection_graph(Node *n, PhaseTransform *phase);
+  void build_connection_graph(Node *n, Unique_Node_List *delayed_worklist, PhaseTransform *phase);
-  // walk the connection graph starting at the node corresponding to "n" and
-  // add the index of everything it could point to, to "ptset". This may cause
-  // Phi's encountered to get (re)processed (which requires "phase".)
-  VectorSet* PointsTo(Node * n);
+  // Add all references to this JavaObject node.
+  int add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist);
-  // Reused structures for PointsTo().
-  VectorSet            pt_ptset;
-  VectorSet            pt_visited;
-  GrowableArray<uint>  pt_worklist;
+  // Put node on worklist if it is (or was) not there.
+  void add_to_worklist(PointsToNode* pt) {
+    _worklist.push(pt);
+    return;
+  }
-  // Edge manipulation. The "from_i" and "to_i" arguments are the
-  // node indices of the source and destination of the edge
-  void add_pointsto_edge(uint from_i, uint to_i);
-  void add_deferred_edge(uint from_i, uint to_i);
-  void add_field_edge(uint from_i, uint to_i, int offs);
+  // Put on worklist all uses of this node.
+  void add_uses_to_worklist(PointsToNode* pt) {
+    uint count = pt->use_count();
+    for (uint i = 0; i < count; i++)
+      _worklist.push(pt->use(i));
+  }
+  // Put on worklist all field's uses and related field nodes.
+  void add_field_uses_to_worklist(FieldNode* field);
+
+  // Put on worklist all related field nodes.
+  void add_fields_to_worklist(FieldNode* field, PointsToNode* base);
+
+  // Find fields which have unknown value.
+  int find_field_value(FieldNode* field);
+
+  // Find fields initializing values for allocations.
+  int find_init_values(JavaObjectNode* ptn, PointsToNode* init_val, PhaseTransform* phase);
+
+  // Set the escape state of an object and its fields.
+  void set_escape_state(PointsToNode* ptn, PointsToNode::EscapeState esc) {
+    // Don't change non-escaping state of NULL pointer.
+    if (ptn != null_obj) {
+      if (ptn->escape_state() < esc)
+        ptn->set_escape_state(esc);
+      if (ptn->fields_escape_state() < esc)
+        ptn->set_fields_escape_state(esc);
+    }
+  }
+  void set_fields_escape_state(PointsToNode* ptn, PointsToNode::EscapeState esc) {
+    // Don't change non-escaping state of NULL pointer.
+    if (ptn != null_obj) {
+      if (ptn->fields_escape_state() < esc)
+        ptn->set_fields_escape_state(esc);
+    }
+  }
+
+  // Propagate GlobalEscape and ArgEscape escape states to all nodes
+  // and check that we still have non-escaped java objects.
+  bool find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
+                                GrowableArray<JavaObjectNode*>& non_escaped_worklist);
+
+  // Adjust scalar_replaceable state after Connection Graph is built.
+  void adjust_scalar_replaceable_state(JavaObjectNode* jobj);
+
+  // Optimize objects compare.
+  Node* optimize_ptr_compare(Node* n);
+
+  // Returns unique corresponding java object or NULL.
+  JavaObjectNode* unique_java_object(Node *n);
+
   // Add an edge of the specified type pointing to the specified target.
-  // Set _progress if new edge is added.
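Note on the two setters above: set_escape_state() and set_fields_escape_state() never lower a state, so repeated calls behave as a max-merge over the ordered EscapeState values (NoEscape below ArgEscape below GlobalEscape), which is what lets worklist propagation revisit a node any number of times and still converge. The following standalone sketch illustrates that idiom only; the enum values are assumed for the example and the names are not the HotSpot declarations.

// Standalone sketch (not HotSpot code): escape states form a small ordered
// lattice, so "setting" a state is really a max() merge that only moves up.
#include <algorithm>
#include <cassert>

enum EscapeState {      // assumed ordering, matching the '<' comparisons above
  NoEscape     = 1,     // object does not escape the compiled method
  ArgEscape    = 2,     // escapes through call arguments only
  GlobalEscape = 3      // escapes the method and the thread
};

struct ObjectState {
  EscapeState object_es = NoEscape;
  EscapeState fields_es = NoEscape;

  // Mirrors the setters above: raise both states, never lower them.
  void raise(EscapeState es) {
    object_es = std::max(object_es, es);
    fields_es = std::max(fields_es, es);
  }
};

int main() {
  ObjectState s;
  s.raise(ArgEscape);   // object passed as a call argument
  s.raise(NoEscape);    // a later, weaker observation must not downgrade it
  assert(s.object_es == ArgEscape && s.fields_es == ArgEscape);
  return 0;
}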
-  void add_edge(PointsToNode *f, uint to_i, PointsToNode::EdgeType et) {
-    uint e_cnt = f->edge_count();
-    f->add_edge(to_i, et);
-    _progress |= (f->edge_count() != e_cnt);
+  bool add_edge(PointsToNode* from, PointsToNode* to) {
+    assert(!from->is_Field() || from->as_Field()->is_oop(), "sanity");
+
+    if (to == phantom_obj) {
+      if (from->has_unknown_ptr()) {
+        return false; // already points to phantom_obj
+      }
+      from->set_has_unknown_ptr();
+    }
+
+    bool is_new = from->add_edge(to);
+    assert(to != phantom_obj || is_new, "sanity");
+    if (is_new) { // New edge?
+      assert(!_verify, "graph is incomplete");
+      is_new = to->add_use(from);
+      assert(is_new, "use should be also new");
+    }
+    return is_new;
   }
-  // Add an edge to node given by "to_i" from any field of adr_i whose offset
-  // matches "offset" A deferred edge is added if to_i is a LocalVar, and
-  // a pointsto edge is added if it is a JavaObject
-  void add_edge_from_fields(uint adr, uint to_i, int offs);
+  // Add an edge from Field node to its base and back.
+  bool add_base(FieldNode* from, PointsToNode* to) {
+    assert(!to->is_Arraycopy(), "sanity");
+    if (to == phantom_obj) {
+      if (from->has_unknown_base()) {
+        return false; // already has phantom_obj base
+      }
+      from->set_has_unknown_base();
+    }
+    bool is_new = from->add_base(to);
+    assert(to != phantom_obj || is_new, "sanity");
+    if (is_new) { // New edge?
+      assert(!_verify, "graph is incomplete");
+      if (to == null_obj)
+        return is_new; // Don't add fields to NULL pointer.
+      if (to->is_JavaObject()) {
+        is_new = to->add_edge(from);
+      } else {
+        is_new = to->add_base_use(from);
+      }
+      assert(is_new, "use should be also new");
+    }
+    return is_new;
+  }
-  // Add a deferred edge from node given by "from_i" to any field
-  // of adr_i whose offset matches "offset"
-  void add_deferred_edge_to_fields(uint from_i, uint adr, int offs);
+  // Add LocalVar node and edge if possible
+  void add_local_var_and_edge(Node* n, PointsToNode::EscapeState es, Node* to,
+                              Unique_Node_List *delayed_worklist) {
+    PointsToNode* ptn = ptnode_adr(to->_idx);
+    if (delayed_worklist != NULL) { // First iteration of CG construction
+      add_local_var(n, es);
+      if (ptn == NULL) {
+        delayed_worklist->push(n);
+        return; // Process it later.
+      }
+    } else {
+      assert(ptn != NULL, "node should be registered");
+    }
+    add_edge(ptnode_adr(n->_idx), ptn);
+  }
+  // Helper functions
+  bool is_oop_field(Node* n, int offset);
+  static Node* get_addp_base(Node *addp);
+  static Node* find_second_addp(Node* addp, Node* n);
-  // Remove outgoing deferred edges from the node referenced by "ni".
-  // Any outgoing edges from the target of the deferred edge are copied
-  // to "ni".
-  void remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited);
+  // offset of a field reference
+  int address_offset(Node* adr, PhaseTransform *phase);
+
+  // Propagate unique types created for unescaped allocated objects
+  // through the graph
+  void split_unique_types(GrowableArray<Node *> &alloc_worklist);
+
+  // Helper methods for unique types split.
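Note on add_base() above: the reverse link from a base object back to the field is recorded either as an ordinary edge (for a JavaObject base) or through add_base_use(), which encodes a "base use" flag directly in the stored pointer by adding 1; is_base_use() and get_use_node(), shown earlier in this class, test and strip that low bit. A self-contained sketch of the tagging trick follows; it assumes the list elements are at least 2-byte aligned (true for the arena-allocated PointsToNode objects), and its names are illustrative, not the HotSpot API.

// Standalone sketch (not HotSpot code) of the low-bit pointer tagging that
// add_base_use()/is_base_use()/get_use_node() use to mark "base" uses
// in-place in a field's use list.
#include <cassert>
#include <cstdint>
#include <vector>

struct UseNode { int id; };   // stand-in for PointsToNode

// Assumes UseNode objects are at least 2-byte aligned, so bit 0 is free.
static UseNode* tag_base(UseNode* u) {
  return reinterpret_cast<UseNode*>(reinterpret_cast<std::intptr_t>(u) + 1);
}
static bool is_base(UseNode* u) {
  return (reinterpret_cast<std::intptr_t>(u) & 1) != 0;
}
static UseNode* untag(UseNode* u) {
  return reinterpret_cast<UseNode*>(reinterpret_cast<std::intptr_t>(u) & ~std::intptr_t(1));
}

int main() {
  UseNode a{1}, b{2};
  std::vector<UseNode*> uses;
  uses.push_back(&a);             // ordinary (stored value) use
  uses.push_back(tag_base(&b));   // base use, flagged inside the same list

  assert(!is_base(uses[0]) && untag(uses[0]) == &a);
  assert( is_base(uses[1]) && untag(uses[1]) == &b);
  return 0;
}

The payoff is that a field's use list stays a plain pointer array: no parallel bit set or wrapper struct is needed to tell base uses apart from stored-value uses.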
+  bool split_AddP(Node *addp, Node *base, PhaseGVN *igvn);
+
+  PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created);
+  PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn);
+
+  void move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis, PhaseGVN *igvn);
+  Node* find_inst_mem(Node* mem, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn);
+  Node* step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop);
+
+
+  GrowableArray  _mergemem_worklist; // List of all MergeMem nodes
+
   Node_Array _node_map; // used for bookeeping during type splitting
                         // Used for the following purposes:
                         // Memory Phi    - most recent unique Phi split out
@@ -320,21 +509,18 @@
                         // MemNode       - new memory input for this node
                         // ChecCastPP    - allocation that this is a cast of
                         // allocation    - CheckCastPP of the allocation
-  bool split_AddP(Node *addp, Node *base, PhaseGVN *igvn);
-  PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created);
-  PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn);
-  void move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis, PhaseGVN *igvn);
-  Node *find_inst_mem(Node *mem, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn);
-  // Propagate unique types created for unescaped allocated objects
-  // through the graph
-  void split_unique_types(GrowableArray<Node *> &alloc_worklist);
-  // manage entries in _node_map
-  void set_map(int idx, Node *n)  { _node_map.map(idx, n); }
-  Node *get_map(int idx)          { return _node_map[idx]; }
-  PhiNode *get_map_phi(int idx) {
-    Node *phi = _node_map[idx];
+
+  void set_map(Node* from, Node* to) {
+    ideal_nodes.push(from);
+    _node_map.map(from->_idx, to);
+  }
+
+  Node* get_map(int idx) { return _node_map[idx]; }
+
+  PhiNode* get_map_phi(int idx) {
+    Node* phi = _node_map[idx];
     return (phi == NULL) ? NULL : phi->as_Phi();
   }
@@ -344,23 +530,6 @@
     _igvn->add_users_to_worklist(n);
   }
-  // Set the escape state of a node
-  void set_escape_state(uint ni, PointsToNode::EscapeState es);
-
-  // Find fields initializing values for allocations.
-  void find_init_values(Node* n, VectorSet* visited, PhaseTransform* phase);
-
-  // Adjust escape state after Connection Graph is built.
-  void adjust_escape_state(Node* n);
-
-  // Propagate escape states to referenced nodes.
-  bool propagate_escape_state(GrowableArray* cg_worklist,
-                              GrowableArray* worklist,
-                              PointsToNode::EscapeState esc_state);
-
-  // Optimize objects compare.
-  Node* optimize_ptr_compare(Node* n);
-
   // Compute the escape information
   bool compute_escape();
@@ -373,11 +542,10 @@
   // Perform escape analysis
   static void do_analysis(Compile *C, PhaseIterGVN *igvn);
-  // escape state of a node
-  PointsToNode::EscapeState escape_state(Node *n);
+  bool not_global_escape(Node *n);
 #ifndef PRODUCT
-  void dump();
+  void dump(GrowableArray<PointsToNode*>& ptnodes_worklist);
 #endif
 };
--- old/src/share/vm/opto/phase.cpp	Thu Feb 23 12:08:46 2012
+++ new/src/share/vm/opto/phase.cpp	Thu Feb 23 12:08:46 2012
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -39,8 +39,9 @@ // The next timers used for LogCompilation elapsedTimer Phase::_t_parser; -elapsedTimer Phase::_t_escapeAnalysis; elapsedTimer Phase::_t_optimizer; +elapsedTimer Phase::_t_escapeAnalysis; +elapsedTimer Phase::_t_buildConnectionGraph; elapsedTimer Phase::_t_idealLoop; elapsedTimer Phase::_t_ccp; elapsedTimer Phase::_t_matcher; @@ -51,6 +52,7 @@ elapsedTimer Phase::_t_graphReshaping; elapsedTimer Phase::_t_scheduler; elapsedTimer Phase::_t_blockOrdering; +elapsedTimer Phase::_t_macroEliminate; elapsedTimer Phase::_t_macroExpand; elapsedTimer Phase::_t_peephole; elapsedTimer Phase::_t_codeGeneration; @@ -104,6 +106,8 @@ if (DoEscapeAnalysis) { // EA is part of Optimizer. tty->print_cr (" escape analysis: %3.3f sec", Phase::_t_escapeAnalysis.seconds()); + tty->print_cr (" build connection graph: %3.3f sec", Phase::_t_buildConnectionGraph.seconds()); + tty->print_cr (" macroEliminate : %3.3f sec", Phase::_t_macroEliminate.seconds()); } tty->print_cr (" iterGVN : %3.3f sec", Phase::_t_iterGVN.seconds()); tty->print_cr (" idealLoop : %3.3f sec", Phase::_t_idealLoop.seconds()); @@ -112,9 +116,10 @@ tty->print_cr (" iterGVN2 : %3.3f sec", Phase::_t_iterGVN2.seconds()); tty->print_cr (" macroExpand : %3.3f sec", Phase::_t_macroExpand.seconds()); tty->print_cr (" graphReshape : %3.3f sec", Phase::_t_graphReshaping.seconds()); - double optimizer_subtotal = Phase::_t_iterGVN.seconds() + + double optimizer_subtotal = Phase::_t_iterGVN.seconds() + Phase::_t_iterGVN2.seconds() + + Phase::_t_escapeAnalysis.seconds() + Phase::_t_macroEliminate.seconds() + Phase::_t_idealLoop.seconds() + Phase::_t_ccp.seconds() + - Phase::_t_graphReshaping.seconds(); + Phase::_t_macroExpand.seconds() + Phase::_t_graphReshaping.seconds(); double percent_of_optimizer = ((optimizer_subtotal == 0.0) ? 0.0 : (optimizer_subtotal / Phase::_t_optimizer.seconds() * 100.0)); tty->print_cr (" subtotal : %3.3f sec, %3.2f %%", optimizer_subtotal, percent_of_optimizer); } --- old/src/share/vm/opto/phase.hpp Thu Feb 23 12:08:47 2012 +++ new/src/share/vm/opto/phase.hpp Thu Feb 23 12:08:47 2012 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -72,8 +72,12 @@ // The next timers used for LogCompilation static elapsedTimer _t_parser; - static elapsedTimer _t_escapeAnalysis; static elapsedTimer _t_optimizer; +public: + // ConnectionGraph can't be Phase since it is used after EA done. + static elapsedTimer _t_escapeAnalysis; + static elapsedTimer _t_buildConnectionGraph; +protected: static elapsedTimer _t_idealLoop; static elapsedTimer _t_ccp; static elapsedTimer _t_matcher; @@ -84,6 +88,7 @@ static elapsedTimer _t_graphReshaping; static elapsedTimer _t_scheduler; static elapsedTimer _t_blockOrdering; + static elapsedTimer _t_macroEliminate; static elapsedTimer _t_macroExpand; static elapsedTimer _t_peephole; static elapsedTimer _t_codeGeneration; --- old/src/share/vm/utilities/growableArray.hpp Thu Feb 23 12:08:48 2012 +++ new/src/share/vm/utilities/growableArray.hpp Thu Feb 23 12:08:47 2012 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -298,6 +298,14 @@ _len--; } + void delete_at(int index) { + assert(0 <= index && index < _len, "illegal index"); + if (index < --_len) { + // Replace removed element with last one. + _data[index] = _data[_len]; + } + } + // inserts the given element before the element at index i void insert_before(const int idx, const E& elem) { check_nesting();
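Note on the GrowableArray::delete_at() addition above: it removes an element in constant time by overwriting the removed slot with the last element and shrinking the array, so element order is not preserved. That trade-off suits unordered worklist-style arrays (presumably the motivation here) but would be wrong where order matters. A small standalone sketch of the same idiom, written against std::vector purely for illustration:

// Standalone sketch (not the HotSpot GrowableArray): O(1) unordered removal
// by overwriting the removed slot with the last element.
#include <cassert>
#include <cstddef>
#include <vector>

template <typename E>
void delete_at(std::vector<E>& v, std::size_t index) {
  assert(index < v.size() && "illegal index");
  if (index < v.size() - 1) {
    v[index] = v.back();   // replace removed element with the last one
  }
  v.pop_back();            // shrink by one; element order is not preserved
}

int main() {
  std::vector<int> worklist = {10, 20, 30, 40};
  delete_at(worklist, 1);  // removes 20, moves 40 into its slot
  assert((worklist == std::vector<int>{10, 40, 30}));
  return 0;
}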