
src/hotspot/share/opto/escape.cpp

rev 52560 : 8213615: GC/C2 abstraction for escape analysis


   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"

  28 #include "gc/shared/c2/barrierSetC2.hpp"
  29 #include "libadt/vectset.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "opto/c2compiler.hpp"
  33 #include "opto/arraycopynode.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/cfgnode.hpp"
  36 #include "opto/compile.hpp"
  37 #include "opto/escape.hpp"
  38 #include "opto/phaseX.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "utilities/macros.hpp"
  42 #if INCLUDE_G1GC
  43 #include "gc/g1/g1ThreadLocalData.hpp"
  44 #endif // INCLUDE_G1GC
  45 #if INCLUDE_ZGC
  46 #include "gc/z/c2/zBarrierSetC2.hpp"
  47 #endif
  48 
  49 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  50   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  51   _in_worklist(C->comp_arena()),
  52   _next_pidx(0),
  53   _collecting(true),
  54   _verify(false),
  55   _compile(C),
  56   _igvn(igvn),
  57   _node_map(C->comp_arena()) {
  58   // Add unknown java object.
  59   add_java_object(C->top(), PointsToNode::GlobalEscape);
  60   phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  61   // Add ConP(#NULL) and ConN(#NULL) nodes.
  62   Node* oop_null = igvn->zerocon(T_OBJECT);
  63   assert(oop_null->_idx < nodes_size(), "should be created already");
  64   add_java_object(oop_null, PointsToNode::NoEscape);
  65   null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  66   if (UseCompressedOops) {
  67     Node* noop_null = igvn->zerocon(T_NARROWOOP);


 371           return; // Skip uncommon traps
 372       }
 373       // Don't mark as processed since call's arguments have to be processed.
 374       delayed_worklist->push(n);
 375       // Check if a call returns an object.
 376       if ((n->as_Call()->returns_pointer() &&
 377            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
 378           (n->is_CallStaticJava() &&
 379            n->as_CallStaticJava()->is_boxing_method())) {
 380         add_call_node(n->as_Call());
 381       }
 382     }
 383     return;
 384   }
 385   // Put this check here to process call arguments since some call nodes
 386   // point to phantom_obj.
 387   if (n_ptn == phantom_obj || n_ptn == null_obj)
 388     return; // Skip predefined nodes.
 389 
 390   int opcode = n->Opcode();
 391   switch (opcode) {
 392     case Op_AddP: {
 393       Node* base = get_addp_base(n);
 394       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 395       // Field nodes are created for all field types. They are used in
 396       // adjust_scalar_replaceable_state() and split_unique_types().
 397       // Note, non-oop fields will have only base edges in Connection
 398       // Graph because such fields are not used for oop loads and stores.
 399       int offset = address_offset(n, igvn);
 400       add_field(n, PointsToNode::NoEscape, offset);
 401       if (ptn_base == NULL) {
 402         delayed_worklist->push(n); // Process it later.
 403       } else {
 404         n_ptn = ptnode_adr(n_idx);
 405         add_base(n_ptn->as_Field(), ptn_base);
 406       }
 407       break;
 408     }
 409     case Op_CastX2P: {
 410       map_ideal_node(n, phantom_obj);


 436       if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
 437         es = PointsToNode::NoEscape;
 438       } else {
 439         es = PointsToNode::GlobalEscape;
 440       }
 441       add_java_object(n, es);
 442       break;
 443     }
 444     case Op_CreateEx: {
 445       // assume that all exception objects globally escape
 446       map_ideal_node(n, phantom_obj);
 447       break;
 448     }
 449     case Op_LoadKlass:
 450     case Op_LoadNKlass: {
 451       // Unknown class is loaded
 452       map_ideal_node(n, phantom_obj);
 453       break;
 454     }
 455     case Op_LoadP:
 456 #if INCLUDE_ZGC
 457     case Op_LoadBarrierSlowReg:
 458     case Op_LoadBarrierWeakSlowReg:
 459 #endif
 460     case Op_LoadN:
 461     case Op_LoadPLocked: {
 462       add_objload_to_connection_graph(n, delayed_worklist);
 463       break;
 464     }
 465     case Op_Parm: {
 466       map_ideal_node(n, phantom_obj);
 467       break;
 468     }
 469     case Op_PartialSubtypeCheck: {
 470       // Produces Null or notNull and is used only in CmpP, so
 471       // phantom_obj could be used.
 472       map_ideal_node(n, phantom_obj); // Result is unknown
 473       break;
 474     }
 475     case Op_Phi: {
 476       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 477       // ThreadLocal has RawPtr type.
 478       const Type* t = n->as_Phi()->type();
 479       if (t->make_ptr() != NULL) {
 480         add_local_var(n, PointsToNode::NoEscape);
 481         // Do not add edges during the first iteration because some may
 482         // not be defined yet.
 483         delayed_worklist->push(n);
 484       }
 485       break;
 486     }
 487     case Op_Proj: {
 488       // we are only interested in the oop result projection from a call
 489       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 490           n->in(0)->as_Call()->returns_pointer()) {
 491         add_local_var_and_edge(n, PointsToNode::NoEscape,
 492                                n->in(0), delayed_worklist);
 493       }
 494 #if INCLUDE_ZGC
 495       else if (UseZGC) {
 496         if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
 497           add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), delayed_worklist);
 498         }
 499       }
 500 #endif
 501       break;
 502     }
 503     case Op_Rethrow: // Exception object escapes
 504     case Op_Return: {
 505       if (n->req() > TypeFunc::Parms &&
 506           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 507         // Treat Return value as LocalVar with GlobalEscape escape state.
 508         add_local_var_and_edge(n, PointsToNode::GlobalEscape,
 509                                n->in(TypeFunc::Parms), delayed_worklist);
 510       }
 511       break;
 512     }
 513     case Op_CompareAndExchangeP:
 514     case Op_CompareAndExchangeN:
 515     case Op_GetAndSetP:
 516     case Op_GetAndSetN: {
 517       add_objload_to_connection_graph(n, delayed_worklist);
 518       // fallthrough
 519     }
 520     case Op_StoreP:
 521     case Op_StoreN:
 522     case Op_StoreNKlass:
 523     case Op_StorePConditional:
 524     case Op_WeakCompareAndSwapP:
 525     case Op_WeakCompareAndSwapN:
 526     case Op_CompareAndSwapP:
 527     case Op_CompareAndSwapN: {
 528       Node* adr = n->in(MemNode::Address);
 529       const Type *adr_type = igvn->type(adr);
 530       adr_type = adr_type->make_ptr();
 531       if (adr_type == NULL) {
 532         break; // skip dead nodes
 533       }
 534       if (   adr_type->isa_oopptr()
 535           || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
 536               && adr_type == TypeRawPtr::NOTNULL
 537               && adr->in(AddPNode::Address)->is_Proj()
 538               && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
 539         delayed_worklist->push(n); // Process it later.
 540 #ifdef ASSERT
 541         assert(adr->is_AddP(), "expecting an AddP");
 542         if (adr_type == TypeRawPtr::NOTNULL) {
 543           // Verify a raw address for a store captured by Initialize node.
 544           int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 545           assert(offs != Type::OffsetBot, "offset must be a constant");
 546         }
 547 #endif
 548       } else {
 549         // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
 550         if (adr->is_BoxLock())
 551           break;
 552         // Stored value escapes in unsafe access.
 553         if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
 554           // Pointer stores in G1 barriers look like unsafe accesses.
 555           // Ignore such stores to be able to scalar replace non-escaping
 556           // allocations.
 557 #if INCLUDE_G1GC
 558           if (UseG1GC && adr->is_AddP()) {
 559             Node* base = get_addp_base(adr);
 560             if (base->Opcode() == Op_LoadP &&
 561                 base->in(MemNode::Address)->is_AddP()) {
 562               adr = base->in(MemNode::Address);
 563               Node* tls = get_addp_base(adr);
 564               if (tls->Opcode() == Op_ThreadLocal) {
 565                 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 566                 if (offs == in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset())) {
 567                   break; // G1 pre barrier previous oop value store.
 568                 }
 569                 if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
 570                   break; // G1 post barrier card address store.
 571                 }
 572               }
 573             }
 574           }
 575 #endif
 576           delayed_worklist->push(n); // Process unsafe access later.
 577           break;
 578         }
 579 #ifdef ASSERT
 580         n->dump(1);
 581         assert(false, "not unsafe or G1 barrier raw StoreP");
 582 #endif
 583       }
 584       break;
 585     }
 586     case Op_AryEq:
 587     case Op_HasNegatives:
 588     case Op_StrComp:
 589     case Op_StrEquals:
 590     case Op_StrIndexOf:
 591     case Op_StrIndexOfChar:
 592     case Op_StrInflatedCopy:
 593     case Op_StrCompressedCopy:
 594     case Op_EncodeISOArray: {
 595       add_local_var(n, PointsToNode::ArgEscape);
 596       delayed_worklist->push(n); // Process it later.
 597       break;
 598     }
 599     case Op_ThreadLocal: {
 600       add_java_object(n, PointsToNode::ArgEscape);
 601       break;
 602     }
 603     default:


 616 #define ELSE_FAIL(name) \
 617       break;
 618 #endif
 619 
 620 // Add final simple edges to graph.
 621 void ConnectionGraph::add_final_edges(Node *n) {
 622   PointsToNode* n_ptn = ptnode_adr(n->_idx);
 623 #ifdef ASSERT
 624   if (_verify && n_ptn->is_JavaObject())
 625     return; // This method does not change graph for JavaObject.
 626 #endif
 627 
 628   if (n->is_Call()) {
 629     process_call_arguments(n->as_Call());
 630     return;
 631   }
 632   assert(n->is_Store() || n->is_LoadStore() ||
 633          (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
 634          "node should be registered already");
 635   int opcode = n->Opcode();




 636   switch (opcode) {
 637     case Op_AddP: {
 638       Node* base = get_addp_base(n);
 639       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 640       assert(ptn_base != NULL, "field's base should be registered");
 641       add_base(n_ptn->as_Field(), ptn_base);
 642       break;
 643     }
 644     case Op_CastPP:
 645     case Op_CheckCastPP:
 646     case Op_EncodeP:
 647     case Op_DecodeN:
 648     case Op_EncodePKlass:
 649     case Op_DecodeNKlass: {
 650       add_local_var_and_edge(n, PointsToNode::NoEscape,
 651                              n->in(1), NULL);
 652       break;
 653     }
 654     case Op_CMoveP: {
 655       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
 656         Node* in = n->in(i);
 657         if (in == NULL)
 658           continue;  // ignore NULL
 659         Node* uncast_in = in->uncast();
 660         if (uncast_in->is_top() || uncast_in == n)
 661           continue;  // ignore top or inputs which go back to this node
 662         PointsToNode* ptn = ptnode_adr(in->_idx);
 663         assert(ptn != NULL, "node should be registered");
 664         add_edge(n_ptn, ptn);
 665       }
 666       break;
 667     }
 668     case Op_LoadP:
 669 #if INCLUDE_ZGC
 670     case Op_LoadBarrierSlowReg:
 671     case Op_LoadBarrierWeakSlowReg:
 672 #endif
 673     case Op_LoadN:
 674     case Op_LoadPLocked: {
 675       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 676       // ThreadLocal has RawPtr type.
 677       const Type* t = _igvn->type(n);
 678       if (t->make_ptr() != NULL) {
 679         Node* adr = n->in(MemNode::Address);
 680         add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
 681         break;
 682       }
 683       ELSE_FAIL("Op_LoadP");
 684     }
 685     case Op_Phi: {
 686       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 687       // ThreadLocal has RawPtr type.
 688       const Type* t = n->as_Phi()->type();
 689       if (t->make_ptr() != NULL) {
 690         for (uint i = 1; i < n->req(); i++) {
 691           Node* in = n->in(i);
 692           if (in == NULL)
 693             continue;  // ignore NULL
 694           Node* uncast_in = in->uncast();
 695           if (uncast_in->is_top() || uncast_in == n)
 696             continue;  // ignore top or inputs which go back to this node
 697           PointsToNode* ptn = ptnode_adr(in->_idx);
 698           assert(ptn != NULL, "node should be registered");
 699           add_edge(n_ptn, ptn);
 700         }
 701         break;
 702       }
 703       ELSE_FAIL("Op_Phi");
 704     }
 705     case Op_Proj: {
 706       // we are only interested in the oop result projection from a call
 707       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 708           n->in(0)->as_Call()->returns_pointer()) {
 709         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
 710         break;
 711       }
 712 #if INCLUDE_ZGC
 713       else if (UseZGC) {
 714         if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
 715           add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), NULL);
 716           break;
 717         }
 718       }
 719 #endif
 720       ELSE_FAIL("Op_Proj");
 721     }
 722     case Op_Rethrow: // Exception object escapes
 723     case Op_Return: {
 724       if (n->req() > TypeFunc::Parms &&
 725           _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 726         // Treat Return value as LocalVar with GlobalEscape escape state.
 727         add_local_var_and_edge(n, PointsToNode::GlobalEscape,
 728                                n->in(TypeFunc::Parms), NULL);
 729         break;
 730       }
 731       ELSE_FAIL("Op_Return");
 732     }
 733     case Op_StoreP:
 734     case Op_StoreN:
 735     case Op_StoreNKlass:
 736     case Op_StorePConditional:
 737     case Op_CompareAndExchangeP:
 738     case Op_CompareAndExchangeN:
 739     case Op_CompareAndSwapP:
 740     case Op_CompareAndSwapN:
 741     case Op_WeakCompareAndSwapP:
 742     case Op_WeakCompareAndSwapN:
 743     case Op_GetAndSetP:
 744     case Op_GetAndSetN: {
 745       Node* adr = n->in(MemNode::Address);
 746       const Type *adr_type = _igvn->type(adr);
 747       adr_type = adr_type->make_ptr();
 748 #ifdef ASSERT
 749       if (adr_type == NULL) {
 750         n->dump(1);
 751         assert(adr_type != NULL, "dead node should not be on list");
 752         break;
 753       }
 754 #endif
 755       if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
 756           opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
 757         add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
 758       }
 759       if (   adr_type->isa_oopptr()
 760           || (   (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
 761               && adr_type == TypeRawPtr::NOTNULL
 762               && adr->in(AddPNode::Address)->is_Proj()
 763               && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
 764         // Point Address to Value
 765         PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 766         assert(adr_ptn != NULL &&
 767                adr_ptn->as_Field()->is_oop(), "node should be registered");
 768         Node *val = n->in(MemNode::ValueIn);
 769         PointsToNode* ptn = ptnode_adr(val->_idx);
 770         assert(ptn != NULL, "node should be registered");
 771         add_edge(adr_ptn, ptn);
 772         break;
 773       } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
 774         // Stored value escapes in unsafe access.
 775         Node *val = n->in(MemNode::ValueIn);
 776         PointsToNode* ptn = ptnode_adr(val->_idx);
 777         assert(ptn != NULL, "node should be registered");
 778         set_escape_state(ptn, PointsToNode::GlobalEscape);
 779         // Add edge to object for unsafe access with offset.
 780         PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 781         assert(adr_ptn != NULL, "node should be registered");
 782         if (adr_ptn->is_Field()) {
 783           assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
 784           add_edge(adr_ptn, ptn);
 785         }
 786         break;
 787       }
 788       ELSE_FAIL("Op_StoreP");
 789     }
 790     case Op_AryEq:
 791     case Op_HasNegatives:
 792     case Op_StrComp:
 793     case Op_StrEquals:
 794     case Op_StrIndexOf:
 795     case Op_StrIndexOfChar:
 796     case Op_StrInflatedCopy:
 797     case Op_StrCompressedCopy:
 798     case Op_EncodeISOArray: {
 799       // char[]/byte[] arrays passed to string intrinsics do not escape but
 800       // they are not scalar replaceable. Adjust escape state for them.
 801       // Start from in(2) edge since in(1) is memory edge.
 802       for (uint i = 2; i < n->req(); i++) {
 803         Node* adr = n->in(i);
 804         const Type* at = _igvn->type(adr);
 805         if (!adr->is_top() && at->isa_ptr()) {


 810           }
 811           PointsToNode* ptn = ptnode_adr(adr->_idx);
 812           assert(ptn != NULL, "node should be registered");
 813           add_edge(n_ptn, ptn);
 814         }
 815       }
 816       break;
 817     }
 818     default: {
 819       // This method should be called only for EA specific nodes which may
 820       // miss some edges when they were created.
 821 #ifdef ASSERT
 822       n->dump(1);
 823 #endif
 824       guarantee(false, "unknown node");
 825     }
 826   }
 827   return;
 828 }
 829 
 830 void ConnectionGraph::add_call_node(CallNode* call) {
 831   assert(call->returns_pointer(), "only for call which returns pointer");
 832   uint call_idx = call->_idx;
 833   if (call->is_Allocate()) {
 834     Node* k = call->in(AllocateNode::KlassNode);
 835     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 836     assert(kt != NULL, "TypeKlassPtr required.");
 837     ciKlass* cik = kt->klass();
 838     PointsToNode::EscapeState es = PointsToNode::NoEscape;
 839     bool scalar_replaceable = true;
 840     if (call->is_AllocateArray()) {
 841       if (!cik->is_array_klass()) { // StressReflectiveCode
 842         es = PointsToNode::GlobalEscape;
 843       } else {
 844         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
 845         if (length < 0 || length > EliminateAllocationArraySizeLimit) {
 846           // Not scalar replaceable if the length is not constant or too big.
 847           scalar_replaceable = false;
 848         }
 849       }


2083   BasicType bt = T_INT;
2084   if (offset == Type::OffsetBot) {
2085     // Check only oop fields.
2086     if (!adr_type->isa_aryptr() ||
2087         (adr_type->isa_aryptr()->klass() == NULL) ||
2088          adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2089       // OffsetBot is used to reference an array's element. Ignore the first AddP.
2090       if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2091         bt = T_OBJECT;
2092       }
2093     }
2094   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2095     if (adr_type->isa_instptr()) {
2096       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2097       if (field != NULL) {
2098         bt = field->layout_type();
2099       } else {
2100         // Check for unsafe oop field access
2101         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2102             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2103             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2104           bt = T_OBJECT;
2105           (*unsafe) = true;
2106         }
2107       }
2108     } else if (adr_type->isa_aryptr()) {
2109       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2110         // Ignore array length load.
2111       } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2112         // Ignore first AddP.
2113       } else {
2114         const Type* elemtype = adr_type->isa_aryptr()->elem();
2115         bt = elemtype->array_element_basic_type();
2116       }
2117     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2118       // Allocation initialization, ThreadLocal field access, unsafe access
2119       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2120           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2121           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
2122         bt = T_OBJECT;
2123       }
2124     }
2125   }
2126   return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
2127 }
2128 
2129 // Returns unique pointed java object or NULL.
2130 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2131   assert(!_collecting, "should not call when constructed graph");
2132   // If the node was created after the escape computation we can't answer.
2133   uint idx = n->_idx;
2134   if (idx >= nodes_size()) {
2135     return NULL;
2136   }
2137   PointsToNode* ptn = ptnode_adr(idx);
2138   if (ptn->is_JavaObject()) {
2139     return ptn->as_JavaObject();
2140   }
2141   assert(ptn->is_LocalVar(), "sanity");


2342   //     AddP  ( base == top )
2343   //
2344   Node *base = addp->in(AddPNode::Base);
2345   if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
2346     base = addp->in(AddPNode::Address);
2347     while (base->is_AddP()) {
2348       // Case #6 (unsafe access) may have several chained AddP nodes.
2349       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2350       base = base->in(AddPNode::Address);
2351     }
2352     if (base->Opcode() == Op_CheckCastPP &&
2353         base->bottom_type()->isa_rawptr() &&
2354         _igvn->type(base->in(1))->isa_oopptr()) {
2355       base = base->in(1); // Case #9
2356     } else {
2357       Node* uncast_base = base->uncast();
2358       int opcode = uncast_base->Opcode();
2359       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2360              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2361              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2362              (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
2363     }
2364   }
2365   return base;
2366 }
2367 
2368 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2369   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2370   Node* addp2 = addp->raw_out(0);
2371   if (addp->outcnt() == 1 && addp2->is_AddP() &&
2372       addp2->in(AddPNode::Base) == n &&
2373       addp2->in(AddPNode::Address) == addp) {
2374     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2375     //
2376     // Find the array's offset to push it on the worklist first and
2377     // as a result process the array's element offset first (pushed second)
2378     // to avoid a CastPP for the array's offset.
2379     // Otherwise the inserted CastPP (LocalVar) will point to what
2380     // the AddP (Field) points to, which would be wrong since
2381     // the algorithm expects the CastPP to have the same points-to set
2382     // as the AddP's base CheckCastPP (LocalVar).


3075           }
3076         }
3077       }
3078     } else if (n->is_AddP()) {
3079       JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
3080       if (jobj == NULL || jobj == phantom_obj) {
3081 #ifdef ASSERT
3082         ptnode_adr(get_addp_base(n)->_idx)->dump();
3083         ptnode_adr(n->_idx)->dump();
3084         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3085 #endif
3086         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3087         return;
3088       }
3089       Node *base = get_map(jobj->idx());  // CheckCastPP node
3090       if (!split_AddP(n, base)) continue; // wrong type from dead path
3091     } else if (n->is_Phi() ||
3092                n->is_CheckCastPP() ||
3093                n->is_EncodeP() ||
3094                n->is_DecodeN() ||
3095                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
3096       if (visited.test_set(n->_idx)) {
3097         assert(n->is_Phi(), "loops only through Phi's");
3098         continue;  // already processed
3099       }
3100       JavaObjectNode* jobj = unique_java_object(n);
3101       if (jobj == NULL || jobj == phantom_obj) {
3102 #ifdef ASSERT
3103         ptnode_adr(n->_idx)->dump();
3104         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3105 #endif
3106         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3107         return;
3108       } else {
3109         Node *val = get_map(jobj->idx());   // CheckCastPP node
3110         TypeNode *tn = n->as_Type();
3111         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3112         assert(tinst != NULL && tinst->is_known_instance() &&
3113                tinst->instance_id() == jobj->idx(), "instance type expected.");
3114 


3145     // push allocation's users on appropriate worklist
3146     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3147       Node *use = n->fast_out(i);
3148       if (use->is_Mem() && use->in(MemNode::Address) == n) {
3149         // Load/store to instance's field
3150         memnode_worklist.append_if_missing(use);
3151       } else if (use->is_MemBar()) {
3152         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3153           memnode_worklist.append_if_missing(use);
3154         }
3155       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3156         Node* addp2 = find_second_addp(use, n);
3157         if (addp2 != NULL) {
3158           alloc_worklist.append_if_missing(addp2);
3159         }
3160         alloc_worklist.append_if_missing(use);
3161       } else if (use->is_Phi() ||
3162                  use->is_CheckCastPP() ||
3163                  use->is_EncodeNarrowPtr() ||
3164                  use->is_DecodeNarrowPtr() ||
3165                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3166         alloc_worklist.append_if_missing(use);
3167 #ifdef ASSERT
3168       } else if (use->is_Mem()) {
3169         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3170       } else if (use->is_MergeMem()) {
3171         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3172       } else if (use->is_SafePoint()) {
3173         // Look for MergeMem nodes for calls which reference unique allocation
3174         // (through CheckCastPP nodes) even for debug info.
3175         Node* m = use->in(TypeFunc::Memory);
3176         if (m->is_MergeMem()) {
3177           assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3178         }
3179       } else if (use->Opcode() == Op_EncodeISOArray) {
3180         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3181           // EncodeISOArray overwrites destination array
3182           memnode_worklist.append_if_missing(use);
3183         }
3184       } else {


3547         tty->print("======== Connection graph for ");
3548         _compile->method()->print_short_name();
3549         tty->cr();
3550         first = false;
3551       }
3552       ptn->dump();
3553       // Print all locals and fields which reference this allocation
3554       for (UseIterator j(ptn); j.has_next(); j.next()) {
3555         PointsToNode* use = j.get();
3556         if (use->is_LocalVar()) {
3557           use->dump(Verbose);
3558         } else if (Verbose) {
3559           use->dump();
3560         }
3561       }
3562       tty->cr();
3563     }
3564   }
3565 }
3566 #endif

   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "opto/c2compiler.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/compile.hpp"
  38 #include "opto/escape.hpp"
  39 #include "opto/phaseX.hpp"
  40 #include "opto/movenode.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "utilities/macros.hpp"
  43 
  44 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  45   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  46   _in_worklist(C->comp_arena()),
  47   _next_pidx(0),
  48   _collecting(true),
  49   _verify(false),
  50   _compile(C),
  51   _igvn(igvn),
  52   _node_map(C->comp_arena()) {
  53   // Add unknown java object.
  54   add_java_object(C->top(), PointsToNode::GlobalEscape);
  55   phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  56   // Add ConP(#NULL) and ConN(#NULL) nodes.
  57   Node* oop_null = igvn->zerocon(T_OBJECT);
  58   assert(oop_null->_idx < nodes_size(), "should be created already");
  59   add_java_object(oop_null, PointsToNode::NoEscape);
  60   null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  61   if (UseCompressedOops) {
  62     Node* noop_null = igvn->zerocon(T_NARROWOOP);


 366           return; // Skip uncommon traps
 367       }
 368       // Don't mark as processed since call's arguments have to be processed.
 369       delayed_worklist->push(n);
 370       // Check if a call returns an object.
 371       if ((n->as_Call()->returns_pointer() &&
 372            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
 373           (n->is_CallStaticJava() &&
 374            n->as_CallStaticJava()->is_boxing_method())) {
 375         add_call_node(n->as_Call());
 376       }
 377     }
 378     return;
 379   }
 380   // Put this check here to process call arguments since some call nodes
 381   // point to phantom_obj.
 382   if (n_ptn == phantom_obj || n_ptn == null_obj)
 383     return; // Skip predefined nodes.
 384 
 385   int opcode = n->Opcode();
 386   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
 387   if (gc_handled) {
 388     return; // Ignore node if already handled by GC.
 389   }
 390   switch (opcode) {
 391     case Op_AddP: {
 392       Node* base = get_addp_base(n);
 393       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 394       // Field nodes are created for all field types. They are used in
 395       // adjust_scalar_replaceable_state() and split_unique_types().
 396       // Note, non-oop fields will have only base edges in Connection
 397       // Graph because such fields are not used for oop loads and stores.
 398       int offset = address_offset(n, igvn);
 399       add_field(n, PointsToNode::NoEscape, offset);
 400       if (ptn_base == NULL) {
 401         delayed_worklist->push(n); // Process it later.
 402       } else {
 403         n_ptn = ptnode_adr(n_idx);
 404         add_base(n_ptn->as_Field(), ptn_base);
 405       }
 406       break;
 407     }
 408     case Op_CastX2P: {
 409       map_ideal_node(n, phantom_obj);
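
The escape_add_to_con_graph() hook called above is only visible on this page through its call site, as is its counterpart escape_add_final_edges() further down. A minimal sketch of the interface the call sites imply (parameter types, constness, and the default no-op bodies are inferred here, not copied from the patch):

// Sketch only: the shape of the new BarrierSetC2 escape-analysis hooks,
// reconstructed from the call sites in this file.
class BarrierSetC2 {
public:
  // Called while the connection graph is being built. Returns true if the
  // barrier set fully handled node n, so the shared opcode switch is skipped.
  virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn,
                                       Unique_Node_List* delayed_worklist,
                                       Node* n, uint opcode) const {
    return false;
  }
  // Called when final edges are added; same contract as above.
  virtual bool escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn,
                                      Node* n, uint opcode) const {
    return false;
  }
  // True if n is a GC barrier node that escape analysis should look through
  // like a pointer cast.
  virtual bool escape_is_barrier_node(Node* n) const { return false; }
  // True if n has a GC-specific user that performs an unsafe oop access.
  virtual bool escape_has_out_with_unsafe_object(Node* n) const { return false; }
};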


 435       if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
 436         es = PointsToNode::NoEscape;
 437       } else {
 438         es = PointsToNode::GlobalEscape;
 439       }
 440       add_java_object(n, es);
 441       break;
 442     }
 443     case Op_CreateEx: {
 444       // assume that all exception objects globally escape
 445       map_ideal_node(n, phantom_obj);
 446       break;
 447     }
 448     case Op_LoadKlass:
 449     case Op_LoadNKlass: {
 450       // Unknown class is loaded
 451       map_ideal_node(n, phantom_obj);
 452       break;
 453     }
 454     case Op_LoadP:
 455     case Op_LoadN:
 456     case Op_LoadPLocked: {
 457       add_objload_to_connection_graph(n, delayed_worklist);
 458       break;
 459     }
 460     case Op_Parm: {
 461       map_ideal_node(n, phantom_obj);
 462       break;
 463     }
 464     case Op_PartialSubtypeCheck: {
 465       // Produces Null or notNull and is used only in CmpP, so
 466       // phantom_obj could be used.
 467       map_ideal_node(n, phantom_obj); // Result is unknown
 468       break;
 469     }
 470     case Op_Phi: {
 471       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 472       // ThreadLocal has RawPtr type.
 473       const Type* t = n->as_Phi()->type();
 474       if (t->make_ptr() != NULL) {
 475         add_local_var(n, PointsToNode::NoEscape);
 476         // Do not add edges during the first iteration because some may
 477         // not be defined yet.
 478         delayed_worklist->push(n);
 479       }
 480       break;
 481     }
 482     case Op_Proj: {
 483       // we are only interested in the oop result projection from a call
 484       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 485           n->in(0)->as_Call()->returns_pointer()) {
 486         add_local_var_and_edge(n, PointsToNode::NoEscape,
 487                                n->in(0), delayed_worklist);
 488                                n->in(0), delayed_worklist);
 489       break;
 490     }
 491     case Op_Rethrow: // Exception object escapes
 492     case Op_Return: {
 493       if (n->req() > TypeFunc::Parms &&
 494           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 495         // Treat Return value as LocalVar with GlobalEscape escape state.
 496         add_local_var_and_edge(n, PointsToNode::GlobalEscape,
 497                                n->in(TypeFunc::Parms), delayed_worklist);
 498       }
 499       break;
 500     }
 501     case Op_CompareAndExchangeP:
 502     case Op_CompareAndExchangeN:
 503     case Op_GetAndSetP:
 504     case Op_GetAndSetN: {
 505       add_objload_to_connection_graph(n, delayed_worklist);
 506       // fallthrough
 507     }
 508     case Op_StoreP:
 509     case Op_StoreN:
 510     case Op_StoreNKlass:
 511     case Op_StorePConditional:
 512     case Op_WeakCompareAndSwapP:
 513     case Op_WeakCompareAndSwapN:
 514     case Op_CompareAndSwapP:
 515     case Op_CompareAndSwapN: {
 516       add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
 517       break;
 518     }
 519     case Op_AryEq:
 520     case Op_HasNegatives:
 521     case Op_StrComp:
 522     case Op_StrEquals:
 523     case Op_StrIndexOf:
 524     case Op_StrIndexOfChar:
 525     case Op_StrInflatedCopy:
 526     case Op_StrCompressedCopy:
 527     case Op_EncodeISOArray: {
 528       add_local_var(n, PointsToNode::ArgEscape);
 529       delayed_worklist->push(n); // Process it later.
 530       break;
 531     }
 532     case Op_ThreadLocal: {
 533       add_java_object(n, PointsToNode::ArgEscape);
 534       break;
 535     }
 536     default:


 549 #define ELSE_FAIL(name) \
 550       break;
 551 #endif
 552 
 553 // Add final simple edges to graph.
 554 void ConnectionGraph::add_final_edges(Node *n) {
 555   PointsToNode* n_ptn = ptnode_adr(n->_idx);
 556 #ifdef ASSERT
 557   if (_verify && n_ptn->is_JavaObject())
 558     return; // This method does not change graph for JavaObject.
 559 #endif
 560 
 561   if (n->is_Call()) {
 562     process_call_arguments(n->as_Call());
 563     return;
 564   }
 565   assert(n->is_Store() || n->is_LoadStore() ||
 566          (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
 567          "node should be registered already");
 568   int opcode = n->Opcode();
 569   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
 570   if (gc_handled) {
 571     return; // Ignore node if already handled by GC.
 572   }
 573   switch (opcode) {
 574     case Op_AddP: {
 575       Node* base = get_addp_base(n);
 576       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 577       assert(ptn_base != NULL, "field's base should be registered");
 578       add_base(n_ptn->as_Field(), ptn_base);
 579       break;
 580     }
 581     case Op_CastPP:
 582     case Op_CheckCastPP:
 583     case Op_EncodeP:
 584     case Op_DecodeN:
 585     case Op_EncodePKlass:
 586     case Op_DecodeNKlass: {
 587       add_local_var_and_edge(n, PointsToNode::NoEscape,
 588                              n->in(1), NULL);
 589       break;
 590     }
 591     case Op_CMoveP: {
 592       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
 593         Node* in = n->in(i);
 594         if (in == NULL)
 595           continue;  // ignore NULL
 596         Node* uncast_in = in->uncast();
 597         if (uncast_in->is_top() || uncast_in == n)
 598           continue;  // ignore top or inputs which go back to this node
 599         PointsToNode* ptn = ptnode_adr(in->_idx);
 600         assert(ptn != NULL, "node should be registered");
 601         add_edge(n_ptn, ptn);
 602       }
 603       break;
 604     }
 605     case Op_LoadP:
 606     case Op_LoadN:
 607     case Op_LoadPLocked: {
 608       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 609       // ThreadLocal has RawPtr type.
 610       const Type* t = _igvn->type(n);
 611       if (t->make_ptr() != NULL) {
 612         Node* adr = n->in(MemNode::Address);
 613         add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
 614         break;
 615       }
 616       ELSE_FAIL("Op_LoadP");
 617     }
 618     case Op_Phi: {
 619       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 620       // ThreadLocal has RawPtr type.
 621       const Type* t = n->as_Phi()->type();
 622       if (t->make_ptr() != NULL) {
 623         for (uint i = 1; i < n->req(); i++) {
 624           Node* in = n->in(i);
 625           if (in == NULL)
 626             continue;  // ignore NULL
 627           Node* uncast_in = in->uncast();
 628           if (uncast_in->is_top() || uncast_in == n)
 629           continue;  // ignore top or inputs which go back to this node
 630           PointsToNode* ptn = ptnode_adr(in->_idx);
 631           assert(ptn != NULL, "node should be registered");
 632           add_edge(n_ptn, ptn);
 633         }
 634         break;
 635       }
 636       ELSE_FAIL("Op_Phi");
 637     }
 638     case Op_Proj: {
 639       // we are only interested in the oop result projection from a call
 640       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 641           n->in(0)->as_Call()->returns_pointer()) {
 642         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
 643         break;
 644         break;
 645       ELSE_FAIL("Op_Proj");
 646     }
 647     case Op_Rethrow: // Exception object escapes
 648     case Op_Return: {
 649       if (n->req() > TypeFunc::Parms &&
 650           _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 651         // Treat Return value as LocalVar with GlobalEscape escape state.
 652         add_local_var_and_edge(n, PointsToNode::GlobalEscape,
 653                                n->in(TypeFunc::Parms), NULL);
 654         break;
 655       }
 656       ELSE_FAIL("Op_Return");
 657     }
 658     case Op_StoreP:
 659     case Op_StoreN:
 660     case Op_StoreNKlass:
 661     case Op_StorePConditional:
 662     case Op_CompareAndExchangeP:
 663     case Op_CompareAndExchangeN:
 664     case Op_CompareAndSwapP:
 665     case Op_CompareAndSwapN:
 666     case Op_WeakCompareAndSwapP:
 667     case Op_WeakCompareAndSwapN:
 668     case Op_GetAndSetP:
 669     case Op_GetAndSetN: {
 670       if (add_final_edges_unsafe_access(n, opcode)) {
 671         break;
 672       }
 673       ELSE_FAIL("Op_StoreP");
 674     }
 675     case Op_AryEq:
 676     case Op_HasNegatives:
 677     case Op_StrComp:
 678     case Op_StrEquals:
 679     case Op_StrIndexOf:
 680     case Op_StrIndexOfChar:
 681     case Op_StrInflatedCopy:
 682     case Op_StrCompressedCopy:
 683     case Op_EncodeISOArray: {
 684       // char[]/byte[] arrays passed to string intrinsics do not escape but
 685       // they are not scalar replaceable. Adjust escape state for them.
 686       // Start from in(2) edge since in(1) is memory edge.
 687       for (uint i = 2; i < n->req(); i++) {
 688         Node* adr = n->in(i);
 689         const Type* at = _igvn->type(adr);
 690         if (!adr->is_top() && at->isa_ptr()) {


 695           }
 696           PointsToNode* ptn = ptnode_adr(adr->_idx);
 697           assert(ptn != NULL, "node should be registered");
 698           add_edge(n_ptn, ptn);
 699         }
 700       }
 701       break;
 702     }
 703     default: {
 704       // This method should be called only for EA specific nodes which may
 705       // miss some edges when they were created.
 706 #ifdef ASSERT
 707       n->dump(1);
 708 #endif
 709       guarantee(false, "unknown node");
 710     }
 711   }
 712   return;
 713 }
 714 
 715 void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
 716   Node* adr = n->in(MemNode::Address);
 717   const Type* adr_type = _igvn->type(adr);
 718   adr_type = adr_type->make_ptr();
 719   if (adr_type == NULL) {
 720     return; // skip dead nodes
 721   }
 722   if (adr_type->isa_oopptr()
 723       || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
 724           && adr_type == TypeRawPtr::NOTNULL
 725           && adr->in(AddPNode::Address)->is_Proj()
 726           && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
 727     delayed_worklist->push(n); // Process it later.
 728 #ifdef ASSERT
 729     assert(adr->is_AddP(), "expecting an AddP");
 730     if (adr_type == TypeRawPtr::NOTNULL) {
 731       // Verify a raw address for a store captured by Initialize node.
 732       int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 733       assert(offs != Type::OffsetBot, "offset must be a constant");
 734     }
 735 #endif
 736   } else {
 737     // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
 738     if (adr->is_BoxLock()) {
 739       return;
 740     }
 741     // Stored value escapes in unsafe access.
 742     if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
 743       delayed_worklist->push(n); // Process unsafe access later.
 744       return;
 745     }
 746 #ifdef ASSERT
 747     n->dump(1);
 748     assert(false, "not unsafe");
 749 #endif
 750   }
 751 }
 752 
 753 bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
 754   Node* adr = n->in(MemNode::Address);
 755   const Type *adr_type = _igvn->type(adr);
 756   adr_type = adr_type->make_ptr();
 757 #ifdef ASSERT
 758   if (adr_type == NULL) {
 759     n->dump(1);
 760     assert(adr_type != NULL, "dead node should not be on list");
 761     return true;
 762   }
 763 #endif
 764 
 765   if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
 766       opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
 767     add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
 768   }
 769 
 770   if (adr_type->isa_oopptr()
 771       || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
 772            && adr_type == TypeRawPtr::NOTNULL
 773            && adr->in(AddPNode::Address)->is_Proj()
 774            && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
 775     // Point Address to Value
 776     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 777     assert(adr_ptn != NULL &&
 778            adr_ptn->as_Field()->is_oop(), "node should be registered");
 779     Node* val = n->in(MemNode::ValueIn);
 780     PointsToNode* ptn = ptnode_adr(val->_idx);
 781     assert(ptn != NULL, "node should be registered");
 782     add_edge(adr_ptn, ptn);
 783     return true;
 784   } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
 785     // Stored value escapes in unsafe access.
 786     Node* val = n->in(MemNode::ValueIn);
 787     PointsToNode* ptn = ptnode_adr(val->_idx);
 788     assert(ptn != NULL, "node should be registered");
 789     set_escape_state(ptn, PointsToNode::GlobalEscape);
 790     // Add edge to object for unsafe access with offset.
 791     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 792     assert(adr_ptn != NULL, "node should be registered");
 793     if (adr_ptn->is_Field()) {
 794       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
 795       add_edge(adr_ptn, ptn);
 796     }
 797     return true;
 798   }
 799   return false;
 800 }
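
With the unsafe-access handling factored into the two helpers above, a collector can express its escape rules entirely in its own BarrierSetC2 subclass. For illustration, the ZGC cases deleted from the old version of this file (Op_LoadBarrierSlowReg, Op_LoadBarrierWeakSlowReg, and the LoadBarrierNode::Oop projection) would translate into an override along these lines; this is a reconstruction of the removed logic, not the actual ZBarrierSetC2 code:

// Illustrative sketch: the ZGC handling removed from the opcode switch above,
// relocated behind the new hook. Details may differ from the real override.
bool ZBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn,
                                            Unique_Node_List* delayed_worklist,
                                            Node* n, uint opcode) const {
  switch (opcode) {
    case Op_LoadBarrierSlowReg:
    case Op_LoadBarrierWeakSlowReg:
      // Same treatment the shared code gives LoadP/LoadN.
      conn_graph->add_objload_to_connection_graph(n, delayed_worklist);
      return true;
    case Op_Proj:
      // The oop projection of a load barrier forwards the barrier's oop input.
      if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
        conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape,
                                           n->in(0)->in(LoadBarrierNode::Oop),
                                           delayed_worklist);
        return true;
      }
      return false; // Let the shared code handle call result projections.
    default:
      return false;
  }
}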
 801 
 802 void ConnectionGraph::add_call_node(CallNode* call) {
 803   assert(call->returns_pointer(), "only for call which returns pointer");
 804   uint call_idx = call->_idx;
 805   if (call->is_Allocate()) {
 806     Node* k = call->in(AllocateNode::KlassNode);
 807     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 808     assert(kt != NULL, "TypeKlassPtr required.");
 809     ciKlass* cik = kt->klass();
 810     PointsToNode::EscapeState es = PointsToNode::NoEscape;
 811     bool scalar_replaceable = true;
 812     if (call->is_AllocateArray()) {
 813       if (!cik->is_array_klass()) { // StressReflectiveCode
 814         es = PointsToNode::GlobalEscape;
 815       } else {
 816         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
 817         if (length < 0 || length > EliminateAllocationArraySizeLimit) {
 818           // Not scalar replaceable if the length is not constant or too big.
 819           scalar_replaceable = false;
 820         }
 821       }


2055   BasicType bt = T_INT;
2056   if (offset == Type::OffsetBot) {
2057     // Check only oop fields.
2058     if (!adr_type->isa_aryptr() ||
2059         (adr_type->isa_aryptr()->klass() == NULL) ||
2060          adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2061       // OffsetBot is used to reference an array's element. Ignore the first AddP.
2062       if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2063         bt = T_OBJECT;
2064       }
2065     }
2066   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2067     if (adr_type->isa_instptr()) {
2068       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2069       if (field != NULL) {
2070         bt = field->layout_type();
2071       } else {
2072         // Check for unsafe oop field access
2073         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2074             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2075             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2076             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2077           bt = T_OBJECT;
2078           (*unsafe) = true;
2079         }
2080       }
2081     } else if (adr_type->isa_aryptr()) {
2082       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2083         // Ignore array length load.
2084       } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2085         // Ignore first AddP.
2086       } else {
2087         const Type* elemtype = adr_type->isa_aryptr()->elem();
2088         bt = elemtype->array_element_basic_type();
2089       }
2090     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2091       // Allocation initialization, ThreadLocal field access, unsafe access
2092       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2093           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2094           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2095           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2096         bt = T_OBJECT;
2097       }
2098     }
2099   }
2100   return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
2101 }
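
The escape_has_out_with_unsafe_object() query added to both opcode lists here lets a collector report barrier users that the shared Op_* tests cannot name. A hedged sketch of a matching ZGC override, assuming its load-barrier nodes are the relevant users (the real implementation is not shown on this page):

// Sketch (assumption): a ZGC load barrier consuming this node counts as an
// oop access, just like the LoadP/StoreP/... users tested above.
bool ZBarrierSetC2::escape_has_out_with_unsafe_object(Node* n) const {
  return n->has_out_with(Op_LoadBarrierSlowReg) ||
         n->has_out_with(Op_LoadBarrierWeakSlowReg);
}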
2102 
2103 // Returns unique pointed java object or NULL.
2104 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2105   assert(!_collecting, "should not call when constructed graph");
2106   // If the node was created after the escape computation we can't answer.
2107   uint idx = n->_idx;
2108   if (idx >= nodes_size()) {
2109     return NULL;
2110   }
2111   PointsToNode* ptn = ptnode_adr(idx);
2112   if (ptn->is_JavaObject()) {
2113     return ptn->as_JavaObject();
2114   }
2115   assert(ptn->is_LocalVar(), "sanity");


2316   //     AddP  ( base == top )
2317   //
2318   Node *base = addp->in(AddPNode::Base);
2319   if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
2320     base = addp->in(AddPNode::Address);
2321     while (base->is_AddP()) {
2322       // Case #6 (unsafe access) may have several chained AddP nodes.
2323       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2324       base = base->in(AddPNode::Address);
2325     }
2326     if (base->Opcode() == Op_CheckCastPP &&
2327         base->bottom_type()->isa_rawptr() &&
2328         _igvn->type(base->in(1))->isa_oopptr()) {
2329       base = base->in(1); // Case #9
2330     } else {
2331       Node* uncast_base = base->uncast();
2332       int opcode = uncast_base->Opcode();
2333       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2334              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2335              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2336              (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()) ||
2337              BarrierSet::barrier_set()->barrier_set_c2()->escape_is_barrier_node(uncast_base), "sanity");
2338     }
2339   }
2340   return base;
2341 }
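
escape_is_barrier_node() is consulted three times in this patch: in the sanity assert of get_addp_base() above and in the two cast-like worklist tests of the split_unique_types() pass below. In each place a barrier node is treated like the pointer cast it wraps. A minimal sketch of the ZGC side, assuming LoadBarrier nodes are its only EA-visible barriers:

// Sketch (assumption): report ZGC load barriers so escape analysis walks
// through them the way it walks through CheckCastPP/DecodeN nodes.
bool ZBarrierSetC2::escape_is_barrier_node(Node* n) const {
  return n->is_LoadBarrier();
}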
2342 
2343 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2344   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2345   Node* addp2 = addp->raw_out(0);
2346   if (addp->outcnt() == 1 && addp2->is_AddP() &&
2347       addp2->in(AddPNode::Base) == n &&
2348       addp2->in(AddPNode::Address) == addp) {
2349     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2350     //
2351     // Find the array's offset to push it on the worklist first and
2352     // as a result process the array's element offset first (pushed second)
2353     // to avoid a CastPP for the array's offset.
2354     // Otherwise the inserted CastPP (LocalVar) will point to what
2355     // the AddP (Field) points to, which would be wrong since
2356     // the algorithm expects the CastPP to have the same points-to set
2357     // as the AddP's base CheckCastPP (LocalVar).


3050           }
3051         }
3052       }
3053     } else if (n->is_AddP()) {
3054       JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
3055       if (jobj == NULL || jobj == phantom_obj) {
3056 #ifdef ASSERT
3057         ptnode_adr(get_addp_base(n)->_idx)->dump();
3058         ptnode_adr(n->_idx)->dump();
3059         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3060 #endif
3061         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3062         return;
3063       }
3064       Node *base = get_map(jobj->idx());  // CheckCastPP node
3065       if (!split_AddP(n, base)) continue; // wrong type from dead path
3066     } else if (n->is_Phi() ||
3067                n->is_CheckCastPP() ||
3068                n->is_EncodeP() ||
3069                n->is_DecodeN() ||
3070                BarrierSet::barrier_set()->barrier_set_c2()->escape_is_barrier_node(n) ||
3071                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
3072       if (visited.test_set(n->_idx)) {
3073         assert(n->is_Phi(), "loops only through Phi's");
3074         continue;  // already processed
3075       }
3076       JavaObjectNode* jobj = unique_java_object(n);
3077       if (jobj == NULL || jobj == phantom_obj) {
3078 #ifdef ASSERT
3079         ptnode_adr(n->_idx)->dump();
3080         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3081 #endif
3082         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3083         return;
3084       } else {
3085         Node *val = get_map(jobj->idx());   // CheckCastPP node
3086         TypeNode *tn = n->as_Type();
3087         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3088         assert(tinst != NULL && tinst->is_known_instance() &&
3089                tinst->instance_id() == jobj->idx(), "instance type expected.");
3090 


3121     // push allocation's users on appropriate worklist
3122     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3123       Node *use = n->fast_out(i);
3124       if (use->is_Mem() && use->in(MemNode::Address) == n) {
3125         // Load/store to instance's field
3126         memnode_worklist.append_if_missing(use);
3127       } else if (use->is_MemBar()) {
3128         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3129           memnode_worklist.append_if_missing(use);
3130         }
3131       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3132         Node* addp2 = find_second_addp(use, n);
3133         if (addp2 != NULL) {
3134           alloc_worklist.append_if_missing(addp2);
3135         }
3136         alloc_worklist.append_if_missing(use);
3137       } else if (use->is_Phi() ||
3138                  use->is_CheckCastPP() ||
3139                  use->is_EncodeNarrowPtr() ||
3140                  use->is_DecodeNarrowPtr() ||
3141                  BarrierSet::barrier_set()->barrier_set_c2()->escape_is_barrier_node(use) ||
3142                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3143         alloc_worklist.append_if_missing(use);
3144 #ifdef ASSERT
3145       } else if (use->is_Mem()) {
3146         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3147       } else if (use->is_MergeMem()) {
3148         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3149       } else if (use->is_SafePoint()) {
3150         // Look for MergeMem nodes for calls which reference unique allocation
3151         // (through CheckCastPP nodes) even for debug info.
3152         Node* m = use->in(TypeFunc::Memory);
3153         if (m->is_MergeMem()) {
3154           assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3155         }
3156       } else if (use->Opcode() == Op_EncodeISOArray) {
3157         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3158           // EncodeISOArray overwrites destination array
3159           memnode_worklist.append_if_missing(use);
3160         }
3161       } else {


3524         tty->print("======== Connection graph for ");
3525         _compile->method()->print_short_name();
3526         tty->cr();
3527         first = false;
3528       }
3529       ptn->dump();
3530       // Print all locals and fields which reference this allocation
3531       for (UseIterator j(ptn); j.has_next(); j.next()) {
3532         PointsToNode* use = j.get();
3533         if (use->is_LocalVar()) {
3534           use->dump(Verbose);
3535         } else if (Verbose) {
3536           use->dump();
3537         }
3538       }
3539       tty->cr();
3540     }
3541   }
3542 }
3543 #endif
3544 
3545 void ConnectionGraph::record_for_optimizer(Node *n) {
3546   _igvn->_worklist.push(n);
3547   _igvn->add_users_to_worklist(n);
3548 }