124 GrowableArray<JavaObjectNode*> non_escaped_worklist;
125 GrowableArray<FieldNode*> oop_fields_worklist;
126 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
127
128 { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
129
130 // 1. Populate Connection Graph (CG) with PointsTo nodes.
131 ideal_nodes.map(C->live_nodes(), NULL); // preallocate space
132 // Initialize worklist
133 if (C->root() != NULL) {
134 ideal_nodes.push(C->root());
135 }
136 // Processed ideal nodes are unique on ideal_nodes list
137 // but several ideal nodes are mapped to the phantom_obj.
138 // To avoid duplicated entries on the following worklists
139 // add the phantom_obj only once to them.
140 ptnodes_worklist.append(phantom_obj);
141 java_objects_worklist.append(phantom_obj);
142 for (uint next = 0; next < ideal_nodes.size(); ++next) {
143 Node* n = ideal_nodes.at(next);
144 // Create PointsTo nodes and add them to Connection Graph. Called
145 // only once per ideal node since ideal_nodes is Unique_Node list.
146 add_node_to_connection_graph(n, &delayed_worklist);
147 PointsToNode* ptn = ptnode_adr(n->_idx);
148 if (ptn != NULL && ptn != phantom_obj) {
149 ptnodes_worklist.append(ptn);
150 if (ptn->is_JavaObject()) {
151 java_objects_worklist.append(ptn->as_JavaObject());
152 if ((n->is_Allocate() || n->is_CallStaticJava()) &&
153 (ptn->escape_state() < PointsToNode::GlobalEscape)) {
154 // Only the results of allocations and Java static calls are interesting.
155 non_escaped_worklist.append(ptn->as_JavaObject());
156 }
157 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
158 oop_fields_worklist.append(ptn->as_Field());
159 }
160 }
161 if (n->is_MergeMem()) {
162 // Collect all MergeMem nodes to add memory slices for
163 // scalar replaceable objects in split_unique_types().
164 _mergemem_worklist.append(n->as_MergeMem());
165 } else if (OptimizePtrCompare && n->is_Cmp() &&
166 (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
167 // Collect compare pointers nodes.
168 ptr_cmp_worklist.append(n);
169 } else if (n->is_MemBarStoreStore()) {
170 // Collect all MemBarStoreStore nodes so that depending on the
171 // escape status of the associated Allocate node some of them
172 // may be eliminated.
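// (For example, the StoreStore barrier emitted when a constructor
// initializes final fields may be removed if the allocated object
// never escapes its allocating thread.)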
173 storestore_worklist.append(n);
174 } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
175 (n->req() > MemBarNode::Precedent)) {
176 record_for_optimizer(n);
177 #ifdef ASSERT
178 } else if (n->is_AddP()) {
179 // Collect address nodes for graph verification.
180 addp_worklist.append(n);
181 #endif
182 } else if (n->is_ArrayCopy()) {
183 // Keep a list of ArrayCopy nodes so that if one of its inputs is
184 // non-escaping, we can record a unique type.
185 arraycopy_worklist.append(n->as_ArrayCopy());
186 }
356 // Put Lock and Unlock nodes on IGVN worklist to process them during
357 // first IGVN optimization when escape information is still available.
358 record_for_optimizer(n);
359 } else if (n->is_Allocate()) {
360 add_call_node(n->as_Call());
361 record_for_optimizer(n);
362 } else {
363 if (n->is_CallStaticJava()) {
364 const char* name = n->as_CallStaticJava()->_name;
365 if (name != NULL && strcmp(name, "uncommon_trap") == 0)
366 return; // Skip uncommon traps
367 }
368 // Don't mark as processed since call's arguments have to be processed.
369 delayed_worklist->push(n);
370 // Check if a call returns an object.
371 if ((n->as_Call()->returns_pointer() &&
372 n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
373 (n->is_CallStaticJava() &&
374 n->as_CallStaticJava()->is_boxing_method())) {
375 add_call_node(n->as_Call());
376 }
377 }
378 return;
379 }
380 // Put this check here to process call arguments since some call nodes
381 // point to phantom_obj.
382 if (n_ptn == phantom_obj || n_ptn == null_obj)
383 return; // Skip predefined nodes.
384
385 int opcode = n->Opcode();
386 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
387 if (gc_handled) {
388 return; // Ignore node if already handled by GC.
389 }
390 switch (opcode) {
391 case Op_AddP: {
392 Node* base = get_addp_base(n);
393 PointsToNode* ptn_base = ptnode_adr(base->_idx);
394 // Field nodes are created for all field types. They are used in
395 // adjust_scalar_replaceable_state() and split_unique_types().
464 case Op_PartialSubtypeCheck: {
465 // Produces Null or notNull and is used only in CmpP so
466 // phantom_obj could be used.
467 map_ideal_node(n, phantom_obj); // Result is unknown
468 break;
469 }
470 case Op_Phi: {
471 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
472 // ThreadLocal has RawPtr type.
473 const Type* t = n->as_Phi()->type();
474 if (t->make_ptr() != NULL) {
475 add_local_var(n, PointsToNode::NoEscape);
476 // Do not add edges during first iteration because some may
477 // not be defined yet.
478 delayed_worklist->push(n);
479 }
480 break;
481 }
482 case Op_Proj: {
483 // we are only interested in the oop result projection from a call
484 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
485 n->in(0)->as_Call()->returns_pointer()) {
486 add_local_var_and_edge(n, PointsToNode::NoEscape,
487 n->in(0), delayed_worklist);
488 }
489 break;
490 }
491 case Op_Rethrow: // Exception object escapes
492 case Op_Return: {
493 if (n->req() > TypeFunc::Parms &&
494 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
495 // Treat Return value as LocalVar with GlobalEscape escape state.
496 add_local_var_and_edge(n, PointsToNode::GlobalEscape,
497 n->in(TypeFunc::Parms), delayed_worklist);
498 }
499 break;
500 }
501 case Op_CompareAndExchangeP:
502 case Op_CompareAndExchangeN:
503 case Op_GetAndSetP:
504 case Op_GetAndSetN: {
505 add_objload_to_connection_graph(n, delayed_worklist);
620 // ThreadLocal has RawPtr type.
621 const Type* t = n->as_Phi()->type();
622 if (t->make_ptr() != NULL) {
623 for (uint i = 1; i < n->req(); i++) {
624 Node* in = n->in(i);
625 if (in == NULL)
626 continue; // ignore NULL
627 Node* uncast_in = in->uncast();
628 if (uncast_in->is_top() || uncast_in == n)
629 continue; // ignore top or inputs which go back this node
630 PointsToNode* ptn = ptnode_adr(in->_idx);
631 assert(ptn != NULL, "node should be registered");
632 add_edge(n_ptn, ptn);
633 }
634 break;
635 }
636 ELSE_FAIL("Op_Phi");
637 }
638 case Op_Proj: {
639 // we are only interested in the oop result projection from a call
640 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
641 n->in(0)->as_Call()->returns_pointer()) {
642 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
643 break;
644 }
645 ELSE_FAIL("Op_Proj");
646 }
647 case Op_Rethrow: // Exception object escapes
648 case Op_Return: {
649 if (n->req() > TypeFunc::Parms &&
650 _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
651 // Treat Return value as LocalVar with GlobalEscape escape state.
652 add_local_var_and_edge(n, PointsToNode::GlobalEscape,
653 n->in(TypeFunc::Parms), NULL);
654 break;
655 }
656 ELSE_FAIL("Op_Return");
657 }
658 case Op_StoreP:
659 case Op_StoreN:
660 case Op_StoreNKlass:
661 case Op_StorePConditional:
783 return true;
784 } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
785 // Stored value escapes in unsafe access.
786 Node* val = n->in(MemNode::ValueIn);
787 PointsToNode* ptn = ptnode_adr(val->_idx);
788 assert(ptn != NULL, "node should be registered");
789 set_escape_state(ptn, PointsToNode::GlobalEscape);
790 // Add edge to object for unsafe access with offset.
791 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
792 assert(adr_ptn != NULL, "node should be registered");
793 if (adr_ptn->is_Field()) {
794 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
795 add_edge(adr_ptn, ptn);
796 }
797 return true;
798 }
799 return false;
800 }
801
802 void ConnectionGraph::add_call_node(CallNode* call) {
803 assert(call->returns_pointer(), "only for call which returns pointer");
804 uint call_idx = call->_idx;
805 if (call->is_Allocate()) {
806 Node* k = call->in(AllocateNode::KlassNode);
807 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
808 assert(kt != NULL, "TypeKlassPtr required.");
809 ciKlass* cik = kt->klass();
810 PointsToNode::EscapeState es = PointsToNode::NoEscape;
811 bool scalar_replaceable = true;
812 if (call->is_AllocateArray()) {
813 if (!cik->is_array_klass()) { // StressReflectiveCode
814 es = PointsToNode::GlobalEscape;
815 } else {
816 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
817 if (length < 0 || length > EliminateAllocationArraySizeLimit) {
818 // Not scalar replaceable if the length is not constant or too big.
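// For example, assuming the default EliminateAllocationArraySizeLimit:
//   int[] a = new int[8];   // constant, small length: may be scalar replaced
//   int[] b = new int[n];   // unknown length: never scalar replaced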
819 scalar_replaceable = false;
820 }
821 }
822 } else { // Allocate instance
823 if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
870 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
871 // It does not escape if the object is always allocated.
872 es = PointsToNode::NoEscape;
873 } else {
874 // It escapes globally if the object could be loaded from the cache.
875 es = PointsToNode::GlobalEscape;
876 }
877 add_java_object(call, es);
878 } else {
879 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
880 call_analyzer->copy_dependencies(_compile->dependencies());
881 if (call_analyzer->is_return_allocated()) {
882 // Returns a newly allocated unescaped object, simply
883 // update dependency information.
884 // Mark it as NoEscape so that objects referenced by
885 // its fields will be marked as NoEscape at least.
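// (A typical case is a factory method, e.g. (hypothetical)
//    static Point make(int x, int y) { return new Point(x, y); }
//  where the bytecode analyzer proves the return value is freshly allocated.)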
886 add_java_object(call, PointsToNode::NoEscape);
887 ptnode_adr(call_idx)->set_scalar_replaceable(false);
888 } else {
889 // Determine whether any arguments are returned.
890 const TypeTuple* d = call->tf()->domain();
891 bool ret_arg = false;
892 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
893 if (d->field_at(i)->isa_ptr() != NULL &&
894 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
895 ret_arg = true;
896 break;
897 }
898 }
899 if (ret_arg) {
900 add_local_var(call, PointsToNode::ArgEscape);
901 } else {
902 // Returns unknown object.
903 map_ideal_node(call, phantom_obj);
904 }
905 }
906 }
907 } else {
908 // Another type of call, assume the worst case:
909 // returned value is unknown and globally escapes.
910 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
917 switch (call->Opcode()) {
918 #ifdef ASSERT
919 case Op_Allocate:
920 case Op_AllocateArray:
921 case Op_Lock:
922 case Op_Unlock:
923 assert(false, "should be done already");
924 break;
925 #endif
926 case Op_ArrayCopy:
927 case Op_CallLeafNoFP:
928 // Most array copies are ArrayCopy nodes at this point but there
929 // are still a few direct calls to the copy subroutines (See
930 // PhaseStringOpts::copy_string())
931 is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
932 call->as_CallLeaf()->is_call_to_arraycopystub();
933 // fall through
934 case Op_CallLeaf: {
935 // Stub calls: objects do not escape, but they are not scalar replaceable.
936 // Adjust escape state for outgoing arguments.
937 const TypeTuple* d = call->tf()->domain();
938 bool src_has_oops = false;
939 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
940 const Type* at = d->field_at(i);
941 Node *arg = call->in(i);
942 if (arg == NULL) {
943 continue;
944 }
945 const Type *aat = _igvn->type(arg);
946 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
947 continue;
948 if (arg->is_AddP()) {
949 //
950 // The inline_native_clone() case when the arraycopy stub is called
951 // after the allocation before Initialize and CheckCastPP nodes.
952 // Or normal arraycopy for object arrays case.
953 //
954 // Set AddP's base (Allocate) as not scalar replaceable since
955 // pointer to the base (with offset) is passed as argument.
956 //
957 arg = get_addp_base(arg);
958 }
959 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
960 assert(arg_ptn != NULL, "should be registered");
961 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
962 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
963 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
964 aat->isa_ptr() != NULL, "expecting a Ptr");
965 bool arg_has_oops = aat->isa_oopptr() &&
966 (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
967 (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
968 if (i == TypeFunc::Parms) {
969 src_has_oops = arg_has_oops;
970 }
971 //
972 // src or dst could be j.l.Object when other is basic type array:
973 //
974 // arraycopy(char[],0,Object*,0,size);
975 // arraycopy(Object*,0,char[],0,size);
976 //
977 // Don't add edges in such cases.
978 //
979 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
980 arg_has_oops && (i > TypeFunc::Parms);
981 #ifdef ASSERT
982 if (!(is_arraycopy ||
983 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
984 (call->as_CallLeaf()->_name != NULL &&
985 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
986 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
987 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
988 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
989 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
990 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
991 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
992 strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
993 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
994 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
995 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
996 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
997 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
998 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
999 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1000 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1001 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1002 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1003 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1004 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1005 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
1006 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0)
1007 ))) {
1008 call->dump();
1009 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1010 }
1011 #endif
1012 // Always process arraycopy's destination object since
1013 // we need to add all possible edges to references in
1014 // source object.
1015 if (arg_esc >= PointsToNode::ArgEscape &&
1016 !arg_is_arraycopy_dest) {
1017 continue;
1018 }
1019 PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1020 if (call->is_ArrayCopy()) {
1021 ArrayCopyNode* ac = call->as_ArrayCopy();
1022 if (ac->is_clonebasic() ||
1023 ac->is_arraycopy_validated() ||
1024 ac->is_copyof_validated() ||
1025 ac->is_copyofrange_validated()) {
1026 es = PointsToNode::NoEscape;
1045 }
1046 }
1047 }
1048 break;
1049 }
1050 case Op_CallStaticJava: {
1051 // For a static call, we know exactly what method is being called.
1052 // Use the bytecode estimator to record the call's escape effects.
1053 #ifdef ASSERT
1054 const char* name = call->as_CallStaticJava()->_name;
1055 assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1056 #endif
1057 ciMethod* meth = call->as_CallJava()->method();
1058 if ((meth != NULL) && meth->is_boxing_method()) {
1059 break; // Boxing methods do not modify any oops.
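// (Boxing methods are the valueOf() family, e.g. Integer.valueOf(int);
//  their results were already handled by add_call_node().)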
1060 }
1061 BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
1062 // fall-through if not a Java method or no analyzer information
1063 if (call_analyzer != NULL) {
1064 PointsToNode* call_ptn = ptnode_adr(call->_idx);
1065 const TypeTuple* d = call->tf()->domain();
1066 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1067 const Type* at = d->field_at(i);
1068 int k = i - TypeFunc::Parms;
1069 Node* arg = call->in(i);
1070 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1071 if (at->isa_ptr() != NULL &&
1072 call_analyzer->is_arg_returned(k)) {
1073 // The call returns arguments.
1074 if (call_ptn != NULL) { // Is call's result used?
1075 assert(call_ptn->is_LocalVar(), "node should be registered");
1076 assert(arg_ptn != NULL, "node should be registered");
1077 add_edge(call_ptn, arg_ptn);
1078 }
1079 }
1080 if (at->isa_oopptr() != NULL &&
1081 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1082 if (!call_analyzer->is_arg_stack(k)) {
1083 // The argument global escapes
1084 set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
1085 } else {
1089 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
1090 }
1091 }
1092 }
1093 }
1094 if (call_ptn != NULL && call_ptn->is_LocalVar()) {
1095 // The call returns arguments.
1096 assert(call_ptn->edge_count() > 0, "sanity");
1097 if (!call_analyzer->is_return_local()) {
1098 // Returns also unknown object.
1099 add_edge(call_ptn, phantom_obj);
1100 }
1101 }
1102 break;
1103 }
1104 }
1105 default: {
1106 // Fall-through here if not a Java method or no analyzer information
1107 // or some other type of call, assume the worst case: all arguments
1108 // globally escape.
1109 const TypeTuple* d = call->tf()->domain();
1110 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1111 const Type* at = d->field_at(i);
1112 if (at->isa_oopptr() != NULL) {
1113 Node* arg = call->in(i);
1114 if (arg->is_AddP()) {
1115 arg = get_addp_base(arg);
1116 }
1117 assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
1118 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
1119 }
1120 }
1121 }
1122 }
1123 }
1124
1125
1126 // Finish Graph construction.
1127 bool ConnectionGraph::complete_connection_graph(
1128 GrowableArray<PointsToNode*>& ptnodes_worklist,
1129 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1610 } else {
1611 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1612 tty->print_cr("----------init store has invalid value -----");
1613 store->dump();
1614 val->dump();
1615 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1616 }
1617 for (EdgeIterator j(val); j.has_next(); j.next()) {
1618 PointsToNode* obj = j.get();
1619 if (obj->is_JavaObject()) {
1620 if (!field->points_to(obj->as_JavaObject())) {
1621 missed_obj = obj;
1622 break;
1623 }
1624 }
1625 }
1626 }
1627 if (missed_obj != NULL) {
1628 tty->print_cr("----------field---------------------------------");
1629 field->dump();
1630 tty->print_cr("----------missed reference to object------------");
1631 missed_obj->dump();
1632 tty->print_cr("----------object referenced by init store-------");
1633 store->dump();
1634 val->dump();
1635 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1636 }
1637 }
1638 #endif
1639 } else {
1640 // There could be initializing stores which follow allocation.
1641 // For example, a volatile field store is not collected
1642 // by Initialize node.
1643 //
1644 // Need to check for dependent loads to separate such stores from
1645 // stores which follow loads. For now, add initial value NULL so
1646 // that compare pointers optimization works correctly.
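// For example (hypothetical):
//   class A { volatile Object f; A(Object o) { f = o; } }
// the volatile store to 'f' is not captured by the Initialize node.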
1647 }
1648 }
1649 if (value == NULL) {
1650 // A field's initializing value was not recorded. Add NULL.
1651 if (add_edge(field, null_obj)) {
1652 // New edge was added
2048 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2049 assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
2050 PointsToNode* ptadr = _nodes.at(n->_idx);
2051 if (ptadr != NULL) {
2052 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2053 return;
2054 }
2055 Compile* C = _compile;
2056 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2057 _nodes.at_put(n->_idx, ptadr);
2058 // Add edge from arraycopy node to source object.
2059 (void)add_edge(ptadr, src);
2060 src->set_arraycopy_src();
2061 // Add edge from destination object to arraycopy node.
2062 (void)add_edge(dst, ptadr);
2063 dst->set_arraycopy_dst();
2064 }
2065
2066 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2067 const Type* adr_type = n->as_AddP()->bottom_type();
2068 BasicType bt = T_INT;
2069 if (offset == Type::OffsetBot) {
2070 // Check only oop fields.
2071 if (!adr_type->isa_aryptr() ||
2072 (adr_type->isa_aryptr()->klass() == NULL) ||
2073 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2074 // OffsetBot is used to reference array's element. Ignore first AddP.
2075 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2076 bt = T_OBJECT;
2077 }
2078 }
2079 } else if (offset != oopDesc::klass_offset_in_bytes()) {
2080 if (adr_type->isa_instptr()) {
2081 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2082 if (field != NULL) {
2083 bt = field->layout_type();
2084 } else {
2085 // Check for unsafe oop field access
2086 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2087 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2088 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2089 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2090 bt = T_OBJECT;
2091 (*unsafe) = true;
2092 }
2093 }
2094 } else if (adr_type->isa_aryptr()) {
2095 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2096 // Ignore array length load.
2097 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2098 // Ignore first AddP.
2099 } else {
2100 const Type* elemtype = adr_type->isa_aryptr()->elem();
2101 bt = elemtype->array_element_basic_type();
2102 }
2103 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2104 // Allocation initialization, ThreadLocal field access, unsafe access
2105 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2106 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2107 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2108 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2109 bt = T_OBJECT;
2110 }
2111 }
2112 }
2113 return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
2114 }
2115
2116 // Returns the unique JavaObject node pointed to, or NULL.
2117 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2118 assert(!_collecting, "should not be called while the graph is being constructed");
2119 // If the node was created after the escape computation we can't answer.
2120 uint idx = n->_idx;
2121 if (idx >= nodes_size()) {
2122 return NULL;
2123 }
2124 PointsToNode* ptn = ptnode_adr(idx);
2125 if (ptn->is_JavaObject()) {
2126 return ptn->as_JavaObject();
2127 }
2128 assert(ptn->is_LocalVar(), "sanity");
2129 // Check all java objects it points to.
2130 JavaObjectNode* jobj = NULL;
2131 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2132 PointsToNode* e = i.get();
2133 if (e->is_JavaObject()) {
2134 if (jobj == NULL) {
2135 jobj = e->as_JavaObject();
2136 } else if (jobj != e) {
2137 return NULL;
2138 }
2235 if (i.get() == jobj)
2236 return true;
2237 }
2238 return false;
2239 }
2240 #endif
2241
2242 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2243 const Type *adr_type = phase->type(adr);
2244 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
2245 adr->in(AddPNode::Address)->is_Proj() &&
2246 adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
2247 // We are computing a raw address for a store captured by an Initialize;
2248 // compute an appropriate address type. AddP cases #3 and #5 (see below).
2249 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2250 assert(offs != Type::OffsetBot ||
2251 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2252 "offset must be a constant or it is initialization of array");
2253 return offs;
2254 }
2255 const TypePtr *t_ptr = adr_type->isa_ptr();
2256 assert(t_ptr != NULL, "must be a pointer type");
2257 return t_ptr->offset();
2258 }
2259
2260 Node* ConnectionGraph::get_addp_base(Node *addp) {
2261 assert(addp->is_AddP(), "must be AddP");
2262 //
2263 // AddP cases for Base and Address inputs:
2264 // case #1. Direct object's field reference:
2265 // Allocate
2266 // |
2267 // Proj #5 ( oop result )
2268 // |
2269 // CheckCastPP (cast to instance type)
2270 // | |
2271 // AddP ( base == address )
2272 //
2273 // case #2. Indirect object's field reference:
2274 // Phi
2275 // |
2276 // CastPP (cast to instance type)
2277 // | |
2392 }
2393 return NULL;
2394 }
2395
2396 //
2397 // Adjust the type and inputs of an AddP which computes the
2398 // address of a field of an instance
2399 //
2400 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2401 PhaseGVN* igvn = _igvn;
2402 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2403 assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2404 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2405 if (t == NULL) {
2406 // We are computing a raw address for a store captured by an Initialize;
2407 // compute an appropriate address type (cases #3 and #5).
2408 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2409 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2410 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2411 assert(offs != Type::OffsetBot, "offset must be a constant");
2412 t = base_t->add_offset(offs)->is_oopptr();
2413 }
2414 int inst_id = base_t->instance_id();
2415 assert(!t->is_known_instance() || t->instance_id() == inst_id,
2416 "old type must be non-instance or match new type");
2417
2418 // The type 't' could be subclass of 'base_t'.
2419 // As a result t->offset() could be larger than base_t's size and would
2420 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2421 // constructor verifies correctness of the offset.
2422 //
2423 // It could happen on a subclass's branch (from type profile
2424 // inlining) which was not eliminated during parsing since the exactness
2425 // of the allocation type was not propagated to the subclass type check.
2426 //
2427 // Or the type 't' could be unrelated to 'base_t' at all.
2428 // It could happen when the CHA type differs from the MDO type on a dead
2429 // path (for example, from an instanceof check) which is not collapsed during parsing.
2430 //
2431 // Do nothing for such AddP node and don't process its users since
2432 // this code branch will go away.
2433 //
2434 if (!t->is_known_instance() &&
2435 !base_t->klass()->is_subtype_of(t->klass())) {
2436 return false; // bail out
2437 }
2438 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
2439 // Do NOT remove the next line: ensure a new alias index is allocated
2440 // for the instance type. Note: C++ will not remove it since the call
2441 // has side effect.
2442 int alias_idx = _compile->get_alias_index(tinst);
2443 igvn->set_type(addp, tinst);
2444 // record the allocation in the node map
2445 set_map(addp, get_map(base->_idx));
2446 // Set addp's Base and Address to 'base'.
2447 Node *abase = addp->in(AddPNode::Base);
2448 Node *adr = addp->in(AddPNode::Address);
2449 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2450 adr->in(0)->_idx == (uint)inst_id) {
2451 // Skip AddP cases #3 and #5.
2452 } else {
2453 assert(!abase->is_top(), "sanity"); // AddP case #3
2454 if (abase != base) {
2455 igvn->hash_delete(addp);
2456 addp->set_req(AddPNode::Base, base);
2457 if (abase == adr) {
2458 addp->set_req(AddPNode::Address, base);
3122 igvn->hash_delete(tn);
3123 igvn->set_type(tn, tn_type);
3124 tn->set_type(tn_type);
3125 igvn->hash_insert(tn);
3126 record_for_optimizer(n);
3127 } else {
3128 assert(tn_type == TypePtr::NULL_PTR ||
3129 (tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass())),
3130 "unexpected type");
3131 continue; // Skip dead path with different type
3132 }
3133 }
3134 } else {
3135 debug_only(n->dump();)
3136 assert(false, "EA: unexpected node");
3137 continue;
3138 }
3139 // push allocation's users on appropriate worklist
3140 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3141 Node *use = n->fast_out(i);
3142 if (use->is_Mem() && use->in(MemNode::Address) == n) {
3143 // Load/store to instance's field
3144 memnode_worklist.append_if_missing(use);
3145 } else if (use->is_MemBar()) {
3146 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3147 memnode_worklist.append_if_missing(use);
3148 }
3149 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3150 Node* addp2 = find_second_addp(use, n);
3151 if (addp2 != NULL) {
3152 alloc_worklist.append_if_missing(addp2);
3153 }
3154 alloc_worklist.append_if_missing(use);
3155 } else if (use->is_Phi() ||
3156 use->is_CheckCastPP() ||
3157 use->is_EncodeNarrowPtr() ||
3158 use->is_DecodeNarrowPtr() ||
3159 BarrierSet::barrier_set()->barrier_set_c2()->escape_is_barrier_node(use) ||
3160 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3161 alloc_worklist.append_if_missing(use);
3162 #ifdef ASSERT
3163 } else if (use->is_Mem()) {
3164 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3165 } else if (use->is_MergeMem()) {
3166 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3167 } else if (use->is_SafePoint()) {
3168 // Look for MergeMem nodes for calls which reference unique allocation
3169 // (through CheckCastPP nodes) even for debug info.
3170 Node* m = use->in(TypeFunc::Memory);
3171 if (m->is_MergeMem()) {
3172 assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3173 }
3174 } else if (use->Opcode() == Op_EncodeISOArray) {
3175 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3176 // EncodeISOArray overwrites destination array
3177 memnode_worklist.append_if_missing(use);
3178 }
3179 } else {
3180 uint op = use->Opcode();
3181 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3182 (use->in(MemNode::Memory) == n)) {
3183 // They overwrite the memory edge corresponding to the destination array.
3184 memnode_worklist.append_if_missing(use);
3185 } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3186 op == Op_CastP2X || op == Op_StoreCM ||
3187 op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
3188 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3189 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3190 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3191 n->dump();
3192 use->dump();
3193 assert(false, "EA: missing allocation reference path");
3194 }
3195 #endif
3196 }
3197 }
3198
3199 }
3200
3201 // Go over all ArrayCopy nodes and if one of the inputs has a unique
3202 // type, record it in the ArrayCopy node so we know what memory this
3203 // node uses/modifies.
3204 for (int next = 0; next < arraycopy_worklist.length(); next++) {
3205 ArrayCopyNode* ac = arraycopy_worklist.at(next);
3206 Node* dest = ac->in(ArrayCopyNode::Dest);
3207 if (dest->is_AddP()) {
3208 dest = get_addp_base(dest);
3209 }
3210 JavaObjectNode* jobj = unique_java_object(dest);
3237 // compute new values for Memory inputs (the Memory inputs are not
3238 // actually updated until phase 4.)
3239 if (memnode_worklist.length() == 0)
3240 return; // nothing to do
3241 while (memnode_worklist.length() != 0) {
3242 Node *n = memnode_worklist.pop();
3243 if (visited.test_set(n->_idx))
3244 continue;
3245 if (n->is_Phi() || n->is_ClearArray()) {
3246 // we don't need to do anything, but the users must be pushed
3247 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3248 // we don't need to do anything, but the users must be pushed
3249 n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
3250 if (n == NULL)
3251 continue;
3252 } else if (n->Opcode() == Op_StrCompressedCopy ||
3253 n->Opcode() == Op_EncodeISOArray) {
3254 // get the memory projection
3255 n = n->find_out_with(Op_SCMemProj);
3256 assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
3257 } else {
3258 assert(n->is_Mem(), "memory node required.");
3259 Node *addr = n->in(MemNode::Address);
3260 const Type *addr_t = igvn->type(addr);
3261 if (addr_t == Type::TOP)
3262 continue;
3263 assert (addr_t->isa_ptr() != NULL, "pointer type required.");
3264 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3265 assert ((uint)alias_idx < new_index_end, "wrong alias index");
3266 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3267 if (_compile->failing()) {
3268 return;
3269 }
3270 if (mem != n->in(MemNode::Memory)) {
3271 // We delay the memory edge update since we need old one in
3272 // MergeMem code below when instances memory slices are separated.
3273 set_map(n, mem);
3274 }
3275 if (n->is_Load()) {
3276 continue; // don't push users
3277 } else if (n->is_LoadStore()) {
3278 // get the memory projection
3279 n = n->find_out_with(Op_SCMemProj);
3280 assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
3281 }
3282 }
3283 // push user on appropriate worklist
3284 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3285 Node *use = n->fast_out(i);
3286 if (use->is_Phi() || use->is_ClearArray()) {
3287 memnode_worklist.append_if_missing(use);
3288 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3289 if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
3290 continue;
3291 memnode_worklist.append_if_missing(use);
3292 } else if (use->is_MemBar()) {
3293 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3294 memnode_worklist.append_if_missing(use);
3295 }
3296 #ifdef ASSERT
3297 } else if (use->is_Mem()) {
3298 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3299 } else if (use->is_MergeMem()) {
3300 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3301 } else if (use->Opcode() == Op_EncodeISOArray) {
3302 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3303 // EncodeISOArray overwrites destination array
3304 memnode_worklist.append_if_missing(use);
3305 }
3306 } else {
3307 uint op = use->Opcode();
3308 if ((use->in(MemNode::Memory) == n) &&
3309 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
3310 // They overwrite the memory edge corresponding to the destination array.
3311 memnode_worklist.append_if_missing(use);
3312 } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
3313 op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
3314 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3315 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
3316 n->dump();
3317 use->dump();
3318 assert(false, "EA: missing memory path");
3319 }
3320 #endif
3321 }
3322 }
3323 }
3324
3325 // Phase 3: Process MergeMem nodes from mergemem_worklist.
3326 // Walk each memory slice moving the first node encountered of each
3327 // instance type to the input corresponding to its alias index.
3328 uint length = _mergemem_worklist.length();
3329 for (uint next = 0; next < length; ++next) {
3330 MergeMemNode* nmm = _mergemem_worklist.at(next);
3331 assert(!visited.test_set(nmm->_idx), "should not be visited before");
3332 // Note: we don't want to use MergeMemStream here because we only want to
3333 // scan inputs which exist at the start, not ones we add during processing.
3334 // Note 2: MergeMem may already contain instance memory slices added
3335 // during find_inst_mem() call when memory nodes were processed above.
3336 igvn->hash_delete(nmm);
3337 uint nslices = MIN2(nmm->req(), new_index_start);
3338 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
3339 Node* mem = nmm->in(i);
3340 Node* cur = NULL;
3341 if (mem == NULL || mem->is_top())
3342 continue;
3343 // First, update mergemem by moving memory nodes to corresponding slices
3344 // if their type became more precise since this mergemem was created.
3345 while (mem->is_Mem()) {
3346 const Type *at = igvn->type(mem->in(MemNode::Address));
3347 if (at != Type::TOP) {
3379 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
3380 Node* result = step_through_mergemem(nmm, ni, tinst);
3381 if (result == nmm->base_memory()) {
3382 // Didn't find instance memory, search through general slice recursively.
3383 result = nmm->memory_at(_compile->get_general_index(ni));
3384 result = find_inst_mem(result, ni, orig_phis);
3385 if (_compile->failing()) {
3386 return;
3387 }
3388 nmm->set_memory_at(ni, result);
3389 }
3390 }
3391 igvn->hash_insert(nmm);
3392 record_for_optimizer(nmm);
3393 }
3394
3395 // Phase 4: Update the inputs of non-instance memory Phis and
3396 // the Memory input of memnodes
3397 // First update the inputs of any non-instance Phi's from
3398 // which we split out an instance Phi. Note we don't have
3399 // to recursively process Phi's encountered on the input memory
3400 // chains as is done in split_memory_phi() since they will
3401 // also be processed here.
3402 for (int j = 0; j < orig_phis.length(); j++) {
3403 PhiNode *phi = orig_phis.at(j);
3404 int alias_idx = _compile->get_alias_index(phi->adr_type());
3405 igvn->hash_delete(phi);
3406 for (uint i = 1; i < phi->req(); i++) {
3407 Node *mem = phi->in(i);
3408 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
3409 if (_compile->failing()) {
3410 return;
3411 }
3412 if (mem != new_mem) {
3413 phi->set_req(i, new_mem);
3414 }
3415 }
3416 igvn->hash_insert(phi);
3417 record_for_optimizer(phi);
3418 }
3419
|
124 GrowableArray<JavaObjectNode*> non_escaped_worklist;
125 GrowableArray<FieldNode*> oop_fields_worklist;
126 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
127
128 { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
129
130 // 1. Populate Connection Graph (CG) with PointsTo nodes.
131 ideal_nodes.map(C->live_nodes(), NULL); // preallocate space
132 // Initialize worklist
133 if (C->root() != NULL) {
134 ideal_nodes.push(C->root());
135 }
136 // Processed ideal nodes are unique on ideal_nodes list
137 // but several ideal nodes are mapped to the phantom_obj.
138 // To avoid duplicated entries on the following worklists
139 // add the phantom_obj only once to them.
140 ptnodes_worklist.append(phantom_obj);
141 java_objects_worklist.append(phantom_obj);
142 for (uint next = 0; next < ideal_nodes.size(); ++next) {
143 Node* n = ideal_nodes.at(next);
144 if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
145 !n->in(MemNode::Address)->is_AddP() &&
146 _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
147 // Load/Store at mark word address is at offset 0 so has no AddP which confuses EA
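// Insert a trivial AddP (base == address, offset 0) so the access is
// modeled like an ordinary field access at offset 0.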
148 Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
149 _igvn->register_new_node_with_optimizer(addp);
150 _igvn->replace_input_of(n, MemNode::Address, addp);
151 ideal_nodes.push(addp);
152 _nodes.at_put_grow(addp->_idx, NULL, NULL);
153 }
154 // Create PointsTo nodes and add them to Connection Graph. Called
155 // only once per ideal node since ideal_nodes is Unique_Node list.
156 add_node_to_connection_graph(n, &delayed_worklist);
157 PointsToNode* ptn = ptnode_adr(n->_idx);
158 if (ptn != NULL && ptn != phantom_obj) {
159 ptnodes_worklist.append(ptn);
160 if (ptn->is_JavaObject()) {
161 java_objects_worklist.append(ptn->as_JavaObject());
162 if ((n->is_Allocate() || n->is_CallStaticJava()) &&
163 (ptn->escape_state() < PointsToNode::GlobalEscape)) {
164 // Only the results of allocations and Java static calls are interesting.
165 non_escaped_worklist.append(ptn->as_JavaObject());
166 }
167 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
168 oop_fields_worklist.append(ptn->as_Field());
169 }
170 }
171 if (n->is_MergeMem()) {
172 // Collect all MergeMem nodes to add memory slices for
173 // scalar replaceable objects in split_unique_types().
174 _mergemem_worklist.append(n->as_MergeMem());
175 } else if (OptimizePtrCompare && n->is_Cmp() &&
176 ((n->Opcode() == Op_CmpP && ((CmpPNode*)n)->has_perturbed_operand() == NULL) ||
177 n->Opcode() == Op_CmpN)) {
178 // Collect compare pointers nodes.
179 ptr_cmp_worklist.append(n);
180 } else if (n->is_MemBarStoreStore()) {
181 // Collect all MemBarStoreStore nodes so that depending on the
182 // escape status of the associated Allocate node some of them
183 // may be eliminated.
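// (For example, the StoreStore barrier emitted when a constructor
// initializes final fields may be removed if the allocated object
// never escapes its allocating thread.)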
184 storestore_worklist.append(n);
185 } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
186 (n->req() > MemBarNode::Precedent)) {
187 record_for_optimizer(n);
188 #ifdef ASSERT
189 } else if (n->is_AddP()) {
190 // Collect address nodes for graph verification.
191 addp_worklist.append(n);
192 #endif
193 } else if (n->is_ArrayCopy()) {
194 // Keep a list of ArrayCopy nodes so that if one of its inputs is
195 // non-escaping, we can record a unique type.
196 arraycopy_worklist.append(n->as_ArrayCopy());
197 }
367 // Put Lock and Unlock nodes on IGVN worklist to process them during
368 // first IGVN optimization when escape information is still available.
369 record_for_optimizer(n);
370 } else if (n->is_Allocate()) {
371 add_call_node(n->as_Call());
372 record_for_optimizer(n);
373 } else {
374 if (n->is_CallStaticJava()) {
375 const char* name = n->as_CallStaticJava()->_name;
376 if (name != NULL && strcmp(name, "uncommon_trap") == 0)
377 return; // Skip uncommon traps
378 }
379 // Don't mark as processed since call's arguments have to be processed.
380 delayed_worklist->push(n);
381 // Check if a call returns an object.
382 if ((n->as_Call()->returns_pointer() &&
383 n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
384 (n->is_CallStaticJava() &&
385 n->as_CallStaticJava()->is_boxing_method())) {
386 add_call_node(n->as_Call());
387 } else if (n->as_Call()->tf()->returns_value_type_as_fields()) {
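// A call returning a value type as fields has one projection per
// returned field; it returns an oop if any such projection is a pointer.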
388 bool returns_oop = false;
389 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
390 ProjNode* pn = n->fast_out(i)->as_Proj();
391 if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
392 returns_oop = true;
393 }
394 }
395 if (returns_oop) {
396 add_call_node(n->as_Call());
397 }
398 }
399 }
400 return;
401 }
402 // Put this check here to process call arguments since some call nodes
403 // point to phantom_obj.
404 if (n_ptn == phantom_obj || n_ptn == null_obj)
405 return; // Skip predefined nodes.
406
407 int opcode = n->Opcode();
408 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
409 if (gc_handled) {
410 return; // Ignore node if already handled by GC.
411 }
412 switch (opcode) {
413 case Op_AddP: {
414 Node* base = get_addp_base(n);
415 PointsToNode* ptn_base = ptnode_adr(base->_idx);
416 // Field nodes are created for all field types. They are used in
417 // adjust_scalar_replaceable_state() and split_unique_types().
486 case Op_PartialSubtypeCheck: {
487 // Produces Null or notNull and is used only in CmpP so
488 // phantom_obj could be used.
489 map_ideal_node(n, phantom_obj); // Result is unknown
490 break;
491 }
492 case Op_Phi: {
493 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
494 // ThreadLocal has RawPtr type.
495 const Type* t = n->as_Phi()->type();
496 if (t->make_ptr() != NULL) {
497 add_local_var(n, PointsToNode::NoEscape);
498 // Do not add edges during first iteration because some may
499 // not be defined yet.
500 delayed_worklist->push(n);
501 }
502 break;
503 }
504 case Op_Proj: {
505 // we are only interested in the oop result projection from a call
506 if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
507 (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
508 assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
509 n->in(0)->as_Call()->tf()->returns_value_type_as_fields(), "what kind of oop return is it?");
510 add_local_var_and_edge(n, PointsToNode::NoEscape,
511 n->in(0), delayed_worklist);
512 }
513 break;
514 }
515 case Op_Rethrow: // Exception object escapes
516 case Op_Return: {
517 if (n->req() > TypeFunc::Parms &&
518 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
519 // Treat Return value as LocalVar with GlobalEscape escape state.
520 add_local_var_and_edge(n, PointsToNode::GlobalEscape,
521 n->in(TypeFunc::Parms), delayed_worklist);
522 }
523 break;
524 }
525 case Op_CompareAndExchangeP:
526 case Op_CompareAndExchangeN:
527 case Op_GetAndSetP:
528 case Op_GetAndSetN: {
529 add_objload_to_connection_graph(n, delayed_worklist);
644 // ThreadLocal has RawPtr type.
645 const Type* t = n->as_Phi()->type();
646 if (t->make_ptr() != NULL) {
647 for (uint i = 1; i < n->req(); i++) {
648 Node* in = n->in(i);
649 if (in == NULL)
650 continue; // ignore NULL
651 Node* uncast_in = in->uncast();
652 if (uncast_in->is_top() || uncast_in == n)
653 continue; // ignore top or inputs which go back this node
654 PointsToNode* ptn = ptnode_adr(in->_idx);
655 assert(ptn != NULL, "node should be registered");
656 add_edge(n_ptn, ptn);
657 }
658 break;
659 }
660 ELSE_FAIL("Op_Phi");
661 }
662 case Op_Proj: {
663 // we are only interested in the oop result projection from a call
664 if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
665 (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
666 assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
667 n->in(0)->as_Call()->tf()->returns_value_type_as_fields(), "what kind of oop return is it?");
668 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
669 break;
670 }
671 ELSE_FAIL("Op_Proj");
672 }
673 case Op_Rethrow: // Exception object escapes
674 case Op_Return: {
675 if (n->req() > TypeFunc::Parms &&
676 _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
677 // Treat Return value as LocalVar with GlobalEscape escape state.
678 add_local_var_and_edge(n, PointsToNode::GlobalEscape,
679 n->in(TypeFunc::Parms), NULL);
680 break;
681 }
682 ELSE_FAIL("Op_Return");
683 }
684 case Op_StoreP:
685 case Op_StoreN:
686 case Op_StoreNKlass:
687 case Op_StorePConditional:
809 return true;
810 } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
811 // Stored value escapes in unsafe access.
812 Node* val = n->in(MemNode::ValueIn);
813 PointsToNode* ptn = ptnode_adr(val->_idx);
814 assert(ptn != NULL, "node should be registered");
815 set_escape_state(ptn, PointsToNode::GlobalEscape);
816 // Add edge to object for unsafe access with offset.
817 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
818 assert(adr_ptn != NULL, "node should be registered");
819 if (adr_ptn->is_Field()) {
820 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
821 add_edge(adr_ptn, ptn);
822 }
823 return true;
824 }
825 return false;
826 }
827
828 void ConnectionGraph::add_call_node(CallNode* call) {
829 assert(call->returns_pointer() || call->tf()->returns_value_type_as_fields(), "only for calls which return a pointer or a value type as fields");
830 uint call_idx = call->_idx;
831 if (call->is_Allocate()) {
832 Node* k = call->in(AllocateNode::KlassNode);
833 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
834 assert(kt != NULL, "TypeKlassPtr required.");
835 ciKlass* cik = kt->klass();
836 PointsToNode::EscapeState es = PointsToNode::NoEscape;
837 bool scalar_replaceable = true;
838 if (call->is_AllocateArray()) {
839 if (!cik->is_array_klass()) { // StressReflectiveCode
840 es = PointsToNode::GlobalEscape;
841 } else {
842 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
843 if (length < 0 || length > EliminateAllocationArraySizeLimit) {
844 // Not scalar replaceable if the length is not constant or too big.
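// For example, assuming the default EliminateAllocationArraySizeLimit:
//   int[] a = new int[8];   // constant, small length: may be scalar replaced
//   int[] b = new int[n];   // unknown length: never scalar replaced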
845 scalar_replaceable = false;
846 }
847 }
848 } else { // Allocate instance
849 if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
896 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
897 // It does not escape if the object is always allocated.
898 es = PointsToNode::NoEscape;
899 } else {
900 // It escapes globally if the object could be loaded from the cache.
901 es = PointsToNode::GlobalEscape;
902 }
903 add_java_object(call, es);
904 } else {
905 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
906 call_analyzer->copy_dependencies(_compile->dependencies());
907 if (call_analyzer->is_return_allocated()) {
908 // Returns a newly allocated unescaped object, simply
909 // update dependency information.
910 // Mark it as NoEscape so that objects referenced by
911 // its fields will be marked as NoEscape at least.
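// (A typical case is a factory method, e.g. (hypothetical)
//    static Point make(int x, int y) { return new Point(x, y); }
//  where the bytecode analyzer proves the return value is freshly allocated.)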
912 add_java_object(call, PointsToNode::NoEscape);
913 ptnode_adr(call_idx)->set_scalar_replaceable(false);
914 } else {
915 // Determine whether any arguments are returned.
916 const TypeTuple* d = call->tf()->domain_cc();
917 bool ret_arg = false;
918 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
919 if (d->field_at(i)->isa_ptr() != NULL &&
920 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
921 ret_arg = true;
922 break;
923 }
924 }
925 if (ret_arg) {
926 add_local_var(call, PointsToNode::ArgEscape);
927 } else {
928 // Returns unknown object.
929 map_ideal_node(call, phantom_obj);
930 }
931 }
932 }
933 } else {
934 // Another type of call, assume the worst case:
935 // returned value is unknown and globally escapes.
936 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
943 switch (call->Opcode()) {
944 #ifdef ASSERT
945 case Op_Allocate:
946 case Op_AllocateArray:
947 case Op_Lock:
948 case Op_Unlock:
949 assert(false, "should be done already");
950 break;
951 #endif
952 case Op_ArrayCopy:
953 case Op_CallLeafNoFP:
954 // Most array copies are ArrayCopy nodes at this point but there
955 // are still a few direct calls to the copy subroutines (See
956 // PhaseStringOpts::copy_string())
957 is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
958 call->as_CallLeaf()->is_call_to_arraycopystub();
959 // fall through
960 case Op_CallLeaf: {
961 // Stub calls: objects do not escape, but they are not scalar replaceable.
962 // Adjust escape state for outgoing arguments.
963 const TypeTuple* d = call->tf()->domain_sig();
964 bool src_has_oops = false;
965 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
966 const Type* at = d->field_at(i);
967 Node *arg = call->in(i);
968 if (arg == NULL) {
969 continue;
970 }
971 const Type *aat = _igvn->type(arg);
972 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
973 continue;
974 if (arg->is_AddP()) {
975 //
976 // The inline_native_clone() case when the arraycopy stub is called
977 // after the allocation before Initialize and CheckCastPP nodes.
978 // Or normal arraycopy for object arrays case.
979 //
980 // Set AddP's base (Allocate) as not scalar replaceable since
981 // pointer to the base (with offset) is passed as argument.
982 //
983 arg = get_addp_base(arg);
984 }
985 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
986 assert(arg_ptn != NULL, "should be registered");
987 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
988 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
989 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
990 aat->isa_ptr() != NULL, "expecting a Ptr");
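// A flattened value type array also carries oops if its element
// type contains oop fields (checked below via isa_valuetype()).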
991 bool arg_has_oops = aat->isa_oopptr() &&
992 (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
993 (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()) ||
994 (aat->isa_aryptr() && aat->isa_aryptr()->elem() != NULL &&
995 aat->isa_aryptr()->elem()->isa_valuetype() &&
996 aat->isa_aryptr()->elem()->isa_valuetype()->value_klass()->contains_oops()));
997 if (i == TypeFunc::Parms) {
998 src_has_oops = arg_has_oops;
999 }
1000 //
1001 // src or dst could be j.l.Object when other is basic type array:
1002 //
1003 // arraycopy(char[],0,Object*,0,size);
1004 // arraycopy(Object*,0,char[],0,size);
1005 //
1006 // Don't add edges in such cases.
1007 //
1008 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1009 arg_has_oops && (i > TypeFunc::Parms);
1010 #ifdef ASSERT
1011 if (!(is_arraycopy ||
1012 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1013 (call->as_CallLeaf()->_name != NULL &&
1014 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1015 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1016 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
1017 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
1018 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
1019 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
1020 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
1021 strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
1022 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1023 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1024 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1025 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1026 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1027 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1028 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1029 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1030 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1031 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1032 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1033 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1034 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
1035 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1036 strcmp(call->as_CallLeaf()->_name, "load_unknown_value") == 0 ||
1037 strcmp(call->as_CallLeaf()->_name, "store_unknown_value") == 0)
1038 ))) {
1039 call->dump();
1040 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1041 }
1042 #endif
1043 // Always process arraycopy's destination object since
1044 // we need to add all possible edges to references in
1045 // source object.
1046 if (arg_esc >= PointsToNode::ArgEscape &&
1047 !arg_is_arraycopy_dest) {
1048 continue;
1049 }
1050 PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1051 if (call->is_ArrayCopy()) {
1052 ArrayCopyNode* ac = call->as_ArrayCopy();
1053 if (ac->is_clonebasic() ||
1054 ac->is_arraycopy_validated() ||
1055 ac->is_copyof_validated() ||
1056 ac->is_copyofrange_validated()) {
1057 es = PointsToNode::NoEscape;
1076 }
1077 }
1078 }
1079 break;
1080 }
1081 case Op_CallStaticJava: {
1082 // For a static call, we know exactly what method is being called.
1083 // Use the bytecode estimator to record the call's escape effects.
1084 #ifdef ASSERT
1085 const char* name = call->as_CallStaticJava()->_name;
1086 assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1087 #endif
1088 ciMethod* meth = call->as_CallJava()->method();
1089 if ((meth != NULL) && meth->is_boxing_method()) {
1090 break; // Boxing methods do not modify any oops.
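// (Boxing methods are the valueOf() family, e.g. Integer.valueOf(int);
//  their results were already handled by add_call_node().)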
1091 }
1092 BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
1093 // fall-through if not a Java method or no analyzer information
1094 if (call_analyzer != NULL) {
1095 PointsToNode* call_ptn = ptnode_adr(call->_idx);
1096 const TypeTuple* d = call->tf()->domain_cc();
1097 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1098 const Type* at = d->field_at(i);
1099 int k = i - TypeFunc::Parms;
1100 Node* arg = call->in(i);
1101 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1102 if (at->isa_ptr() != NULL &&
1103 call_analyzer->is_arg_returned(k)) {
1104 // The call returns arguments.
1105 if (call_ptn != NULL) { // Is call's result used?
1106 assert(call_ptn->is_LocalVar(), "node should be registered");
1107 assert(arg_ptn != NULL, "node should be registered");
1108 add_edge(call_ptn, arg_ptn);
1109 }
1110 }
1111 if (at->isa_oopptr() != NULL &&
1112 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1113 if (!call_analyzer->is_arg_stack(k)) {
1114 // The argument global escapes
1115 set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
1116 } else {
1120 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
1121 }
1122 }
1123 }
1124 }
1125 if (call_ptn != NULL && call_ptn->is_LocalVar()) {
1126 // The call returns arguments.
1127 assert(call_ptn->edge_count() > 0, "sanity");
1128 if (!call_analyzer->is_return_local()) {
1129           // The call may also return an unknown object.
1130 add_edge(call_ptn, phantom_obj);
1131 }
1132 }
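        // (Hypothetical illustration of the logic above: for
        //    static Object identity(Object o) { return o; }
        //  the bytecode analyzer reports the argument as returned and as not
        //  escaping globally, so the call's result node gets an edge to the
        //  argument's node and the argument stays at ArgEscape.)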
1133 break;
1134 }
1135 }
1136 default: {
1137       // We fall through to here if this is not a Java method, there is no
1138       // analyzer information, or this is some other type of call. Assume the
1139       // worst case: all arguments globally escape.
1140 const TypeTuple* d = call->tf()->domain_cc();
1141 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1142 const Type* at = d->field_at(i);
1143 if (at->isa_oopptr() != NULL) {
1144 Node* arg = call->in(i);
1145 if (arg->is_AddP()) {
1146 arg = get_addp_base(arg);
1147 }
1148 assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
1149 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
1150 }
1151 }
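      // (E.g., a call into an unanalyzable native or runtime routine must be
      //  assumed to publish every oop argument, hence the GlobalEscape marking
      //  above — illustrative example.)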
1152 }
1153 }
1154 }
1155
1156
1157 // Finish Graph construction.
1158 bool ConnectionGraph::complete_connection_graph(
1159 GrowableArray<PointsToNode*>& ptnodes_worklist,
1160 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1641 } else {
1642 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1643 tty->print_cr("----------init store has invalid value -----");
1644 store->dump();
1645 val->dump();
1646 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1647 }
1648 for (EdgeIterator j(val); j.has_next(); j.next()) {
1649 PointsToNode* obj = j.get();
1650 if (obj->is_JavaObject()) {
1651 if (!field->points_to(obj->as_JavaObject())) {
1652 missed_obj = obj;
1653 break;
1654 }
1655 }
1656 }
1657 }
1658 if (missed_obj != NULL) {
1659 tty->print_cr("----------field---------------------------------");
1660 field->dump();
1661 tty->print_cr("----------missed reference to object------------");
1662 missed_obj->dump();
1663 tty->print_cr("----------object referenced by init store-------");
1664 store->dump();
1665 val->dump();
1666 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1667 }
1668 }
1669 #endif
1670 } else {
1671       // There could be initializing stores which follow the allocation.
1672       // For example, a volatile field store is not collected
1673       // by the Initialize node.
1674       //
1675       // We need to check for dependent loads to separate such stores from
1676       // stores which follow loads. For now, add the initial value NULL so
1677       // that the pointer compare optimization works correctly.
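      // Illustrative (hypothetical) Java shape of such a store:
      //   class A { volatile Object f; }
      //   A a = new A();
      //   a.f = o;   // issued after, and not captured by, the Initialize node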
1678 }
1679 }
1680 if (value == NULL) {
1681 // A field's initializing value was not recorded. Add NULL.
1682 if (add_edge(field, null_obj)) {
1683 // New edge was added
2079 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2080 assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
2081 PointsToNode* ptadr = _nodes.at(n->_idx);
2082 if (ptadr != NULL) {
2083 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2084 return;
2085 }
2086 Compile* C = _compile;
2087 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2088 _nodes.at_put(n->_idx, ptadr);
2089 // Add edge from arraycopy node to source object.
2090 (void)add_edge(ptadr, src);
2091 src->set_arraycopy_src();
2092 // Add edge from destination object to arraycopy node.
2093 (void)add_edge(dst, ptadr);
2094 dst->set_arraycopy_dst();
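  // The resulting connection graph shape is
  //   dst -> arraycopy -> src
  // so the destination transitively reaches whatever the source points to.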
2095 }
2096
2097 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2098 const Type* adr_type = n->as_AddP()->bottom_type();
2099 int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
2100 BasicType bt = T_INT;
2101 if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
2102 // Check only oop fields.
2103 if (!adr_type->isa_aryptr() ||
2104 (adr_type->isa_aryptr()->klass() == NULL) ||
2105 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2106       // OffsetBot is used to reference an array's element. Ignore the first AddP.
2107 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2108 bt = T_OBJECT;
2109 }
2110 }
2111 } else if (offset != oopDesc::klass_offset_in_bytes()) {
2112 if (adr_type->isa_instptr()) {
2113 ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
2114 if (field != NULL) {
2115 bt = field->layout_type();
2116 } else {
2117 // Check for unsafe oop field access
2118 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2119 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2120 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2121 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2122 bt = T_OBJECT;
2123 (*unsafe) = true;
2124 }
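        // (Illustrative example: an Unsafe putObject/putReference-style
        //  access typically reaches here as a Store[P|N] or LoadStore user
        //  of this AddP with no resolvable ciField.)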
2125 }
2126 } else if (adr_type->isa_aryptr()) {
2127 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2128 // Ignore array length load.
2129 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2130 // Ignore first AddP.
2131 } else {
2132 const Type* elemtype = adr_type->isa_aryptr()->elem();
2133 if (elemtype->isa_valuetype() && field_offset != Type::OffsetBot) {
2134 ciValueKlass* vk = elemtype->is_valuetype()->value_klass();
2135 field_offset += vk->first_field_offset();
2136 bt = vk->get_field_by_offset(field_offset, false)->layout_type();
2137 } else {
2138 bt = elemtype->array_element_basic_type();
2139 }
2140 }
2141 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2142 // Allocation initialization, ThreadLocal field access, unsafe access
2143 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2144 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2145 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2146 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2147 bt = T_OBJECT;
2148 }
2149 }
2150 }
2151 return (bt == T_OBJECT || bt == T_VALUETYPE || bt == T_NARROWOOP || bt == T_ARRAY);
2152 }
2153
2154 // Returns the unique pointed-to Java object or NULL.
2155 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2156   assert(!_collecting, "should not be called while constructing the graph");
2157 // If the node was created after the escape computation we can't answer.
2158 uint idx = n->_idx;
2159 if (idx >= nodes_size()) {
2160 return NULL;
2161 }
2162 PointsToNode* ptn = ptnode_adr(idx);
2163 if (ptn->is_JavaObject()) {
2164 return ptn->as_JavaObject();
2165 }
2166 assert(ptn->is_LocalVar(), "sanity");
2167 // Check all java objects it points to.
2168 JavaObjectNode* jobj = NULL;
2169 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2170 PointsToNode* e = i.get();
2171 if (e->is_JavaObject()) {
2172 if (jobj == NULL) {
2173 jobj = e->as_JavaObject();
2174 } else if (jobj != e) {
2175 return NULL;
2176 }
2273 if (i.get() == jobj)
2274 return true;
2275 }
2276 return false;
2277 }
2278 #endif
2279
2280 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2281 const Type *adr_type = phase->type(adr);
2282 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
2283 adr->in(AddPNode::Address)->is_Proj() &&
2284 adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
2285     // We are computing a raw address for a store captured by an Initialize;
2286     // compute an appropriate address type. AddP cases #3 and #5 (see below).
2287 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2288 assert(offs != Type::OffsetBot ||
2289 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2290 "offset must be a constant or it is initialization of array");
2291 return offs;
2292 }
2293 return adr_type->is_ptr()->flattened_offset();
2294 }
2295
2296 Node* ConnectionGraph::get_addp_base(Node *addp) {
2297 assert(addp->is_AddP(), "must be AddP");
2298 //
2299 // AddP cases for Base and Address inputs:
2300 // case #1. Direct object's field reference:
2301 // Allocate
2302 // |
2303 // Proj #5 ( oop result )
2304 // |
2305 // CheckCastPP (cast to instance type)
2306 // | |
2307 // AddP ( base == address )
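//          (e.g., the field access 'p.x' performed directly on the result
//           of 'new Point()' — illustrative Java shape for case #1)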
2308 //
2309 // case #2. Indirect object's field reference:
2310 // Phi
2311 // |
2312 // CastPP (cast to instance type)
2313 // | |
2428 }
2429 return NULL;
2430 }
2431
2432 //
2433 // Adjust the type and inputs of an AddP which computes the
2434 // address of a field of an instance
2435 //
2436 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2437 PhaseGVN* igvn = _igvn;
2438 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2439 assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2440 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2441 if (t == NULL) {
2442     // We are computing a raw address for a store captured by an Initialize;
2443     // compute an appropriate address type (cases #3 and #5).
2444 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2445 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2446 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2447 assert(offs != Type::OffsetBot, "offset must be a constant");
2448 if (base_t->isa_aryptr() != NULL) {
2449 // In the case of a flattened value type array, each field has its
2450 // own slice so we need to extract the field being accessed from
2451 // the address computation
2452 t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
2453 } else {
2454 t = base_t->add_offset(offs)->is_oopptr();
2455 }
2456 }
2457 int inst_id = base_t->instance_id();
2458 assert(!t->is_known_instance() || t->instance_id() == inst_id,
2459 "old type must be non-instance or match new type");
2460
2461   // The type 't' could be a subclass of 'base_t'.
2462   // As a result, t->offset() could be larger than base_t's size, which would
2463   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2464   // constructor verifies the correctness of the offset.
2465   //
2466   // This can happen on a subclass's branch (from type-profile-driven
2467   // inlining) which was not eliminated during parsing since the exactness
2468   // of the allocation type was not propagated to the subclass type check.
2469   //
2470   // Or the type 't' might not be related to 'base_t' at all.
2471   // That can happen when the CHA type differs from the MDO type on a dead path
2472   // (for example, from an instanceof check) which is not collapsed during parsing.
2473   //
2474   // Do nothing for such an AddP node and don't process its users since
2475 // this code branch will go away.
2476 //
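  // Hypothetical illustration: type-profile inlining speculated
  // 'obj instanceof Sub' on a branch that is actually dead for this
  // allocation; the AddP typed for Sub survives parsing but can never
  // execute, so bailing out here is safe.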
2477 if (!t->is_known_instance() &&
2478 !base_t->klass()->is_subtype_of(t->klass())) {
2479 return false; // bail out
2480 }
2481 const TypePtr* tinst = base_t->add_offset(t->offset());
2482 if (tinst->isa_aryptr() && t->isa_aryptr()) {
2483 // In the case of a flattened value type array, each field has its
2484 // own slice so we need to keep track of the field being accessed.
2485 tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
2486 }
2487
2488   // Do NOT remove the next line: it ensures a new alias index is allocated
2489   // for the instance type. Note: C++ will not remove the call since it
2490   // has a side effect.
2491 int alias_idx = _compile->get_alias_index(tinst);
2492 igvn->set_type(addp, tinst);
2493 // record the allocation in the node map
2494 set_map(addp, get_map(base->_idx));
2495 // Set addp's Base and Address to 'base'.
2496 Node *abase = addp->in(AddPNode::Base);
2497 Node *adr = addp->in(AddPNode::Address);
2498 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2499 adr->in(0)->_idx == (uint)inst_id) {
2500 // Skip AddP cases #3 and #5.
2501 } else {
2502 assert(!abase->is_top(), "sanity"); // AddP case #3
2503 if (abase != base) {
2504 igvn->hash_delete(addp);
2505 addp->set_req(AddPNode::Base, base);
2506 if (abase == adr) {
2507 addp->set_req(AddPNode::Address, base);
3171 igvn->hash_delete(tn);
3172 igvn->set_type(tn, tn_type);
3173 tn->set_type(tn_type);
3174 igvn->hash_insert(tn);
3175 record_for_optimizer(n);
3176 } else {
3177           assert(tn_type == TypePtr::NULL_PTR ||
3178                  (tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass())),
3179                  "unexpected type");
3180 continue; // Skip dead path with different type
3181 }
3182 }
3183 } else {
3184 debug_only(n->dump();)
3185 assert(false, "EA: unexpected node");
3186 continue;
3187 }
3188 // push allocation's users on appropriate worklist
3189 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3190 Node *use = n->fast_out(i);
3191 if (use->is_Mem() && use->in(MemNode::Address) == n) {
3192 // Load/store to instance's field
3193 memnode_worklist.append_if_missing(use);
3194 } else if (use->is_MemBar()) {
3195 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3196 memnode_worklist.append_if_missing(use);
3197 }
3198 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3199 Node* addp2 = find_second_addp(use, n);
3200 if (addp2 != NULL) {
3201 alloc_worklist.append_if_missing(addp2);
3202 }
3203 alloc_worklist.append_if_missing(use);
3204 } else if (use->is_Phi() ||
3205 use->is_CheckCastPP() ||
3206 use->is_EncodeNarrowPtr() ||
3207 use->is_DecodeNarrowPtr() ||
3208 BarrierSet::barrier_set()->barrier_set_c2()->escape_is_barrier_node(use) ||
3209 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3210 alloc_worklist.append_if_missing(use);
3211 #ifdef ASSERT
3212 } else if (use->is_Mem()) {
3213 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3214 } else if (use->is_MergeMem()) {
3215 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3216 } else if (use->is_SafePoint()) {
3217 // Look for MergeMem nodes for calls which reference unique allocation
3218 // (through CheckCastPP nodes) even for debug info.
3219 Node* m = use->in(TypeFunc::Memory);
3220 if (m->is_MergeMem()) {
3221 assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3222 }
3223 } else if (use->Opcode() == Op_EncodeISOArray) {
3224 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3225 // EncodeISOArray overwrites destination array
3226 memnode_worklist.append_if_missing(use);
3227 }
3228 } else if (use->Opcode() == Op_Return) {
3229 assert(_compile->tf()->returns_value_type_as_fields(), "must return a value type");
3230 // Get ValueKlass by removing the tag bit from the metadata pointer
3231 Node* klass = use->in(TypeFunc::Parms);
3232 intptr_t ptr = igvn->type(klass)->isa_rawptr()->get_con();
3233 clear_nth_bit(ptr, 0);
3234 assert(Metaspace::contains((void*)ptr), "should be klass");
3235 assert(((ValueKlass*)ptr)->contains_oops(), "returned value type must contain a reference field");
3236 } else {
3237 uint op = use->Opcode();
3238 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3239 (use->in(MemNode::Memory) == n)) {
3240           // They overwrite the memory edge corresponding to the destination array.
3241 memnode_worklist.append_if_missing(use);
3242 } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3243 op == Op_CastP2X || op == Op_StoreCM ||
3244 op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
3245 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3246 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3247 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
3248 op == Op_ValueType)) {
3249 n->dump();
3250 use->dump();
3251 assert(false, "EA: missing allocation reference path");
3252 }
3253 #endif
3254 }
3255 }
3256
3257 }
3258
3259   // Go over all ArrayCopy nodes and if one of the inputs has a unique
3260   // type, record it in the ArrayCopy node so we know what memory this
3261   // node uses/modifies.
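  // (For instance, if the destination is a uniquely-typed non-escaping
  //  allocation, later passes can confine the copy's memory effects to that
  //  instance's slices — illustrative rationale.)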
3262 for (int next = 0; next < arraycopy_worklist.length(); next++) {
3263 ArrayCopyNode* ac = arraycopy_worklist.at(next);
3264 Node* dest = ac->in(ArrayCopyNode::Dest);
3265 if (dest->is_AddP()) {
3266 dest = get_addp_base(dest);
3267 }
3268 JavaObjectNode* jobj = unique_java_object(dest);
3295 // compute new values for Memory inputs (the Memory inputs are not
3296   // actually updated until phase 4).
3297 if (memnode_worklist.length() == 0)
3298 return; // nothing to do
3299 while (memnode_worklist.length() != 0) {
3300 Node *n = memnode_worklist.pop();
3301 if (visited.test_set(n->_idx))
3302 continue;
3303 if (n->is_Phi() || n->is_ClearArray()) {
3304 // we don't need to do anything, but the users must be pushed
3305 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3306 // we don't need to do anything, but the users must be pushed
3307 n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
3308 if (n == NULL)
3309 continue;
3310 } else if (n->Opcode() == Op_StrCompressedCopy ||
3311 n->Opcode() == Op_EncodeISOArray) {
3312 // get the memory projection
3313 n = n->find_out_with(Op_SCMemProj);
3314 assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
3315 } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != NULL &&
3316 strcmp(n->as_CallLeaf()->_name, "store_unknown_value") == 0) {
3317 n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
3318 } else {
3319 assert(n->is_Mem(), "memory node required.");
3320 Node *addr = n->in(MemNode::Address);
3321 const Type *addr_t = igvn->type(addr);
3322 if (addr_t == Type::TOP)
3323 continue;
3324 assert (addr_t->isa_ptr() != NULL, "pointer type required.");
3325 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3326 assert ((uint)alias_idx < new_index_end, "wrong alias index");
3327 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3328 if (_compile->failing()) {
3329 return;
3330 }
3331 if (mem != n->in(MemNode::Memory)) {
3332         // We delay the memory edge update since we need the old one in the
3333         // MergeMem code below when instance memory slices are separated.
3334 set_map(n, mem);
3335 }
3336 if (n->is_Load()) {
3337 continue; // don't push users
3338 } else if (n->is_LoadStore()) {
3339 // get the memory projection
3340 n = n->find_out_with(Op_SCMemProj);
3341 assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
3342 }
3343 }
3344 // push user on appropriate worklist
3345 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3346 Node *use = n->fast_out(i);
3347 if (use->is_Phi() || use->is_ClearArray()) {
3348 memnode_worklist.append_if_missing(use);
3349 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3350 if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
3351 continue;
3352 memnode_worklist.append_if_missing(use);
3353 } else if (use->is_MemBar()) {
3354 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3355 memnode_worklist.append_if_missing(use);
3356 }
3357 #ifdef ASSERT
3358 } else if (use->is_Mem()) {
3359 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3360 } else if (use->is_MergeMem()) {
3361 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3362 } else if (use->Opcode() == Op_EncodeISOArray) {
3363 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3364 // EncodeISOArray overwrites destination array
3365 memnode_worklist.append_if_missing(use);
3366 }
3367 } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != NULL &&
3368 strcmp(use->as_CallLeaf()->_name, "store_unknown_value") == 0) {
3369 // store_unknown_value overwrites destination array
3370 memnode_worklist.append_if_missing(use);
3371 } else {
3372 uint op = use->Opcode();
3373 if ((use->in(MemNode::Memory) == n) &&
3374 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
3375           // They overwrite the memory edge corresponding to the destination array.
3376 memnode_worklist.append_if_missing(use);
3377 } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
3378 op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
3379 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3380 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
3381 n->dump();
3382 use->dump();
3383 assert(false, "EA: missing memory path");
3384 }
3385 #endif
3386 }
3387 }
3388 }
3389
3390 // Phase 3: Process MergeMem nodes from mergemem_worklist.
3391 // Walk each memory slice moving the first node encountered of each
3392 // instance type to the input corresponding to its alias index.
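  // (Conceptual shape, for illustration only: a MergeMem maps alias indices
  //  to memory slices, e.g. { base memory, raw memory, idx 12 -> StoreI,
  //  idx 27 -> StoreP }; splitting unique types introduces fresh indices
  //  whose slices are populated here.)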
3393 uint length = _mergemem_worklist.length();
3394 for( uint next = 0; next < length; ++next ) {
3395 MergeMemNode* nmm = _mergemem_worklist.at(next);
3396 assert(!visited.test_set(nmm->_idx), "should not be visited before");
3397 // Note: we don't want to use MergeMemStream here because we only want to
3398 // scan inputs which exist at the start, not ones we add during processing.
3399     // Note 2: MergeMem may already contain instance memory slices added
3400     // during the find_inst_mem() call when memory nodes were processed above.
3401 igvn->hash_delete(nmm);
3402 uint nslices = MIN2(nmm->req(), new_index_start);
3403 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
3404 Node* mem = nmm->in(i);
3405 Node* cur = NULL;
3406 if (mem == NULL || mem->is_top())
3407 continue;
3408 // First, update mergemem by moving memory nodes to corresponding slices
3409 // if their type became more precise since this mergemem was created.
3410 while (mem->is_Mem()) {
3411 const Type *at = igvn->type(mem->in(MemNode::Address));
3412 if (at != Type::TOP) {
3444 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
3445 Node* result = step_through_mergemem(nmm, ni, tinst);
3446 if (result == nmm->base_memory()) {
3447 // Didn't find instance memory, search through general slice recursively.
3448 result = nmm->memory_at(_compile->get_general_index(ni));
3449 result = find_inst_mem(result, ni, orig_phis);
3450 if (_compile->failing()) {
3451 return;
3452 }
3453 nmm->set_memory_at(ni, result);
3454 }
3455 }
3456 igvn->hash_insert(nmm);
3457 record_for_optimizer(nmm);
3458 }
3459
3460 // Phase 4: Update the inputs of non-instance memory Phis and
3461 // the Memory input of memnodes
3462 // First update the inputs of any non-instance Phi's from
3463 // which we split out an instance Phi. Note we don't have
3464 // to recursively process Phi's encountered on the input memory
3465 // chains as is done in split_memory_phi() since they will
3466 // also be processed here.
3467 for (int j = 0; j < orig_phis.length(); j++) {
3468 PhiNode *phi = orig_phis.at(j);
3469 int alias_idx = _compile->get_alias_index(phi->adr_type());
3470 igvn->hash_delete(phi);
3471 for (uint i = 1; i < phi->req(); i++) {
3472 Node *mem = phi->in(i);
3473 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
3474 if (_compile->failing()) {
3475 return;
3476 }
3477 if (mem != new_mem) {
3478 phi->set_req(i, new_mem);
3479 }
3480 }
3481 igvn->hash_insert(phi);
3482 record_for_optimizer(phi);
3483 }
3484