543 break; // G1 pre barrier previous oop value store.
544 }
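// Likewise recognize the G1 post-barrier bookkeeping store into the
// thread's dirty card queue; it is not a Java heap store.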
545 if (offs == in_bytes(JavaThread::dirty_card_queue_offset() +
546 PtrQueue::byte_offset_of_buf())) {
547 break; // G1 post barrier card address store.
548 }
549 }
550 }
551 }
552 delayed_worklist->push(n); // Process unsafe access later.
553 break;
554 }
555 #ifdef ASSERT
556 n->dump(1);
557 assert(false, "not unsafe or G1 barrier raw StoreP");
558 #endif
559 }
560 break;
561 }
562 case Op_AryEq:
563 case Op_StrComp:
564 case Op_StrEquals:
565 case Op_StrIndexOf:
566 case Op_EncodeISOArray: {
567 add_local_var(n, PointsToNode::ArgEscape);
568 delayed_worklist->push(n); // Process it later.
569 break;
570 }
571 case Op_ThreadLocal: {
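// ThreadLocal produces the current JavaThread*; model it as a
// pre-existing java object with ArgEscape state.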
572 add_java_object(n, PointsToNode::ArgEscape);
573 break;
574 }
575 default:
576 ; // Do nothing for nodes not related to EA.
577 }
578 return;
579 }
580
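// ELSE_FAIL: debug-only failure path for the edge-building switch below;
// it dumps the offending node and asserts with the given message.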
581 #ifdef ASSERT
582 #define ELSE_FAIL(name) \
583 /* Should not be called for a non-pointer type. */ \
584 n->dump(1); \
585 assert(false, name); \
726 add_edge(adr_ptn, ptn);
727 break;
728 } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
729 // Stored value escapes in unsafe access.
730 Node *val = n->in(MemNode::ValueIn);
731 PointsToNode* ptn = ptnode_adr(val->_idx);
732 assert(ptn != NULL, "node should be registered");
733 set_escape_state(ptn, PointsToNode::GlobalEscape);
734 // Add edge to object for unsafe access with offset.
735 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
736 assert(adr_ptn != NULL, "node should be registered");
737 if (adr_ptn->is_Field()) {
738 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
739 add_edge(adr_ptn, ptn);
740 }
741 break;
742 }
743 ELSE_FAIL("Op_StoreP");
744 }
745 case Op_AryEq:
746 case Op_StrComp:
747 case Op_StrEquals:
748 case Op_StrIndexOf:
749 case Op_EncodeISOArray: {
750 // char[] arrays passed to string intrinsics do not escape, but
751 // they are not scalar replaceable. Adjust the escape state for them.
752 // Start from the in(2) edge since in(1) is the memory edge.
753 for (uint i = 2; i < n->req(); i++) {
754 Node* adr = n->in(i);
755 const Type* at = _igvn->type(adr);
756 if (!adr->is_top() && at->isa_ptr()) {
757 assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
758 at->isa_ptr() != NULL, "expecting a pointer");
759 if (adr->is_AddP()) {
760 adr = get_addp_base(adr);
761 }
762 PointsToNode* ptn = ptnode_adr(adr->_idx);
763 assert(ptn != NULL, "node should be registered");
764 add_edge(n_ptn, ptn);
765 }
766 }
767 break;
768 }
769 default: {
770 // This method should be called only for EA-specific nodes which may
2705 Node *un = result->as_Phi()->unique_input(igvn);
2706 if (un != NULL) {
2707 orig_phis.append_if_missing(result->as_Phi());
2708 result = un;
2709 } else {
2710 break;
2711 }
2712 } else if (result->is_ClearArray()) {
2713 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
2714 // Cannot bypass the initialization of the instance
2715 // we are looking for.
2716 break;
2717 }
2718 // Otherwise skip it (the call updated 'result' value).
2719 } else if (result->Opcode() == Op_SCMemProj) {
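// SCMemProj is the memory projection of a LoadStore (e.g. a compare-and-swap)
// node or of EncodeISOArray; step over it unless it accesses the alias
// slice being tracked.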
2720 Node* mem = result->in(0);
2721 Node* adr = NULL;
2722 if (mem->is_LoadStore()) {
2723 adr = mem->in(MemNode::Address);
2724 } else {
2725 assert(mem->Opcode() == Op_EncodeISOArray, "sanity");
2726 adr = mem->in(3); // Memory edge corresponds to destination array
2727 }
2728 const Type *at = igvn->type(adr);
2729 if (at != Type::TOP) {
2730 assert(at->isa_ptr() != NULL, "pointer type required.");
2731 int idx = C->get_alias_index(at->is_ptr());
2732 assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node accesses its field");
2733 break;
2734 }
2735 result = mem->in(MemNode::Memory);
2736 }
2737 }
2738 if (result->is_Phi()) {
2739 PhiNode *mphi = result->as_Phi();
2740 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
2741 const TypePtr *t = mphi->adr_type();
2742 if (!is_instance) {
2743 // Push all non-instance Phis on the orig_phis worklist to update inputs
2744 // during Phase 4 if needed.
2745 orig_phis.append_if_missing(mphi);
2746 } else if (C->get_alias_index(t) != alias_idx) {
2747 // Create a new Phi with the specified alias index type.
2748 result = split_memory_phi(mphi, alias_idx, orig_phis);
2749 }
2750 }
2751 // The result is either a MemNode, a PhiNode, or an InitializeNode.
2752 return result;
2753 }
2754
2755 //
3079 alloc_worklist.append_if_missing(use);
3080 #ifdef ASSERT
3081 } else if (use->is_Mem()) {
3082 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3083 } else if (use->is_MergeMem()) {
3084 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3085 } else if (use->is_SafePoint()) {
3086 // Look for MergeMem nodes for calls which reference a unique allocation
3087 // (through CheckCastPP nodes) even for debug info.
3088 Node* m = use->in(TypeFunc::Memory);
3089 if (m->is_MergeMem()) {
3090 assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3091 }
3092 } else if (use->Opcode() == Op_EncodeISOArray) {
3093 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3094 // EncodeISOArray overwrites destination array
3095 memnode_worklist.append_if_missing(use);
3096 }
3097 } else {
3098 uint op = use->Opcode();
3099 if (!(op == Op_CmpP || op == Op_Conv2B ||
3100 op == Op_CastP2X || op == Op_StoreCM ||
3101 op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
3102 op == Op_StrEquals || op == Op_StrIndexOf)) {
3103 n->dump();
3104 use->dump();
3105 assert(false, "EA: missing allocation reference path");
3106 }
3107 #endif
3108 }
3109 }
3110
3111 }
3112
3113 // Go over all ArrayCopy nodes and if one of the inputs has a unique
3114 // type, record it in the ArrayCopy node so we know what memory this
3115 // node uses/modifies.
3116 for (int next = 0; next < arraycopy_worklist.length(); next++) {
3117 ArrayCopyNode* ac = arraycopy_worklist.at(next);
3118 Node* dest = ac->in(ArrayCopyNode::Dest);
3119 if (dest->is_AddP()) {
3120 dest = get_addp_base(dest);
3121 }
3122 JavaObjectNode* jobj = unique_java_object(dest);
3144 // New alias types were created in split_AddP().
3145 uint new_index_end = (uint) _compile->num_alias_types();
3146 assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");
3147
3148 // Phase 2: Process MemNodes from memnode_worklist, computing the new address
3149 //          type and new values for the Memory inputs (the Memory inputs are
3150 //          not actually updated until Phase 4).
3151 if (memnode_worklist.length() == 0)
3152 return; // nothing to do
3153 while (memnode_worklist.length() != 0) {
3154 Node *n = memnode_worklist.pop();
3155 if (visited.test_set(n->_idx))
3156 continue;
3157 if (n->is_Phi() || n->is_ClearArray()) {
3158 // we don't need to do anything, but the users must be pushed
3159 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3160 // we don't need to do anything, but the users must be pushed
3161 n = n->as_MemBar()->proj_out(TypeFunc::Memory);
3162 if (n == NULL)
3163 continue;
3164 } else if (n->Opcode() == Op_EncodeISOArray) {
3165 // get the memory projection
3166 n = n->find_out_with(Op_SCMemProj);
3167 assert(n->Opcode() == Op_SCMemProj, "memory projection required");
3168 } else {
3169 assert(n->is_Mem(), "memory node required.");
3170 Node *addr = n->in(MemNode::Address);
3171 const Type *addr_t = igvn->type(addr);
3172 if (addr_t == Type::TOP)
3173 continue;
3174 assert(addr_t->isa_ptr() != NULL, "pointer type required.");
3175 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3176 assert((uint)alias_idx < new_index_end, "wrong alias index");
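// Find (possibly creating new Phis along the way) the memory state that
// feeds this node's alias slice.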
3177 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3178 if (_compile->failing()) {
3179 return;
3180 }
3181 if (mem != n->in(MemNode::Memory)) {
3182 // We delay the memory edge update since we need the old one in the
3183 // MergeMem code below when instance memory slices are separated.
3184 set_map(n, mem);
3199 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3200 if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
3201 continue;
3202 memnode_worklist.append_if_missing(use);
3203 } else if (use->is_MemBar()) {
3204 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3205 memnode_worklist.append_if_missing(use);
3206 }
3207 #ifdef ASSERT
3208 } else if (use->is_Mem()) {
3209 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3210 } else if (use->is_MergeMem()) {
3211 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3212 } else if (use->Opcode() == Op_EncodeISOArray) {
3213 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3214 // EncodeISOArray overwrites destination array
3215 memnode_worklist.append_if_missing(use);
3216 }
3217 } else {
3218 uint op = use->Opcode();
3219 if (!(op == Op_StoreCM ||
3220 (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
3221 strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
3222 op == Op_AryEq || op == Op_StrComp ||
3223 op == Op_StrEquals || op == Op_StrIndexOf)) {
3224 n->dump();
3225 use->dump();
3226 assert(false, "EA: missing memory path");
3227 }
3228 #endif
3229 }
3230 }
3231 }
3232
3233 // Phase 3: Process MergeMem nodes from mergemem_worklist.
3234 // Walk each memory slice moving the first node encountered of each
3235 // instance type to the input corresponding to its alias index.
3236 uint length = _mergemem_worklist.length();
3237 for (uint next = 0; next < length; ++next) {
3238 MergeMemNode* nmm = _mergemem_worklist.at(next);
3239 assert(!visited.test_set(nmm->_idx), "should not be visited before");
3240 // Note: we don't want to use MergeMemStream here because we only want to
3241 // scan inputs which exist at the start, not ones we add during processing.
3242 // Note 2: MergeMem may already contain instance memory slices added
3243 // during find_inst_mem() call when memory nodes were processed above.
|
543 break; // G1 pre barrier previous oop value store.
544 }
545 if (offs == in_bytes(JavaThread::dirty_card_queue_offset() +
546 PtrQueue::byte_offset_of_buf())) {
547 break; // G1 post barrier card address store.
548 }
549 }
550 }
551 }
552 delayed_worklist->push(n); // Process unsafe access later.
553 break;
554 }
555 #ifdef ASSERT
556 n->dump(1);
557 assert(false, "not unsafe or G1 barrier raw StoreP");
558 #endif
559 }
560 break;
561 }
562 case Op_AryEq:
563 case Op_HasNegatives:
564 case Op_StrComp:
565 case Op_StrEquals:
566 case Op_StrIndexOf:
567 case Op_StrIndexOfChar:
568 case Op_StrInflatedCopy:
569 case Op_StrCompressedCopy:
570 case Op_EncodeISOArray: {
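// String intrinsics take oop array (char[]/byte[]) arguments; record the
// node as ArgEscape here and wire up its argument edges in the delayed pass.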
571 add_local_var(n, PointsToNode::ArgEscape);
572 delayed_worklist->push(n); // Process it later.
573 break;
574 }
575 case Op_ThreadLocal: {
576 add_java_object(n, PointsToNode::ArgEscape);
577 break;
578 }
579 default:
580 ; // Do nothing for nodes not related to EA.
581 }
582 return;
583 }
584
585 #ifdef ASSERT
586 #define ELSE_FAIL(name) \
587 /* Should not be called for a non-pointer type. */ \
588 n->dump(1); \
589 assert(false, name); \
730 add_edge(adr_ptn, ptn);
731 break;
732 } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
733 // Stored value escapes in unsafe access.
734 Node *val = n->in(MemNode::ValueIn);
735 PointsToNode* ptn = ptnode_adr(val->_idx);
736 assert(ptn != NULL, "node should be registered");
737 set_escape_state(ptn, PointsToNode::GlobalEscape);
738 // Add edge to object for unsafe access with offset.
739 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
740 assert(adr_ptn != NULL, "node should be registered");
741 if (adr_ptn->is_Field()) {
742 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
743 add_edge(adr_ptn, ptn);
744 }
745 break;
746 }
747 ELSE_FAIL("Op_StoreP");
748 }
749 case Op_AryEq:
750 case Op_HasNegatives:
751 case Op_StrComp:
752 case Op_StrEquals:
753 case Op_StrIndexOf:
754 case Op_StrIndexOfChar:
755 case Op_StrInflatedCopy:
756 case Op_StrCompressedCopy:
757 case Op_EncodeISOArray: {
758 // char[]/byte[] arrays passed to string intrinsics do not escape, but
759 // they are not scalar replaceable. Adjust the escape state for them.
760 // Start from the in(2) edge since in(1) is the memory edge.
761 for (uint i = 2; i < n->req(); i++) {
762 Node* adr = n->in(i);
763 const Type* at = _igvn->type(adr);
764 if (!adr->is_top() && at->isa_ptr()) {
765 assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
766 at->isa_ptr() != NULL, "expecting a pointer");
767 if (adr->is_AddP()) {
768 adr = get_addp_base(adr);
769 }
770 PointsToNode* ptn = ptnode_adr(adr->_idx);
771 assert(ptn != NULL, "node should be registered");
772 add_edge(n_ptn, ptn);
773 }
774 }
775 break;
776 }
777 default: {
778 // This method should be called only for EA-specific nodes which may
2713 Node *un = result->as_Phi()->unique_input(igvn);
2714 if (un != NULL) {
2715 orig_phis.append_if_missing(result->as_Phi());
2716 result = un;
2717 } else {
2718 break;
2719 }
2720 } else if (result->is_ClearArray()) {
2721 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
2722 // Cannot bypass the initialization of the instance
2723 // we are looking for.
2724 break;
2725 }
2726 // Otherwise skip it (the call updated 'result' value).
2727 } else if (result->Opcode() == Op_SCMemProj) {
2728 Node* mem = result->in(0);
2729 Node* adr = NULL;
2730 if (mem->is_LoadStore()) {
2731 adr = mem->in(MemNode::Address);
2732 } else {
2733 assert(mem->Opcode() == Op_EncodeISOArray ||
2734 mem->Opcode() == Op_StrCompressedCopy, "sanity");
2735 adr = mem->in(3); // Memory edge corresponds to destination array
2736 }
2737 const Type *at = igvn->type(adr);
2738 if (at != Type::TOP) {
2739 assert(at->isa_ptr() != NULL, "pointer type required.");
2740 int idx = C->get_alias_index(at->is_ptr());
2741 if (idx == alias_idx) {
2742 // Assert in debug mode
2743 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
2744 break; // In product mode return SCMemProj node
2745 }
2746 }
2747 result = mem->in(MemNode::Memory);
2748 } else if (result->Opcode() == Op_StrInflatedCopy) {
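// Unlike StrCompressedCopy, StrInflatedCopy sits directly on the memory
// chain (no SCMemProj); its in(3) is the destination array.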
2749 Node* adr = result->in(3); // Memory edge corresponds to destination array
2750 const Type *at = igvn->type(adr);
2751 if (at != Type::TOP) {
2752 assert(at->isa_ptr() != NULL, "pointer type required.");
2753 int idx = C->get_alias_index(at->is_ptr());
2754 if (idx == alias_idx) {
2755 // Assert in debug mode
2756 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
2757 break; // In product mode return SCMemProj node
2758 }
2759 }
2760 result = result->in(MemNode::Memory);
2761 }
2762 }
2763 if (result->is_Phi()) {
2764 PhiNode *mphi = result->as_Phi();
2765 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
2766 const TypePtr *t = mphi->adr_type();
2767 if (!is_instance) {
2768 // Push all non-instance Phis on the orig_phis worklist to update inputs
2769 // during Phase 4 if needed.
2770 orig_phis.append_if_missing(mphi);
2771 } else if (C->get_alias_index(t) != alias_idx) {
2772 // Create a new Phi with the specified alias index type.
2773 result = split_memory_phi(mphi, alias_idx, orig_phis);
2774 }
2775 }
2776 // The result is either a MemNode, a PhiNode, or an InitializeNode.
2777 return result;
2778 }
2779
2780 //
3104 alloc_worklist.append_if_missing(use);
3105 #ifdef ASSERT
3106 } else if (use->is_Mem()) {
3107 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3108 } else if (use->is_MergeMem()) {
3109 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3110 } else if (use->is_SafePoint()) {
3111 // Look for MergeMem nodes for calls which reference a unique allocation
3112 // (through CheckCastPP nodes) even for debug info.
3113 Node* m = use->in(TypeFunc::Memory);
3114 if (m->is_MergeMem()) {
3115 assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3116 }
3117 } else if (use->Opcode() == Op_EncodeISOArray) {
3118 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3119 // EncodeISOArray overwrites destination array
3120 memnode_worklist.append_if_missing(use);
3121 }
3122 } else {
3123 uint op = use->Opcode();
3124 if ((use->in(MemNode::Memory) == n) &&
3125 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
3126 // They overwrite the memory edge corresponding to the destination array.
3127 memnode_worklist.append_if_missing(use);
3128 } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3129 op == Op_CastP2X || op == Op_StoreCM ||
3130 op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
3131 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3132 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
3133 n->dump();
3134 use->dump();
3135 assert(false, "EA: missing allocation reference path");
3136 }
3137 #endif
3138 }
3139 }
3140
3141 }
3142
3143 // Go over all ArrayCopy nodes and if one of the inputs has a unique
3144 // type, record it in the ArrayCopy node so we know what memory this
3145 // node uses/modifies.
3146 for (int next = 0; next < arraycopy_worklist.length(); next++) {
3147 ArrayCopyNode* ac = arraycopy_worklist.at(next);
3148 Node* dest = ac->in(ArrayCopyNode::Dest);
3149 if (dest->is_AddP()) {
3150 dest = get_addp_base(dest);
3151 }
3152 JavaObjectNode* jobj = unique_java_object(dest);
3174 // New alias types were created in split_AddP().
3175 uint new_index_end = (uint) _compile->num_alias_types();
3176 assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");
3177
3178 // Phase 2: Process MemNodes from memnode_worklist, computing the new address
3179 //          type and new values for the Memory inputs (the Memory inputs are
3180 //          not actually updated until Phase 4).
3181 if (memnode_worklist.length() == 0)
3182 return; // nothing to do
3183 while (memnode_worklist.length() != 0) {
3184 Node *n = memnode_worklist.pop();
3185 if (visited.test_set(n->_idx))
3186 continue;
3187 if (n->is_Phi() || n->is_ClearArray()) {
3188 // we don't need to do anything, but the users must be pushed
3189 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3190 // we don't need to do anything, but the users must be pushed
3191 n = n->as_MemBar()->proj_out(TypeFunc::Memory);
3192 if (n == NULL)
3193 continue;
3194 } else if (n->Opcode() == Op_StrCompressedCopy ||
3195 n->Opcode() == Op_EncodeISOArray) {
3196 // get the memory projection
3197 n = n->find_out_with(Op_SCMemProj);
3198 assert(n->Opcode() == Op_SCMemProj, "memory projection required");
3199 } else {
3200 assert(n->is_Mem(), "memory node required.");
3201 Node *addr = n->in(MemNode::Address);
3202 const Type *addr_t = igvn->type(addr);
3203 if (addr_t == Type::TOP)
3204 continue;
3205 assert(addr_t->isa_ptr() != NULL, "pointer type required.");
3206 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3207 assert((uint)alias_idx < new_index_end, "wrong alias index");
3208 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3209 if (_compile->failing()) {
3210 return;
3211 }
3212 if (mem != n->in(MemNode::Memory)) {
3213 // We delay the memory edge update since we need the old one in the
3214 // MergeMem code below when instance memory slices are separated.
3215 set_map(n, mem);
3230 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3231 if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
3232 continue;
3233 memnode_worklist.append_if_missing(use);
3234 } else if (use->is_MemBar()) {
3235 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3236 memnode_worklist.append_if_missing(use);
3237 }
3238 #ifdef ASSERT
3239 } else if (use->is_Mem()) {
3240 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3241 } else if (use->is_MergeMem()) {
3242 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3243 } else if (use->Opcode() == Op_EncodeISOArray) {
3244 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3245 // EncodeISOArray overwrites destination array
3246 memnode_worklist.append_if_missing(use);
3247 }
3248 } else {
3249 uint op = use->Opcode();
3250 if ((use->in(MemNode::Memory) == n) &&
3251 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
3252 // They overwrite the memory edge corresponding to the destination array.
3253 memnode_worklist.append_if_missing(use);
3254 } else if (!(op == Op_StoreCM ||
3255 (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
3256 strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
3257 op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
3258 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3259 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
3260 n->dump();
3261 use->dump();
3262 assert(false, "EA: missing memory path");
3263 }
3264 #endif
3265 }
3266 }
3267 }
3268
3269 // Phase 3: Process MergeMem nodes from mergemem_worklist.
3270 // Walk each memory slice moving the first node encountered of each
3271 // instance type to the input corresponding to its alias index.
3272 uint length = _mergemem_worklist.length();
3273 for (uint next = 0; next < length; ++next) {
3274 MergeMemNode* nmm = _mergemem_worklist.at(next);
3275 assert(!visited.test_set(nmm->_idx), "should not be visited before");
3276 // Note: we don't want to use MergeMemStream here because we only want to
3277 // scan inputs which exist at the start, not ones we add during processing.
3278 // Note 2: MergeMem may already contain instance memory slices added
3279 // during find_inst_mem() call when memory nodes were processed above.
|