
src/hotspot/share/opto/loopopts.cpp

rev 54386 : 8221766: Load-reference barriers for Shenandoah


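Old version (before rev 54386), with the Shenandoah write-barrier special cases: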
1065 }
1066 
1067 static bool merge_point_safe(Node* region) {
1068   // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
1069   // having a PhiNode input. This sidesteps the dangerous case where the split
1070   // ConvI2LNode may become TOP if the input Value() does not
1071   // overlap the ConvI2L range, leaving a node which may not dominate its
1072   // uses.
1073   // A better fix for this problem can be found in the BugTraq entry, but
1074   // expediency for Mantis demands this hack.
1075   // 6855164: If the merge point has a FastLockNode with a PhiNode input, we stop
1076   // split_if_with_blocks from splitting a block because we could not move around
1077   // the FastLockNode.
1078   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1079     Node* n = region->fast_out(i);
1080     if (n->is_Phi()) {
1081       for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1082         Node* m = n->fast_out(j);
1083         if (m->is_FastLock())
1084           return false;
1085 #if INCLUDE_SHENANDOAHGC
1086         if (m->is_ShenandoahBarrier() && m->has_out_with(Op_FastLock)) {
1087           return false;
1088         }
1089 #endif
1090 #ifdef _LP64
1091         if (m->Opcode() == Op_ConvI2L)
1092           return false;
1093         if (m->is_CastII() && m->isa_CastII()->has_range_check()) {
1094           return false;
1095         }
1096 #endif
1097       }
1098     }
1099   }
1100   return true;
1101 }
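For readers outside HotSpot, here is a minimal, self-contained C++ sketch of the shape of this check. The Node struct, Opcode enum, and helpers below are simplified stand-ins for the real C2 classes, not the actual API; what the sketch preserves is the two-level def-use walk: for each Phi hanging off the merge region, inspect the Phi's users and veto the split if any of them is fragile under cloning (a FastLock, a ConvI2L, a range-check CastII, or, in this pre-LRB version, a Shenandoah barrier that itself feeds a FastLock).

#include <vector>

// Simplified stand-ins for the C2 node kinds used above (illustration only).
enum Opcode { Op_Region, Op_Phi, Op_FastLock, Op_ConvI2L, Op_CastII,
              Op_ShenandoahBarrier, Op_Other };

struct Node {
  Opcode op;
  bool range_check = false;           // models CastIINode::has_range_check()
  std::vector<Node*> outs;            // def->use edges, like fast_out()

  bool has_out_with(Opcode o) const { // like Node::has_out_with()
    for (const Node* u : outs) if (u->op == o) return true;
    return false;
  }
};

// Same structure as merge_point_safe(): walk the Phis hanging off the
// region, then each Phi's users, and refuse the split on a fragile user.
static bool merge_point_safe_sketch(const Node* region) {
  for (const Node* n : region->outs) {
    if (n->op != Op_Phi) continue;
    for (const Node* m : n->outs) {
      if (m->op == Op_FastLock) return false;
      // Old write barriers: a barrier between the Phi and a FastLock is
      // the same unsafe pattern, one level deeper.
      if (m->op == Op_ShenandoahBarrier && m->has_out_with(Op_FastLock)) return false;
      if (m->op == Op_ConvI2L) return false;                  // _LP64 only in the real code
      if (m->op == Op_CastII && m->range_check) return false; // _LP64 only in the real code
    }
  }
  return true;
}

int main() {
  Node fastlock{Op_FastLock}, phi{Op_Phi}, region{Op_Region};
  phi.outs.push_back(&fastlock);    // Phi feeds a FastLock...
  region.outs.push_back(&phi);      // ...and hangs off the merge region
  return merge_point_safe_sketch(&region) ? 1 : 0; // exits 0: split vetoed
}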
1102 
1103 
1104 //------------------------------place_near_use---------------------------------
1105 // Place some computation next to use but not inside inner loops.
1106 // For inner loop uses move it to the preheader area.
1107 Node *PhaseIdealLoop::place_near_use(Node *useblock) const {
1108   IdealLoopTree *u_loop = get_loop( useblock );
1109   if (u_loop->_irreducible) {
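The excerpt cuts off here, but the comment states the policy. As a toy model hedged on that comment alone (hypothetical types, not the real PhaseIdealLoop/IdealLoopTree API): a use inside a loop gets the computation placed in the loop's preheader, unless the loop is irreducible, in which case no well-formed preheader exists and the use block itself is the fallback, matching the _irreducible early-out above.

// Toy model of the placement policy described in the comment above.
struct Loop {
  bool irreducible = false;
  int  preheader   = -1;  // id of the block just ahead of the loop entry
};

struct Block {
  int   id;
  Loop* loop;             // innermost loop containing this block, or nullptr
};

// Keep the computation near its use, but not inside an inner loop: for a
// use in a (reducible) loop, the loop preheader counts as "near".
int place_near_use_sketch(const Block* useblock) {
  const Loop* l = useblock->loop;
  if (l == nullptr || l->irreducible) {
    return useblock->id;  // no loop, or no well-formed preheader: stay put
  }
  return l->preheader;
}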


3193   // Evacuate nodes in peel region into the not_peeled region if possible
3194   uint new_phi_cnt = 0;
3195   uint cloned_for_outside_use = 0;
3196   for (i = 0; i < peel_list.size();) {
3197     Node* n = peel_list.at(i);
3198 #if !defined(PRODUCT)
3199     if (TracePartialPeeling) n->dump();
3200 #endif
3201     bool incr = true;
3202     if ( !n->is_CFG() ) {
3203 
3204       if ( has_use_in_set(n, not_peel) ) {
3205 
3206         // If not used internal to the peeled region,
3207         // move "n" from peeled to not_peeled region.
3208 
3209         if ( !has_use_internal_to_set(n, peel, loop) ) {
3210 
3211           // if not pinned and not a load (which may be anti-dependent on a store)
3212           // and not a CMove (Matcher expects only bool->cmove).
3213           if (n->in(0) == NULL && !n->is_Load() && !n->is_CMove() && n->Opcode() != Op_ShenandoahWBMemProj) {
3214             cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist );
3215             sink_list.push(n);
3216             peel     >>= n->_idx; // delete n from peel set.
3217             not_peel <<= n->_idx; // add n to not_peel set.
3218             peel_list.remove(i);
3219             incr = false;
3220 #if !defined(PRODUCT)
3221             if (TracePartialPeeling) {
3222               tty->print_cr("sink to not_peeled region: %d newbb: %d",
3223                             n->_idx, get_ctrl(n)->_idx);
3224             }
3225 #endif
3226           }
3227         } else {
3228           // Otherwise check for special def-use cases that span
3229           // the peel/not_peel boundary such as bool->if
3230           clone_for_special_use_inside_loop( loop, n, not_peel, sink_list, worklist );
3231           new_phi_cnt++;
3232         }
3233       }
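One idiom above that is easy to misread: peel and not_peel are C2 VectorSets, whose overloaded operators the inline comments gloss: <<= adds a member and >>= deletes one, so the pair of statements moves n->_idx from the peel set to the not_peel set. (This old version also carries an extra guard, n->Opcode() != Op_ShenandoahWBMemProj: the memory projection of the old Shenandoah write barrier has to stay with its barrier, so it is excluded from sinking.) A minimal stand-in, not the real VectorSet class, with the same operator spellings:

#include <cstdint>
#include <vector>

// Minimal stand-in for C2's VectorSet: a growable bitset reusing the same
// operator spellings as above (<<= inserts a member, >>= removes one).
struct BitSetSketch {
  std::vector<uint64_t> bits;
  void grow(uint32_t i) { if (i / 64 >= bits.size()) bits.resize(i / 64 + 1, 0); }
  BitSetSketch& operator<<=(uint32_t i) { grow(i); bits[i / 64] |=   uint64_t(1) << (i % 64);  return *this; }
  BitSetSketch& operator>>=(uint32_t i) { grow(i); bits[i / 64] &= ~(uint64_t(1) << (i % 64)); return *this; }
  bool test(uint32_t i) const { return i / 64 < bits.size() && ((bits[i / 64] >> (i % 64)) & 1); }
};

int main() {
  BitSetSketch peel, not_peel;
  uint32_t idx = 123;                 // stands in for n->_idx
  peel <<= idx;                       // node starts out in the peel set
  peel >>= idx; not_peel <<= idx;     // ...and is moved across, as above
  return (!peel.test(idx) && not_peel.test(idx)) ? 0 : 1;
}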




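New version (rev 54386, with load-reference barriers):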
1065 }
1066 
1067 static bool merge_point_safe(Node* region) {
1068   // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
1069   // having a PhiNode input. This sidesteps the dangerous case where the split
1070   // ConvI2LNode may become TOP if the input Value() does not
1071   // overlap the ConvI2L range, leaving a node which may not dominate its
1072   // uses.
1073   // A better fix for this problem can be found in the BugTraq entry, but
1074   // expediency for Mantis demands this hack.
1075   // 6855164: If the merge point has a FastLockNode with a PhiNode input, we stop
1076   // split_if_with_blocks from splitting a block because we could not move around
1077   // the FastLockNode.
1078   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1079     Node* n = region->fast_out(i);
1080     if (n->is_Phi()) {
1081       for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1082         Node* m = n->fast_out(j);
1083         if (m->is_FastLock())
1084           return false;





1085 #ifdef _LP64
1086         if (m->Opcode() == Op_ConvI2L)
1087           return false;
1088         if (m->is_CastII() && m->isa_CastII()->has_range_check()) {
1089           return false;
1090         }
1091 #endif
1092       }
1093     }
1094   }
1095   return true;
1096 }
1097 
1098 
1099 //------------------------------place_near_use---------------------------------
1100 // Place some computation next to use but not inside inner loops.
1101 // For inner loop uses move it to the preheader area.
1102 Node *PhaseIdealLoop::place_near_use(Node *useblock) const {
1103   IdealLoopTree *u_loop = get_loop( useblock );
1104   if (u_loop->_irreducible) {


3188   // Evacuate nodes in peel region into the not_peeled region if possible
3189   uint new_phi_cnt = 0;
3190   uint cloned_for_outside_use = 0;
3191   for (i = 0; i < peel_list.size();) {
3192     Node* n = peel_list.at(i);
3193 #if !defined(PRODUCT)
3194     if (TracePartialPeeling) n->dump();
3195 #endif
3196     bool incr = true;
3197     if ( !n->is_CFG() ) {
3198 
3199       if ( has_use_in_set(n, not_peel) ) {
3200 
3201         // If not used internal to the peeled region,
3202         // move "n" from peeled to not_peeled region.
3203 
3204         if ( !has_use_internal_to_set(n, peel, loop) ) {
3205 
3206           // if not pinned and not a load (which may be anti-dependent on a store)
3207           // and not a CMove (Matcher expects only bool->cmove).
3208           if (n->in(0) == NULL && !n->is_Load() && !n->is_CMove()) {
3209             cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist );
3210             sink_list.push(n);
3211             peel     >>= n->_idx; // delete n from peel set.
3212             not_peel <<= n->_idx; // add n to not_peel set.
3213             peel_list.remove(i);
3214             incr = false;
3215 #if !defined(PRODUCT)
3216             if (TracePartialPeeling) {
3217               tty->print_cr("sink to not_peeled region: %d newbb: %d",
3218                             n->_idx, get_ctrl(n)->_idx);
3219             }
3220 #endif
3221           }
3222         } else {
3223           // Otherwise check for special def-use cases that span
3224           // the peel/not_peel boundary such as bool->if
3225           clone_for_special_use_inside_loop( loop, n, not_peel, sink_list, worklist );
3226           new_phi_cnt++;
3227         }
3228       }
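Taken together, the delta between the two versions is exactly the removal of the Shenandoah write-barrier special cases: with load-reference barriers there is no ShenandoahBarrier sitting between a Phi and a FastLock for merge_point_safe to worry about, and no ShenandoahWBMemProj nodes for partial peeling to exempt from sinking.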

