src/share/vm/opto/output.cpp

  99 #endif
 100     ) {
 101     // checking for _method means that OptoBreakpoint does not apply to
 102     // runtime stubs or frame converters
 103     _cfg->insert( entry, 1, new MachBreakpointNode() );
 104   }
 105 
 106   // Insert epilogs before every return
 107   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 108     Block* block = _cfg->get_block(i);
 109     if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point?
 110       Node* m = block->end();
 111       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 112         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 113         block->add_inst(epilog);
 114         _cfg->map_node_to_block(epilog, block);
 115       }
 116     }
 117   }
 118 
 119 # ifdef ENABLE_ZAP_DEAD_LOCALS
 120   if (ZapDeadCompiledLocals) {
 121     Insert_zap_nodes();
 122   }
 123 # endif
 124 
 125   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
 126   blk_starts[0] = 0;
 127 
 128   // Initialize code buffer and process short branches.
 129   CodeBuffer* cb = init_buffer(blk_starts);
 130 
 131   if (cb == NULL || failing()) {
 132     return;
 133   }
 134 
 135   ScheduleAndBundle();
 136 
 137 #ifndef PRODUCT
 138   if (trace_opto_output()) {
 139     tty->print("\n---- After ScheduleAndBundle ----\n");
 140     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 141       tty->print("\nBB#%03d:\n", i);
 142       Block* block = _cfg->get_block(i);
 143       for (uint j = 0; j < block->number_of_nodes(); j++) {
 144         Node* n = block->get_node(j);


 167   // Determine if we need to generate a stack overflow check.
 168   // Do it if the method is not a stub function and
 169   // has java calls or has frame size > vm_page_size/8.
 170   // The debug VM checks that deoptimization doesn't trigger an
 171   // unexpected stack overflow (compiled method stack banging should
 172   // guarantee it doesn't happen) so we always need the stack bang in
 173   // a debug VM.
 174   return (UseStackBanging && stub_function() == NULL &&
 175           (has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3
 176            DEBUG_ONLY(|| true)));
 177 }
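
Editor's note: the vm_page_size()>>3 threshold above is easy to sanity-check in isolation. A minimal standalone sketch follows; the 4096-byte page size and the frame sizes are assumed example values, not taken from this code.

#include <cstdio>
#include <initializer_list>

int main() {
  const int vm_page_size = 4096;            // assumed page size for illustration
  const int threshold = vm_page_size >> 3;  // vm_page_size/8 == 512 bytes
  for (int frame_size : {256, 512, 1024}) {
    bool bang = frame_size > threshold;     // mirrors the size test above
    std::printf("frame %4d bytes -> bang: %s\n", frame_size, bang ? "yes" : "no");
  }
  return 0;
}

With a 4096-byte page the cutoff is 512 bytes, so in a product VM only frames strictly larger than that force the bang when the method makes no Java calls.
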
 178 
 179 bool Compile::need_register_stack_bang() const {
 180   // Determine if we need to generate a register stack overflow check.
 181   // This is only used on architectures which have split register
 182   // and memory stacks (i.e. IA64).
 183   // Bang if the method is not a stub function and has java calls.
 184   return (stub_function() == NULL && has_java_calls());
 185 }
 186 
 187 # ifdef ENABLE_ZAP_DEAD_LOCALS
 188 
 189 
 190 // In order to catch compiler oop-map bugs, we have implemented
 191 // a debugging mode called ZapDeadCompilerLocals.
 192 // This mode causes the compiler to insert a call to a runtime routine,
 193 // "zap_dead_locals", right before each place in compiled code
 194 // that could potentially be a gc-point (i.e., a safepoint or oop map point).
 195 // The runtime routine checks that locations mapped as oops are really
 196 // oops, that locations mapped as values do not look like oops,
 197 // and that locations mapped as dead are not used later
 198 // (by zapping them to an invalid address).
 199 
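
Editor's note: as a rough illustration of the three checks described above, here is a conceptual sketch. It is not HotSpot's zap_dead_locals routine; every name in it (SlotKind, kZapValue, looks_like_oop) is hypothetical.

#include <cassert>
#include <cstdint>

enum class SlotKind { Oop, Value, Dead };

const uintptr_t kZapValue = 0xdeadbeef;  // hypothetical poison pattern

// Toy stand-in for a real "is this plausibly a heap pointer" test.
bool looks_like_oop(uintptr_t word) {
  return word != 0 && (word & 0x7) == 0;
}

void zap_check_slot(SlotKind kind, uintptr_t* slot) {
  switch (kind) {
    case SlotKind::Oop:   assert(looks_like_oop(*slot));  break;  // mapped oops must be oops
    case SlotKind::Value: assert(!looks_like_oop(*slot)); break;  // values must not look like oops
    case SlotKind::Dead:  *slot = kZapValue;              break;  // poison dead slots so later uses fail
  }
}

int main() {
  uintptr_t oop_slot = 0x1000, value_slot = 42, dead_slot = 7;
  zap_check_slot(SlotKind::Oop,   &oop_slot);
  zap_check_slot(SlotKind::Value, &value_slot);
  zap_check_slot(SlotKind::Dead,  &dead_slot);  // dead_slot now holds kZapValue
  return 0;
}
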
 200 int Compile::_CompiledZap_count = 0;
 201 
 202 void Compile::Insert_zap_nodes() {
 203   bool skip = false;
 204 
 205 
 206   // Dink with static counts because code without the extra
 207   // runtime calls is MUCH faster for debugging purposes
 208 
 209        if ( CompileZapFirst  ==  0  ) ; // nothing special
 210   else if ( CompileZapFirst  >  CompiledZap_count() )  skip = true;
 211   else if ( CompileZapFirst  == CompiledZap_count() )
 212     warning("starting zap compilation after skipping");
 213 
 214        if ( CompileZapLast  ==  -1  ) ; // nothing special
 215   else if ( CompileZapLast  <   CompiledZap_count() )  skip = true;
 216   else if ( CompileZapLast  ==  CompiledZap_count() )
 217     warning("about to compile last zap");
 218 
 219   ++_CompiledZap_count; // counts skipped zaps, too
 220 
 221   if ( skip )  return;
 222 
 223 
 224   if ( _method == NULL )
 225     return; // no safepoints/oopmaps emitted for calls in stubs, so we don't care
 226 
 227   // Insert call to zap runtime stub before every node with an oop map
 228   for( uint i=0; i<_cfg->number_of_blocks(); i++ ) {
 229     Block *b = _cfg->get_block(i);
 230     for ( uint j = 0;  j < b->number_of_nodes();  ++j ) {
 231       Node *n = b->get_node(j);
 232 
 233       // Determine whether we should insert a zap-a-lot node in output.
 234       // We do that for all nodes that have oopmap info, except for calls
 235       // to allocation.  Calls to allocation pass in the old top-of-eden pointer
 236       // and expect the C code to reset it.  Hence, there can be no safepoints between
 237       // the inlined allocation and the call to new_Java, etc.
 238       // We also cannot zap monitor calls, as they must hold the microlock
 239       // during the call to Zap, which also wants to grab the microlock.
 240       bool insert = n->is_MachSafePoint() && (n->as_MachSafePoint()->oop_map() != NULL);
 241       if ( insert ) { // it is MachSafePoint
 242         if ( !n->is_MachCall() ) {
 243           insert = false;
 244         } else if ( n->is_MachCall() ) {
 245           MachCallNode* call = n->as_MachCall();
 246           if (call->entry_point() == OptoRuntime::new_instance_Java() ||
 247               call->entry_point() == OptoRuntime::new_array_Java() ||
 248               call->entry_point() == OptoRuntime::multianewarray2_Java() ||
 249               call->entry_point() == OptoRuntime::multianewarray3_Java() ||
 250               call->entry_point() == OptoRuntime::multianewarray4_Java() ||
 251               call->entry_point() == OptoRuntime::multianewarray5_Java() ||
 252               call->entry_point() == OptoRuntime::slow_arraycopy_Java() ||
 253               call->entry_point() == OptoRuntime::complete_monitor_locking_Java()
 254               ) {
 255             insert = false;
 256           }
 257         }
 258         if (insert) {
 259           Node *zap = call_zap_node(n->as_MachSafePoint(), i);
 260           b->insert_node(zap, j);
 261           _cfg->map_node_to_block(zap, b);
 262           ++j;
 263         }
 264       }
 265     }
 266   }
 267 }
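
Editor's note: note the ++j after insert_node() above. The zap call is placed at index j, which shifts the safepoint to j+1, so the loop must step past the node it just inserted. A minimal sketch of the same idiom, with a std::vector standing in for the block's node list (match/zap are ints standing in for nodes; only the index bookkeeping matters here).

#include <cstddef>
#include <vector>

void insert_before_matches(std::vector<int>& nodes, int match, int zap) {
  for (std::size_t j = 0; j < nodes.size(); ++j) {
    if (nodes[j] == match) {
      nodes.insert(nodes.begin() + j, zap);  // zap lands at j; match shifts to j+1
      ++j;                                   // step past the node we just inserted
    }
  }
}

int main() {
  std::vector<int> nodes = {1, 7, 2, 7};  // 7 marks a "safepoint"
  insert_before_matches(nodes, 7, 9);     // 9 is the "zap" node
  // nodes is now {1, 9, 7, 2, 9, 7}
  return 0;
}
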
 268 
 269 
 270 Node* Compile::call_zap_node(MachSafePointNode* node_to_check, int block_no) {
 271   const TypeFunc *tf = OptoRuntime::zap_dead_locals_Type();
 272   CallStaticJavaNode* ideal_node =
 273     new CallStaticJavaNode(tf,
 274                            OptoRuntime::zap_dead_locals_stub(_method->flags().is_native()),
 275                            "call zap dead locals stub", 0, TypePtr::BOTTOM);
 276   // We need to copy the OopMap from the site we're zapping at.
 277   // We have to make a copy, because the zap site might not be
 278   // a call site, and zap_dead is a call site.
 279   OopMap* clone = node_to_check->oop_map()->deep_copy();
 280 
 281   // Add the cloned OopMap to the zap node
 282   ideal_node->set_oop_map(clone);
 283   return _matcher->match_sfpt(ideal_node);
 284 }
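
Editor's note: the deep_copy() above matters because the zap node and the original site would otherwise share one mutable OopMap. A small sketch of the clone-before-attach rule; OopMapLike and NodeLike are hypothetical stand-ins.

#include <vector>

struct OopMapLike {
  std::vector<int> oop_slots;
  OopMapLike* deep_copy() const { return new OopMapLike(*this); }  // independent copy
};

struct NodeLike {
  OopMapLike* map = nullptr;
  void set_oop_map(OopMapLike* m) { map = m; }
};

// Attaching a copy keeps later edits to the zap node's map from
// mutating the original site's map (and vice versa).
void attach_cloned_map(NodeLike& zap, const OopMapLike& site_map) {
  zap.set_oop_map(site_map.deep_copy());
}

int main() {
  OopMapLike site{{1, 2, 3}};
  NodeLike zap;
  attach_cloned_map(zap, site);
  zap.map->oop_slots.push_back(4);  // does not affect site.oop_slots
  delete zap.map;
  return 0;
}
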
 285 
 286 bool Compile::is_node_getting_a_safepoint( Node* n) {
 287   // This code duplicates the logic prior to the call of add_safepoint
 288   // below in this file.
 289   if( n->is_MachSafePoint() ) return true;
 290   return false;
 291 }
 292 
 293 # endif // ENABLE_ZAP_DEAD_LOCALS
 294 
 295 // Compute the size of the first NumberOfLoopInstrToAlign instructions at the top
 296 // of a loop. When aligning a loop we need to provide enough instructions
 297 // in the CPU's fetch buffer to feed the decoders. The loop alignment can be
 298 // avoided if there are enough instructions in the fetch buffer at the head of a loop.
 299 // By default, the size is set to 999999 by Block's constructor so that
 300 // a loop will be aligned if the size is not reset here.
 301 //
 302 // Note: Mach instructions could contain several HW instructions
 303 // so the size is estimated only.
 304 //
 305 void Compile::compute_loop_first_inst_sizes() {
 306   // The next condition is used to gate the loop alignment optimization.
 307   // Don't align a loop if there are enough instructions at the head of a loop
 308   // or the alignment padding is larger than MaxLoopPad. By default, MaxLoopPad
 309   // is equal to OptoLoopAlignment-1 except on new Intel cpus, where it is
 310   // equal to 11 bytes, which is the size of the largest address NOP instruction.
 311   if (MaxLoopPad < OptoLoopAlignment - 1) {
 312     uint last_block = _cfg->number_of_blocks() - 1;
 313     for (uint i = 1; i <= last_block; i++) {

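Editor's note: the MaxLoopPad gate above bounds how much padding alignment may cost. A standalone sketch of the padding arithmetic it limits; the OptoLoopAlignment/MaxLoopPad values are assumed defaults for illustration.

#include <cstdio>
#include <initializer_list>

// Padding needed to bring offset up to the next alignment boundary.
int loop_pad(int offset, int align) {
  return (align - (offset % align)) % align;
}

int main() {
  const int OptoLoopAlignment = 16, MaxLoopPad = 11;  // assumed example values
  for (int off : {16, 21, 17}) {
    int pad = loop_pad(off, OptoLoopAlignment);
    std::printf("offset %2d -> pad %2d: %s\n", off, pad,
                pad <= MaxLoopPad ? "align" : "skip alignment");
  }
  return 0;
}
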

 817     break;
 818   default:
 819     ShouldNotReachHere();
 820     break;
 821   }
 822 }
 823 
 824 // Determine if this node starts a bundle
 825 bool Compile::starts_bundle(const Node *n) const {
 826   return (_node_bundling_limit > n->_idx &&
 827           _node_bundling_base[n->_idx].starts_bundle());
 828 }
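
Editor's note: starts_bundle() above checks _node_bundling_limit before indexing _node_bundling_base, since node indexes can exceed the number of records captured at scheduling time. The same bounded-lookup pattern in a self-contained sketch; Flags and BundlingInfo are hypothetical stand-ins.

#include <cstdint>

struct Flags { bool starts_bundle; };

struct BundlingInfo {
  Flags*   base;   // per-node records filled in during scheduling
  uint32_t limit;  // number of valid entries in base
  bool starts_bundle(uint32_t idx) const {
    return idx < limit && base[idx].starts_bundle;  // bounds check before indexing
  }
};

int main() {
  Flags recs[2] = {{true}, {false}};
  BundlingInfo info{recs, 2};
  bool a = info.starts_bundle(0);  // true
  bool b = info.starts_bundle(5);  // false: index past limit, never dereferenced
  return a && !b ? 0 : 1;
}
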
 829 
 830 //--------------------------Process_OopMap_Node--------------------------------
 831 void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
 832 
 833   // Handle special safepoint nodes for synchronization
 834   MachSafePointNode *sfn   = mach->as_MachSafePoint();
 835   MachCallNode      *mcall;
 836 
 837 #ifdef ENABLE_ZAP_DEAD_LOCALS
 838   assert( is_node_getting_a_safepoint(mach),  "logic does not match; false negative");
 839 #endif
 840 
 841   int safepoint_pc_offset = current_offset;
 842   bool is_method_handle_invoke = false;
 843   bool return_oop = false;
 844 
 845   // Add the safepoint in the DebugInfoRecorder
 846   if( !mach->is_MachCall() ) {
 847     mcall = NULL;
 848     debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
 849   } else {
 850     mcall = mach->as_MachCall();
 851 
 852     // Is the call a MethodHandle call?
 853     if (mcall->is_MachCallJava()) {
 854       if (mcall->as_MachCallJava()->_method_handle_invoke) {
 855         assert(has_method_handle_invokes(), "must have been set during call generation");
 856         is_method_handle_invoke = true;
 857       }
 858     }
 859 
 860     // Check if a call returns an object.


1277     for (uint j = 0; j<last_inst; j++) {
1278 
1279       // Get the node
1280       Node* n = block->get_node(j);
1281 
1282       // See if delay slots are supported
1283       if (valid_bundle_info(n) &&
1284           node_bundling(n)->used_in_unconditional_delay()) {
1285         assert(delay_slot == NULL, "no use of delay slot node");
1286         assert(n->size(_regalloc) == Pipeline::instr_unit_size(), "delay slot instruction wrong size");
1287 
1288         delay_slot = n;
1289         continue;
1290       }
1291 
1292       // If this starts a new instruction group, then flush the current one
1293       // (but allow split bundles)
1294       if (Pipeline::requires_bundling() && starts_bundle(n))
1295         cb->flush_bundle(false);
1296 
1297       // The following logic is duplicated in the code ifdeffed for
1298       // ENABLE_ZAP_DEAD_LOCALS which appears above in this file.  It
1299       // should be factored out.  Or maybe dispersed to the nodes?
1300 
1301       // Special handling for SafePoint/Call Nodes
1302       bool is_mcall = false;
1303       if (n->is_Mach()) {
1304         MachNode *mach = n->as_Mach();
1305         is_mcall = n->is_MachCall();
1306         bool is_sfn = n->is_MachSafePoint();
1307 
1308         // If this requires all previous instructions be flushed, then do so
1309         if (is_sfn || is_mcall || mach->alignment_required() != 1) {
1310           cb->flush_bundle(true);
1311           current_offset = cb->insts_size();
1312         }
1313 
 1314         // Padding may be needed again since a previous instruction
 1315         // could have been moved into the delay slot.
1316 
1317         // align the instruction if necessary
1318         int padding = mach->compute_padding(current_offset);
1319         // Make sure safepoint node for polling is distinct from a call's
1320         // return by adding a nop if needed.


1347           mcall->method_set((intptr_t)mcall->entry_point());
1348 
1349           // Save the return address
1350           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1351 
1352           if (mcall->is_MachCallLeaf()) {
1353             is_mcall = false;
1354             is_sfn = false;
1355           }
1356         }
1357 
1358         // sfn will be valid whenever mcall is valid now because of inheritance
1359         if (is_sfn || is_mcall) {
1360 
1361           // Handle special safepoint nodes for synchronization
1362           if (!is_mcall) {
1363             MachSafePointNode *sfn = mach->as_MachSafePoint();
1364             // !!!!! Stubs only need an oopmap right now, so bail out
1365             if (sfn->jvms()->method() == NULL) {
1366               // Write the oopmap directly to the code blob??!!
1367 #             ifdef ENABLE_ZAP_DEAD_LOCALS
1368               assert( !is_node_getting_a_safepoint(sfn),  "logic does not match; false positive");
1369 #             endif
1370               continue;
1371             }
1372           } // End synchronization
1373 
1374           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1375                                            current_offset);
1376           Process_OopMap_Node(mach, current_offset);
1377         } // End if safepoint
1378 
1379         // If this is a null check, then add the start of the previous instruction to the list
1380         else if( mach->is_MachNullCheck() ) {
1381           inct_starts[inct_cnt++] = previous_offset;
1382         }
1383 
1384         // If this is a branch, then fill in the label with the target BB's label
1385         else if (mach->is_MachBranch()) {
1386           // This requires the TRUE branch target be in succs[0]
1387           uint block_num = block->non_connector_successor(0)->_pre_order;
1388 
1389           // Try to replace long branch if delay slot is not used,


1537 
1538       // See if this instruction has a delay slot
1539       if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
1540         assert(delay_slot != NULL, "expecting delay slot node");
1541 
1542         // Back up 1 instruction
1543         cb->set_insts_end(cb->insts_end() - Pipeline::instr_unit_size());
1544 
1545         // Save the offset for the listing
1546 #ifndef PRODUCT
1547         if (node_offsets && delay_slot->_idx < node_offset_limit)
1548           node_offsets[delay_slot->_idx] = cb->insts_size();
1549 #endif
1550 
1551         // Support a SafePoint in the delay slot
1552         if (delay_slot->is_MachSafePoint()) {
1553           MachNode *mach = delay_slot->as_Mach();
1554           // !!!!! Stubs only need an oopmap right now, so bail out
1555           if (!mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == NULL) {
1556             // Write the oopmap directly to the code blob??!!
1557 #           ifdef ENABLE_ZAP_DEAD_LOCALS
1558             assert( !is_node_getting_a_safepoint(mach),  "logic does not match; false positive");
1559 #           endif
1560             delay_slot = NULL;
1561             continue;
1562           }
1563 
1564           int adjusted_offset = current_offset - Pipeline::instr_unit_size();
1565           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1566                                            adjusted_offset);
1567           // Generate an OopMap entry
1568           Process_OopMap_Node(mach, adjusted_offset);
1569         }
1570 
1571         // Insert the delay slot instruction
1572         delay_slot->emit(*cb, _regalloc);
1573 
1574         // Don't reuse it
1575         delay_slot = NULL;
1576       }
1577 
1578     } // End for all instructions in block
1579 
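
Editor's note: the delay-slot handling above is two-phase: the filler node is remembered and skipped when first encountered, and only emitted after set_insts_end() rewinds the buffer by one instruction unit past the branch. A toy sketch of that back-up-and-fill step; ToyBuffer and the fixed 4-byte unit are assumptions, not this code's CodeBuffer.

#include <cstddef>
#include <cstdio>
#include <vector>

const std::size_t kInstrUnit = 4;  // assumed fixed instruction size

// Toy code buffer: emit() appends one instruction-sized chunk and
// back_up() rewinds the end by one unit, as set_insts_end() does above.
struct ToyBuffer {
  std::vector<unsigned char> bytes;
  void emit(unsigned char op) { bytes.insert(bytes.end(), kInstrUnit, op); }
  void back_up() { bytes.resize(bytes.size() - kInstrUnit); }
};

int main() {
  ToyBuffer cb;
  cb.emit(0x10);  // ordinary instruction
  cb.emit(0x20);  // branch whose delay slot follows it
  cb.emit(0x00);  // placeholder occupying the delay slot
  cb.back_up();   // rewind one unit: the placeholder is discarded
  cb.emit(0x30);  // the saved filler node is emitted into the slot
  std::printf("%zu bytes emitted\n", cb.bytes.size());
  return 0;
}
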




  99 #endif
 100     ) {
 101     // checking for _method means that OptoBreakpoint does not apply to
 102     // runtime stubs or frame converters
 103     _cfg->insert( entry, 1, new MachBreakpointNode() );
 104   }
 105 
 106   // Insert epilogs before every return
 107   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 108     Block* block = _cfg->get_block(i);
 109     if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point?
 110       Node* m = block->end();
 111       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 112         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 113         block->add_inst(epilog);
 114         _cfg->map_node_to_block(epilog, block);
 115       }
 116     }
 117   }
 118 
 119   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
 120   blk_starts[0] = 0;
 121 
 122   // Initialize code buffer and process short branches.
 123   CodeBuffer* cb = init_buffer(blk_starts);
 124 
 125   if (cb == NULL || failing()) {
 126     return;
 127   }
 128 
 129   ScheduleAndBundle();
 130 
 131 #ifndef PRODUCT
 132   if (trace_opto_output()) {
 133     tty->print("\n---- After ScheduleAndBundle ----\n");
 134     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 135       tty->print("\nBB#%03d:\n", i);
 136       Block* block = _cfg->get_block(i);
 137       for (uint j = 0; j < block->number_of_nodes(); j++) {
 138         Node* n = block->get_node(j);


 161   // Determine if we need to generate a stack overflow check.
 162   // Do it if the method is not a stub function and
 163   // has java calls or has frame size > vm_page_size/8.
 164   // The debug VM checks that deoptimization doesn't trigger an
 165   // unexpected stack overflow (compiled method stack banging should
 166   // guarantee it doesn't happen) so we always need the stack bang in
 167   // a debug VM.
 168   return (UseStackBanging && stub_function() == NULL &&
 169           (has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3
 170            DEBUG_ONLY(|| true)));
 171 }
 172 
 173 bool Compile::need_register_stack_bang() const {
 174   // Determine if we need to generate a register stack overflow check.
 175   // This is only used on architectures which have split register
 176   // and memory stacks (i.e. IA64).
 177   // Bang if the method is not a stub function and has java calls.
 178   return (stub_function() == NULL && has_java_calls());
 179 }
 180 
 181 
 182 // Compute the size of the first NumberOfLoopInstrToAlign instructions at the top
 183 // of a loop. When aligning a loop we need to provide enough instructions
 184 // in the CPU's fetch buffer to feed the decoders. The loop alignment can be
 185 // avoided if there are enough instructions in the fetch buffer at the head of a loop.
 186 // By default, the size is set to 999999 by Block's constructor so that
 187 // a loop will be aligned if the size is not reset here.
 188 //
 189 // Note: Mach instructions could contain several HW instructions
 190 // so the size is estimated only.
 191 //
 192 void Compile::compute_loop_first_inst_sizes() {
 193   // The next condition is used to gate the loop alignment optimization.
 194   // Don't align a loop if there are enough instructions at the head of a loop
 195   // or the alignment padding is larger than MaxLoopPad. By default, MaxLoopPad
 196   // is equal to OptoLoopAlignment-1 except on new Intel cpus, where it is
 197   // equal to 11 bytes, which is the size of the largest address NOP instruction.
 198   if (MaxLoopPad < OptoLoopAlignment - 1) {
 199     uint last_block = _cfg->number_of_blocks() - 1;
 200     for (uint i = 1; i <= last_block; i++) {


 704     break;
 705   default:
 706     ShouldNotReachHere();
 707     break;
 708   }
 709 }
 710 
 711 // Determine if this node starts a bundle
 712 bool Compile::starts_bundle(const Node *n) const {
 713   return (_node_bundling_limit > n->_idx &&
 714           _node_bundling_base[n->_idx].starts_bundle());
 715 }
 716 
 717 //--------------------------Process_OopMap_Node--------------------------------
 718 void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
 719 
 720   // Handle special safepoint nodes for synchronization
 721   MachSafePointNode *sfn   = mach->as_MachSafePoint();
 722   MachCallNode      *mcall;
 723 
 724   int safepoint_pc_offset = current_offset;
 725   bool is_method_handle_invoke = false;
 726   bool return_oop = false;
 727 
 728   // Add the safepoint in the DebugInfoRecorder
 729   if( !mach->is_MachCall() ) {
 730     mcall = NULL;
 731     debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
 732   } else {
 733     mcall = mach->as_MachCall();
 734 
 735     // Is the call a MethodHandle call?
 736     if (mcall->is_MachCallJava()) {
 737       if (mcall->as_MachCallJava()->_method_handle_invoke) {
 738         assert(has_method_handle_invokes(), "must have been set during call generation");
 739         is_method_handle_invoke = true;
 740       }
 741     }
 742 
 743     // Check if a call returns an object.


1160     for (uint j = 0; j<last_inst; j++) {
1161 
1162       // Get the node
1163       Node* n = block->get_node(j);
1164 
1165       // See if delay slots are supported
1166       if (valid_bundle_info(n) &&
1167           node_bundling(n)->used_in_unconditional_delay()) {
1168         assert(delay_slot == NULL, "no use of delay slot node");
1169         assert(n->size(_regalloc) == Pipeline::instr_unit_size(), "delay slot instruction wrong size");
1170 
1171         delay_slot = n;
1172         continue;
1173       }
1174 
1175       // If this starts a new instruction group, then flush the current one
1176       // (but allow split bundles)
1177       if (Pipeline::requires_bundling() && starts_bundle(n))
1178         cb->flush_bundle(false);
1179 
1180       // Special handling for SafePoint/Call Nodes
1181       bool is_mcall = false;
1182       if (n->is_Mach()) {
1183         MachNode *mach = n->as_Mach();
1184         is_mcall = n->is_MachCall();
1185         bool is_sfn = n->is_MachSafePoint();
1186 
1187         // If this requires all previous instructions be flushed, then do so
1188         if (is_sfn || is_mcall || mach->alignment_required() != 1) {
1189           cb->flush_bundle(true);
1190           current_offset = cb->insts_size();
1191         }
1192 
1193         // Padding may be needed again since a previous instruction
1194         // could have been moved into the delay slot.
1195 
1196         // align the instruction if necessary
1197         int padding = mach->compute_padding(current_offset);
1198         // Make sure safepoint node for polling is distinct from a call's
1199         // return by adding a nop if needed.


1226           mcall->method_set((intptr_t)mcall->entry_point());
1227 
1228           // Save the return address
1229           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1230 
1231           if (mcall->is_MachCallLeaf()) {
1232             is_mcall = false;
1233             is_sfn = false;
1234           }
1235         }
1236 
1237         // sfn will be valid whenever mcall is valid now because of inheritance
1238         if (is_sfn || is_mcall) {
1239 
1240           // Handle special safepoint nodes for synchronization
1241           if (!is_mcall) {
1242             MachSafePointNode *sfn = mach->as_MachSafePoint();
1243             // !!!!! Stubs only need an oopmap right now, so bail out
1244             if (sfn->jvms()->method() == NULL) {
1245               // Write the oopmap directly to the code blob??!!
1246               continue;
1247             }
1248           } // End synchronization
1249 
1250           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1251                                            current_offset);
1252           Process_OopMap_Node(mach, current_offset);
1253         } // End if safepoint
1254 
1255         // If this is a null check, then add the start of the previous instruction to the list
1256         else if( mach->is_MachNullCheck() ) {
1257           inct_starts[inct_cnt++] = previous_offset;
1258         }
1259 
1260         // If this is a branch, then fill in the label with the target BB's label
1261         else if (mach->is_MachBranch()) {
1262           // This requires the TRUE branch target be in succs[0]
1263           uint block_num = block->non_connector_successor(0)->_pre_order;
1264 
1265           // Try to replace long branch if delay slot is not used,


1413 
1414       // See if this instruction has a delay slot
1415       if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
1416         assert(delay_slot != NULL, "expecting delay slot node");
1417 
1418         // Back up 1 instruction
1419         cb->set_insts_end(cb->insts_end() - Pipeline::instr_unit_size());
1420 
1421         // Save the offset for the listing
1422 #ifndef PRODUCT
1423         if (node_offsets && delay_slot->_idx < node_offset_limit)
1424           node_offsets[delay_slot->_idx] = cb->insts_size();
1425 #endif
1426 
1427         // Support a SafePoint in the delay slot
1428         if (delay_slot->is_MachSafePoint()) {
1429           MachNode *mach = delay_slot->as_Mach();
1430           // !!!!! Stubs only need an oopmap right now, so bail out
1431           if (!mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == NULL) {
1432             // Write the oopmap directly to the code blob??!!
1433             delay_slot = NULL;
1434             continue;
1435           }
1436 
1437           int adjusted_offset = current_offset - Pipeline::instr_unit_size();
1438           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1439                                            adjusted_offset);
1440           // Generate an OopMap entry
1441           Process_OopMap_Node(mach, adjusted_offset);
1442         }
1443 
1444         // Insert the delay slot instruction
1445         delay_slot->emit(*cb, _regalloc);
1446 
1447         // Don't reuse it
1448         delay_slot = NULL;
1449       }
1450 
1451     } // End for all instructions in block
1452 

