1 /*
   2  * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "code/compiledIC.hpp"
  28 #include "code/debugInfo.hpp"
  29 #include "code/debugInfoRec.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/oopMap.hpp"
  32 #include "memory/allocation.inline.hpp"
  33 #include "opto/callnode.hpp"
  34 #include "opto/cfgnode.hpp"
  35 #include "opto/locknode.hpp"
  36 #include "opto/machnode.hpp"
  37 #include "opto/output.hpp"
  38 #include "opto/regalloc.hpp"
  39 #include "opto/runtime.hpp"
  40 #include "opto/subnode.hpp"
  41 #include "opto/type.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "utilities/xmlstream.hpp"
  44 
  45 extern uint size_exception_handler();
  46 extern uint size_deopt_handler();
  47 
  48 #ifndef PRODUCT
  49 #define DEBUG_ARG(x) , x
  50 #else
  51 #define DEBUG_ARG(x)
  52 #endif
  53 
  54 extern int emit_exception_handler(CodeBuffer &cbuf);
  55 extern int emit_deopt_handler(CodeBuffer &cbuf);
  56 
  57 // Convert Nodes to instruction bits and pass off to the VM
  58 void Compile::Output() {
  59   // RootNode goes
  60   assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
  61 
  62   // The number of new nodes (mostly MachNop) is proportional to
  63   // the number of java calls and inner loops which are aligned.
  64   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
  65                             C->inner_loops()*(OptoLoopAlignment-1)),
  66                            "out of nodes before code generation" ) ) {
  67     return;
  68   }
  69   // Make sure I can find the Start Node
  70   Block *entry = _cfg->get_block(1);
  71   Block *broot = _cfg->get_root_block();
  72 
  73   const StartNode *start = entry->head()->as_Start();
  74 
  75   // Replace StartNode with prolog
  76   MachPrologNode *prolog = new (this) MachPrologNode();
  77   entry->map_node(prolog, 0);
  78   _cfg->map_node_to_block(prolog, entry);
  79   _cfg->unmap_node_from_block(start); // start is no longer in any block
  80 
  81   // Virtual methods need an unverified entry point
  82 
  83   if( is_osr_compilation() ) {
  84     if( PoisonOSREntry ) {
  85       // TODO: Should use a ShouldNotReachHereNode...
  86       _cfg->insert( broot, 0, new (this) MachBreakpointNode() );
  87     }
  88   } else {
  89     if( _method && !_method->flags().is_static() ) {
  90       // Insert unvalidated entry point
  91       _cfg->insert( broot, 0, new (this) MachUEPNode() );
  92     }
  93 
  94   }
  95 
  96 
  97   // Break before main entry point
  98   if( (_method && _method->break_at_execute())
  99 #ifndef PRODUCT
 100     ||(OptoBreakpoint && is_method_compilation())
 101     ||(OptoBreakpointOSR && is_osr_compilation())
 102     ||(OptoBreakpointC2R && !_method)
 103 #endif
 104     ) {
 105     // checking for _method means that OptoBreakpoint does not apply to
 106     // runtime stubs or frame converters
 107     _cfg->insert( entry, 1, new (this) MachBreakpointNode() );
 108   }
 109 
 110   // Insert epilogs before every return
 111   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 112     Block* block = _cfg->get_block(i);
 113     if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point?
 114       Node* m = block->end();
 115       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 116         MachEpilogNode* epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 117         block->add_inst(epilog);
 118         _cfg->map_node_to_block(epilog, block);
 119       }
 120     }
 121   }
 122 
 123 # ifdef ENABLE_ZAP_DEAD_LOCALS
 124   if (ZapDeadCompiledLocals) {
 125     Insert_zap_nodes();
 126   }
 127 # endif
 128 
 129   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
 130   blk_starts[0] = 0;
 131 
 132   // Initialize code buffer and process short branches.
 133   CodeBuffer* cb = init_buffer(blk_starts);
 134 
 135   if (cb == NULL || failing()) {
 136     return;
 137   }
 138 
 139   ScheduleAndBundle();
 140 
 141 #ifndef PRODUCT
 142   if (trace_opto_output()) {
 143     tty->print("\n---- After ScheduleAndBundle ----\n");
 144     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 145       tty->print("\nBB#%03d:\n", i);
 146       Block* block = _cfg->get_block(i);
 147       for (uint j = 0; j < block->number_of_nodes(); j++) {
 148         Node* n = block->get_node(j);
 149         OptoReg::Name reg = _regalloc->get_reg_first(n);
 150         tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
 151         n->dump();
 152       }
 153     }
 154   }
 155 #endif
 156 
 157   if (failing()) {
 158     return;
 159   }
 160 
 161   BuildOopMaps();
 162 
 163   if (failing())  {
 164     return;
 165   }
 166 
 167   fill_buffer(cb, blk_starts);
 168 }
 169 
 170 bool Compile::need_stack_bang(int frame_size_in_bytes) const {
 171   // Determine if we need to generate a stack overflow check.
 172   // Do it if the method is not a stub function and
 173   // has java calls or has frame size > vm_page_size/8.
 174   return (UseStackBanging && stub_function() == NULL &&
 175           (has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3));
 176 }
 177 
 178 bool Compile::need_register_stack_bang() const {
 179   // Determine if we need to generate a register stack overflow check.
 180   // This is only used on architectures which have split register
 181   // and memory stacks (ie. IA64).
 182   // Bang if the method is not a stub function and has java calls
 183   return (stub_function() == NULL && has_java_calls());
 184 }
 185 
 186 # ifdef ENABLE_ZAP_DEAD_LOCALS
 187 
 188 
 189 // In order to catch compiler oop-map bugs, we have implemented
 190 // a debugging mode called ZapDeadCompilerLocals.
 191 // This mode causes the compiler to insert a call to a runtime routine,
 192 // "zap_dead_locals", right before each place in compiled code
 193 // that could potentially be a gc-point (i.e., a safepoint or oop map point).
 194 // The runtime routine checks that locations mapped as oops are really
 195 // oops, that locations mapped as values do not look like oops,
 196 // and that locations mapped as dead are not used later
 197 // (by zapping them to an invalid address).
 198 
 199 int Compile::_CompiledZap_count = 0;
 200 
 201 void Compile::Insert_zap_nodes() {
 202   bool skip = false;
 203 
 204 
 205   // Dink with static counts because code code without the extra
 206   // runtime calls is MUCH faster for debugging purposes
 207 
 208        if ( CompileZapFirst  ==  0  ) ; // nothing special
 209   else if ( CompileZapFirst  >  CompiledZap_count() )  skip = true;
 210   else if ( CompileZapFirst  == CompiledZap_count() )
 211     warning("starting zap compilation after skipping");
 212 
 213        if ( CompileZapLast  ==  -1  ) ; // nothing special
 214   else if ( CompileZapLast  <   CompiledZap_count() )  skip = true;
 215   else if ( CompileZapLast  ==  CompiledZap_count() )
 216     warning("about to compile last zap");
 217 
 218   ++_CompiledZap_count; // counts skipped zaps, too
 219 
 220   if ( skip )  return;
 221 
 222 
 223   if ( _method == NULL )
 224     return; // no safepoints/oopmaps emitted for calls in stubs,so we don't care
 225 
 226   // Insert call to zap runtime stub before every node with an oop map
 227   for( uint i=0; i<_cfg->number_of_blocks(); i++ ) {
 228     Block *b = _cfg->get_block(i);
 229     for ( uint j = 0;  j < b->number_of_nodes();  ++j ) {
 230       Node *n = b->get_node(j);
 231 
 232       // Determining if we should insert a zap-a-lot node in output.
 233       // We do that for all nodes that has oopmap info, except for calls
 234       // to allocation.  Calls to allocation passes in the old top-of-eden pointer
 235       // and expect the C code to reset it.  Hence, there can be no safepoints between
 236       // the inlined-allocation and the call to new_Java, etc.
 237       // We also cannot zap monitor calls, as they must hold the microlock
 238       // during the call to Zap, which also wants to grab the microlock.
 239       bool insert = n->is_MachSafePoint() && (n->as_MachSafePoint()->oop_map() != NULL);
 240       if ( insert ) { // it is MachSafePoint
 241         if ( !n->is_MachCall() ) {
 242           insert = false;
 243         } else if ( n->is_MachCall() ) {
 244           MachCallNode* call = n->as_MachCall();
 245           if (call->entry_point() == OptoRuntime::new_instance_Java() ||
 246               call->entry_point() == OptoRuntime::new_array_Java() ||
 247               call->entry_point() == OptoRuntime::multianewarray2_Java() ||
 248               call->entry_point() == OptoRuntime::multianewarray3_Java() ||
 249               call->entry_point() == OptoRuntime::multianewarray4_Java() ||
 250               call->entry_point() == OptoRuntime::multianewarray5_Java() ||
 251               call->entry_point() == OptoRuntime::slow_arraycopy_Java() ||
 252               call->entry_point() == OptoRuntime::complete_monitor_locking_Java()
 253               ) {
 254             insert = false;
 255           }
 256         }
 257         if (insert) {
 258           Node *zap = call_zap_node(n->as_MachSafePoint(), i);
 259           b->insert_node(zap, j);
 260           _cfg->map_node_to_block(zap, b);
 261           ++j;
 262         }
 263       }
 264     }
 265   }
 266 }
 267 
 268 
 269 Node* Compile::call_zap_node(MachSafePointNode* node_to_check, int block_no) {
 270   const TypeFunc *tf = OptoRuntime::zap_dead_locals_Type();
 271   CallStaticJavaNode* ideal_node =
 272     new (this) CallStaticJavaNode( tf,
 273          OptoRuntime::zap_dead_locals_stub(_method->flags().is_native()),
 274                        "call zap dead locals stub", 0, TypePtr::BOTTOM);
 275   // We need to copy the OopMap from the site we're zapping at.
 276   // We have to make a copy, because the zap site might not be
 277   // a call site, and zap_dead is a call site.
 278   OopMap* clone = node_to_check->oop_map()->deep_copy();
 279 
 280   // Add the cloned OopMap to the zap node
 281   ideal_node->set_oop_map(clone);
 282   return _matcher->match_sfpt(ideal_node);
 283 }
 284 
 285 bool Compile::is_node_getting_a_safepoint( Node* n) {
 286   // This code duplicates the logic prior to the call of add_safepoint
 287   // below in this file.
 288   if( n->is_MachSafePoint() ) return true;
 289   return false;
 290 }
 291 
 292 # endif // ENABLE_ZAP_DEAD_LOCALS
 293 
 294 // Compute the size of first NumberOfLoopInstrToAlign instructions at the top
 295 // of a loop. When aligning a loop we need to provide enough instructions
 296 // in cpu's fetch buffer to feed decoders. The loop alignment could be
 297 // avoided if we have enough instructions in fetch buffer at the head of a loop.
 298 // By default, the size is set to 999999 by Block's constructor so that
 299 // a loop will be aligned if the size is not reset here.
 300 //
 301 // Note: Mach instructions could contain several HW instructions
 302 // so the size is estimated only.
 303 //
 304 void Compile::compute_loop_first_inst_sizes() {
 305   // The next condition is used to gate the loop alignment optimization.
 306   // Don't aligned a loop if there are enough instructions at the head of a loop
 307   // or alignment padding is larger then MaxLoopPad. By default, MaxLoopPad
 308   // is equal to OptoLoopAlignment-1 except on new Intel cpus, where it is
 309   // equal to 11 bytes which is the largest address NOP instruction.
 310   if (MaxLoopPad < OptoLoopAlignment - 1) {
 311     uint last_block = _cfg->number_of_blocks() - 1;
 312     for (uint i = 1; i <= last_block; i++) {
 313       Block* block = _cfg->get_block(i);
 314       // Check the first loop's block which requires an alignment.
 315       if (block->loop_alignment() > (uint)relocInfo::addr_unit()) {
 316         uint sum_size = 0;
 317         uint inst_cnt = NumberOfLoopInstrToAlign;
 318         inst_cnt = block->compute_first_inst_size(sum_size, inst_cnt, _regalloc);
 319 
 320         // Check subsequent fallthrough blocks if the loop's first
 321         // block(s) does not have enough instructions.
 322         Block *nb = block;
 323         while(inst_cnt > 0 &&
 324               i < last_block &&
 325               !_cfg->get_block(i + 1)->has_loop_alignment() &&
 326               !nb->has_successor(block)) {
 327           i++;
 328           nb = _cfg->get_block(i);
 329           inst_cnt  = nb->compute_first_inst_size(sum_size, inst_cnt, _regalloc);
 330         } // while( inst_cnt > 0 && i < last_block  )
 331 
 332         block->set_first_inst_size(sum_size);
 333       } // f( b->head()->is_Loop() )
 334     } // for( i <= last_block )
 335   } // if( MaxLoopPad < OptoLoopAlignment-1 )
 336 }
 337 
 338 // The architecture description provides short branch variants for some long
 339 // branch instructions. Replace eligible long branches with short branches.
 340 void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size) {
 341   // Compute size of each block, method size, and relocation information size
 342   uint nblocks  = _cfg->number_of_blocks();
 343 
 344   uint*      jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
 345   uint*      jmp_size   = NEW_RESOURCE_ARRAY(uint,nblocks);
 346   int*       jmp_nidx   = NEW_RESOURCE_ARRAY(int ,nblocks);
 347   DEBUG_ONLY( uint *jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks); )
 348   DEBUG_ONLY( uint *jmp_rule = NEW_RESOURCE_ARRAY(uint,nblocks); )
 349 
 350   bool has_short_branch_candidate = false;
 351 
 352   // Initialize the sizes to 0
 353   code_size  = 0;          // Size in bytes of generated code
 354   stub_size  = 0;          // Size in bytes of all stub entries
 355   // Size in bytes of all relocation entries, including those in local stubs.
 356   // Start with 2-bytes of reloc info for the unvalidated entry point
 357   reloc_size = 1;          // Number of relocation entries
 358 
 359   // Make three passes.  The first computes pessimistic blk_starts,
 360   // relative jmp_offset and reloc_size information.  The second performs
 361   // short branch substitution using the pessimistic sizing.  The
 362   // third inserts nops where needed.
 363 
 364   // Step one, perform a pessimistic sizing pass.
 365   uint last_call_adr = max_uint;
 366   uint last_avoid_back_to_back_adr = max_uint;
 367   uint nop_size = (new (this) MachNopNode())->size(_regalloc);
 368   for (uint i = 0; i < nblocks; i++) { // For all blocks
 369     Block* block = _cfg->get_block(i);
 370 
 371     // During short branch replacement, we store the relative (to blk_starts)
 372     // offset of jump in jmp_offset, rather than the absolute offset of jump.
 373     // This is so that we do not need to recompute sizes of all nodes when
 374     // we compute correct blk_starts in our next sizing pass.
 375     jmp_offset[i] = 0;
 376     jmp_size[i]   = 0;
 377     jmp_nidx[i]   = -1;
 378     DEBUG_ONLY( jmp_target[i] = 0; )
 379     DEBUG_ONLY( jmp_rule[i]   = 0; )
 380 
 381     // Sum all instruction sizes to compute block size
 382     uint last_inst = block->number_of_nodes();
 383     uint blk_size = 0;
 384     for (uint j = 0; j < last_inst; j++) {
 385       Node* nj = block->get_node(j);
 386       // Handle machine instruction nodes
 387       if (nj->is_Mach()) {
 388         MachNode *mach = nj->as_Mach();
 389         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 390         reloc_size += mach->reloc();
 391         if (mach->is_MachCall()) {
 392           MachCallNode *mcall = mach->as_MachCall();
 393           // This destination address is NOT PC-relative
 394 
 395           mcall->method_set((intptr_t)mcall->entry_point());
 396 
 397           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 398             stub_size  += CompiledStaticCall::to_interp_stub_size();
 399             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 400           }
 401         } else if (mach->is_MachSafePoint()) {
 402           // If call/safepoint are adjacent, account for possible
 403           // nop to disambiguate the two safepoints.
 404           // ScheduleAndBundle() can rearrange nodes in a block,
 405           // check for all offsets inside this block.
 406           if (last_call_adr >= blk_starts[i]) {
 407             blk_size += nop_size;
 408           }
 409         }
 410         if (mach->avoid_back_to_back()) {
 411           // Nop is inserted between "avoid back to back" instructions.
 412           // ScheduleAndBundle() can rearrange nodes in a block,
 413           // check for all offsets inside this block.
 414           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 415             blk_size += nop_size;
 416           }
 417         }
 418         if (mach->may_be_short_branch()) {
 419           if (!nj->is_MachBranch()) {
 420 #ifndef PRODUCT
 421             nj->dump(3);
 422 #endif
 423             Unimplemented();
 424           }
 425           assert(jmp_nidx[i] == -1, "block should have only one branch");
 426           jmp_offset[i] = blk_size;
 427           jmp_size[i]   = nj->size(_regalloc);
 428           jmp_nidx[i]   = j;
 429           has_short_branch_candidate = true;
 430         }
 431       }
 432       blk_size += nj->size(_regalloc);
 433       // Remember end of call offset
 434       if (nj->is_MachCall() && !nj->is_MachCallLeaf()) {
 435         last_call_adr = blk_starts[i]+blk_size;
 436       }
 437       // Remember end of avoid_back_to_back offset
 438       if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back()) {
 439         last_avoid_back_to_back_adr = blk_starts[i]+blk_size;
 440       }
 441     }
 442 
 443     // When the next block starts a loop, we may insert pad NOP
 444     // instructions.  Since we cannot know our future alignment,
 445     // assume the worst.
 446     if (i < nblocks - 1) {
 447       Block* nb = _cfg->get_block(i + 1);
 448       int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
 449       if (max_loop_pad > 0) {
 450         assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
 451         // Adjust last_call_adr and/or last_avoid_back_to_back_adr.
 452         // If either is the last instruction in this block, bump by
 453         // max_loop_pad in lock-step with blk_size, so sizing
 454         // calculations in subsequent blocks still can conservatively
 455         // detect that it may the last instruction in this block.
 456         if (last_call_adr == blk_starts[i]+blk_size) {
 457           last_call_adr += max_loop_pad;
 458         }
 459         if (last_avoid_back_to_back_adr == blk_starts[i]+blk_size) {
 460           last_avoid_back_to_back_adr += max_loop_pad;
 461         }
 462         blk_size += max_loop_pad;
 463       }
 464     }
 465 
 466     // Save block size; update total method size
 467     blk_starts[i+1] = blk_starts[i]+blk_size;
 468   }
 469 
 470   // Step two, replace eligible long jumps.
 471   bool progress = true;
 472   uint last_may_be_short_branch_adr = max_uint;
 473   while (has_short_branch_candidate && progress) {
 474     progress = false;
 475     has_short_branch_candidate = false;
 476     int adjust_block_start = 0;
 477     for (uint i = 0; i < nblocks; i++) {
 478       Block* block = _cfg->get_block(i);
 479       int idx = jmp_nidx[i];
 480       MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach();
 481       if (mach != NULL && mach->may_be_short_branch()) {
 482 #ifdef ASSERT
 483         assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
 484         int j;
 485         // Find the branch; ignore trailing NOPs.
 486         for (j = block->number_of_nodes()-1; j>=0; j--) {
 487           Node* n = block->get_node(j);
 488           if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
 489             break;
 490         }
 491         assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity");
 492 #endif
 493         int br_size = jmp_size[i];
 494         int br_offs = blk_starts[i] + jmp_offset[i];
 495 
 496         // This requires the TRUE branch target be in succs[0]
 497         uint bnum = block->non_connector_successor(0)->_pre_order;
 498         int offset = blk_starts[bnum] - br_offs;
 499         if (bnum > i) { // adjust following block's offset
 500           offset -= adjust_block_start;
 501         }
 502         // In the following code a nop could be inserted before
 503         // the branch which will increase the backward distance.
 504         bool needs_padding = ((uint)br_offs == last_may_be_short_branch_adr);
 505         if (needs_padding && offset <= 0)
 506           offset -= nop_size;
 507 
 508         if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
 509           // We've got a winner.  Replace this branch.
 510           MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
 511 
 512           // Update the jmp_size.
 513           int new_size = replacement->size(_regalloc);
 514           int diff     = br_size - new_size;
 515           assert(diff >= (int)nop_size, "short_branch size should be smaller");
 516           // Conservatively take into accound padding between
 517           // avoid_back_to_back branches. Previous branch could be
 518           // converted into avoid_back_to_back branch during next
 519           // rounds.
 520           if (needs_padding && replacement->avoid_back_to_back()) {
 521             jmp_offset[i] += nop_size;
 522             diff -= nop_size;
 523           }
 524           adjust_block_start += diff;
 525           block->map_node(replacement, idx);
 526           mach->subsume_by(replacement, C);
 527           mach = replacement;
 528           progress = true;
 529 
 530           jmp_size[i] = new_size;
 531           DEBUG_ONLY( jmp_target[i] = bnum; );
 532           DEBUG_ONLY( jmp_rule[i] = mach->rule(); );
 533         } else {
 534           // The jump distance is not short, try again during next iteration.
 535           has_short_branch_candidate = true;
 536         }
 537       } // (mach->may_be_short_branch())
 538       if (mach != NULL && (mach->may_be_short_branch() ||
 539                            mach->avoid_back_to_back())) {
 540         last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i];
 541       }
 542       blk_starts[i+1] -= adjust_block_start;
 543     }
 544   }
 545 
 546 #ifdef ASSERT
 547   for (uint i = 0; i < nblocks; i++) { // For all blocks
 548     if (jmp_target[i] != 0) {
 549       int br_size = jmp_size[i];
 550       int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_offset[i]);
 551       if (!_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset)) {
 552         tty->print_cr("target (%d) - jmp_offset(%d) = offset (%d), jump_size(%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_offset[i], offset, br_size, i, jmp_target[i]);
 553       }
 554       assert(_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset), "Displacement too large for short jmp");
 555     }
 556   }
 557 #endif
 558 
 559   // Step 3, compute the offsets of all blocks, will be done in fill_buffer()
 560   // after ScheduleAndBundle().
 561 
 562   // ------------------
 563   // Compute size for code buffer
 564   code_size = blk_starts[nblocks];
 565 
 566   // Relocation records
 567   reloc_size += 1;              // Relo entry for exception handler
 568 
 569   // Adjust reloc_size to number of record of relocation info
 570   // Min is 2 bytes, max is probably 6 or 8, with a tax up to 25% for
 571   // a relocation index.
 572   // The CodeBuffer will expand the locs array if this estimate is too low.
 573   reloc_size *= 10 / sizeof(relocInfo);
 574 }
 575 
 576 //------------------------------FillLocArray-----------------------------------
 577 // Create a bit of debug info and append it to the array.  The mapping is from
 578 // Java local or expression stack to constant, register or stack-slot.  For
 579 // doubles, insert 2 mappings and return 1 (to tell the caller that the next
 580 // entry has been taken care of and caller should skip it).
 581 static LocationValue *new_loc_value( PhaseRegAlloc *ra, OptoReg::Name regnum, Location::Type l_type ) {
 582   // This should never have accepted Bad before
 583   assert(OptoReg::is_valid(regnum), "location must be valid");
 584   return (OptoReg::is_reg(regnum))
 585     ? new LocationValue(Location::new_reg_loc(l_type, OptoReg::as_VMReg(regnum)) )
 586     : new LocationValue(Location::new_stk_loc(l_type,  ra->reg2offset(regnum)));
 587 }
 588 
 589 
 590 ObjectValue*
 591 Compile::sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id) {
 592   for (int i = 0; i < objs->length(); i++) {
 593     assert(objs->at(i)->is_object(), "corrupt object cache");
 594     ObjectValue* sv = (ObjectValue*) objs->at(i);
 595     if (sv->id() == id) {
 596       return sv;
 597     }
 598   }
 599   // Otherwise..
 600   return NULL;
 601 }
 602 
 603 void Compile::set_sv_for_object_node(GrowableArray<ScopeValue*> *objs,
 604                                      ObjectValue* sv ) {
 605   assert(sv_for_node_id(objs, sv->id()) == NULL, "Precondition");
 606   objs->append(sv);
 607 }
 608 
 609 
 610 void Compile::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
 611                             GrowableArray<ScopeValue*> *array,
 612                             GrowableArray<ScopeValue*> *objs ) {
 613   assert( local, "use _top instead of null" );
 614   if (array->length() != idx) {
 615     assert(array->length() == idx + 1, "Unexpected array count");
 616     // Old functionality:
 617     //   return
 618     // New functionality:
 619     //   Assert if the local is not top. In product mode let the new node
 620     //   override the old entry.
 621     assert(local == top(), "LocArray collision");
 622     if (local == top()) {
 623       return;
 624     }
 625     array->pop();
 626   }
 627   const Type *t = local->bottom_type();
 628 
 629   // Is it a safepoint scalar object node?
 630   if (local->is_SafePointScalarObject()) {
 631     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 632 
 633     ObjectValue* sv = Compile::sv_for_node_id(objs, spobj->_idx);
 634     if (sv == NULL) {
 635       ciKlass* cik = t->is_oopptr()->klass();
 636       assert(cik->is_instance_klass() ||
 637              cik->is_array_klass(), "Not supported allocation.");
 638       sv = new ObjectValue(spobj->_idx,
 639                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
 640       Compile::set_sv_for_object_node(objs, sv);
 641 
 642       uint first_ind = spobj->first_index(sfpt->jvms());
 643       for (uint i = 0; i < spobj->n_fields(); i++) {
 644         Node* fld_node = sfpt->in(first_ind+i);
 645         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 646       }
 647     }
 648     array->append(sv);
 649     return;
 650   }
 651 
 652   // Grab the register number for the local
 653   OptoReg::Name regnum = _regalloc->get_reg_first(local);
 654   if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
 655     // Record the double as two float registers.
 656     // The register mask for such a value always specifies two adjacent
 657     // float registers, with the lower register number even.
 658     // Normally, the allocation of high and low words to these registers
 659     // is irrelevant, because nearly all operations on register pairs
 660     // (e.g., StoreD) treat them as a single unit.
 661     // Here, we assume in addition that the words in these two registers
 662     // stored "naturally" (by operations like StoreD and double stores
 663     // within the interpreter) such that the lower-numbered register
 664     // is written to the lower memory address.  This may seem like
 665     // a machine dependency, but it is not--it is a requirement on
 666     // the author of the <arch>.ad file to ensure that, for every
 667     // even/odd double-register pair to which a double may be allocated,
 668     // the word in the even single-register is stored to the first
 669     // memory word.  (Note that register numbers are completely
 670     // arbitrary, and are not tied to any machine-level encodings.)
 671 #ifdef _LP64
 672     if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon ) {
 673       array->append(new ConstantIntValue(0));
 674       array->append(new_loc_value( _regalloc, regnum, Location::dbl ));
 675     } else if ( t->base() == Type::Long ) {
 676       array->append(new ConstantIntValue(0));
 677       array->append(new_loc_value( _regalloc, regnum, Location::lng ));
 678     } else if ( t->base() == Type::RawPtr ) {
 679       // jsr/ret return address which must be restored into a the full
 680       // width 64-bit stack slot.
 681       array->append(new_loc_value( _regalloc, regnum, Location::lng ));
 682     }
 683 #else //_LP64
 684 #ifdef SPARC
 685     if (t->base() == Type::Long && OptoReg::is_reg(regnum)) {
 686       // For SPARC we have to swap high and low words for
 687       // long values stored in a single-register (g0-g7).
 688       array->append(new_loc_value( _regalloc,              regnum   , Location::normal ));
 689       array->append(new_loc_value( _regalloc, OptoReg::add(regnum,1), Location::normal ));
 690     } else
 691 #endif //SPARC
 692     if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon || t->base() == Type::Long ) {
 693       // Repack the double/long as two jints.
 694       // The convention the interpreter uses is that the second local
 695       // holds the first raw word of the native double representation.
 696       // This is actually reasonable, since locals and stack arrays
 697       // grow downwards in all implementations.
 698       // (If, on some machine, the interpreter's Java locals or stack
 699       // were to grow upwards, the embedded doubles would be word-swapped.)
 700       array->append(new_loc_value( _regalloc, OptoReg::add(regnum,1), Location::normal ));
 701       array->append(new_loc_value( _regalloc,              regnum   , Location::normal ));
 702     }
 703 #endif //_LP64
 704     else if( (t->base() == Type::FloatBot || t->base() == Type::FloatCon) &&
 705                OptoReg::is_reg(regnum) ) {
 706       array->append(new_loc_value( _regalloc, regnum, Matcher::float_in_double()
 707                                    ? Location::float_in_dbl : Location::normal ));
 708     } else if( t->base() == Type::Int && OptoReg::is_reg(regnum) ) {
 709       array->append(new_loc_value( _regalloc, regnum, Matcher::int_in_long
 710                                    ? Location::int_in_long : Location::normal ));
 711     } else if( t->base() == Type::NarrowOop ) {
 712       array->append(new_loc_value( _regalloc, regnum, Location::narrowoop ));
 713     } else {
 714       array->append(new_loc_value( _regalloc, regnum, _regalloc->is_oop(local) ? Location::oop : Location::normal ));
 715     }
 716     return;
 717   }
 718 
 719   // No register.  It must be constant data.
 720   switch (t->base()) {
 721   case Type::Half:              // Second half of a double
 722     ShouldNotReachHere();       // Caller should skip 2nd halves
 723     break;
 724   case Type::AnyPtr:
 725     array->append(new ConstantOopWriteValue(NULL));
 726     break;
 727   case Type::AryPtr:
 728   case Type::InstPtr:          // fall through
 729     array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->constant_encoding()));
 730     break;
 731   case Type::NarrowOop:
 732     if (t == TypeNarrowOop::NULL_PTR) {
 733       array->append(new ConstantOopWriteValue(NULL));
 734     } else {
 735       array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->constant_encoding()));
 736     }
 737     break;
 738   case Type::Int:
 739     array->append(new ConstantIntValue(t->is_int()->get_con()));
 740     break;
 741   case Type::RawPtr:
 742     // A return address (T_ADDRESS).
 743     assert((intptr_t)t->is_ptr()->get_con() < (intptr_t)0x10000, "must be a valid BCI");
 744 #ifdef _LP64
 745     // Must be restored to the full-width 64-bit stack slot.
 746     array->append(new ConstantLongValue(t->is_ptr()->get_con()));
 747 #else
 748     array->append(new ConstantIntValue(t->is_ptr()->get_con()));
 749 #endif
 750     break;
 751   case Type::FloatCon: {
 752     float f = t->is_float_constant()->getf();
 753     array->append(new ConstantIntValue(jint_cast(f)));
 754     break;
 755   }
 756   case Type::DoubleCon: {
 757     jdouble d = t->is_double_constant()->getd();
 758 #ifdef _LP64
 759     array->append(new ConstantIntValue(0));
 760     array->append(new ConstantDoubleValue(d));
 761 #else
 762     // Repack the double as two jints.
 763     // The convention the interpreter uses is that the second local
 764     // holds the first raw word of the native double representation.
 765     // This is actually reasonable, since locals and stack arrays
 766     // grow downwards in all implementations.
 767     // (If, on some machine, the interpreter's Java locals or stack
 768     // were to grow upwards, the embedded doubles would be word-swapped.)
 769     jint   *dp = (jint*)&d;
 770     array->append(new ConstantIntValue(dp[1]));
 771     array->append(new ConstantIntValue(dp[0]));
 772 #endif
 773     break;
 774   }
 775   case Type::Long: {
 776     jlong d = t->is_long()->get_con();
 777 #ifdef _LP64
 778     array->append(new ConstantIntValue(0));
 779     array->append(new ConstantLongValue(d));
 780 #else
 781     // Repack the long as two jints.
 782     // The convention the interpreter uses is that the second local
 783     // holds the first raw word of the native double representation.
 784     // This is actually reasonable, since locals and stack arrays
 785     // grow downwards in all implementations.
 786     // (If, on some machine, the interpreter's Java locals or stack
 787     // were to grow upwards, the embedded doubles would be word-swapped.)
 788     jint *dp = (jint*)&d;
 789     array->append(new ConstantIntValue(dp[1]));
 790     array->append(new ConstantIntValue(dp[0]));
 791 #endif
 792     break;
 793   }
 794   case Type::Top:               // Add an illegal value here
 795     array->append(new LocationValue(Location()));
 796     break;
 797   default:
 798     ShouldNotReachHere();
 799     break;
 800   }
 801 }
 802 
 803 // Determine if this node starts a bundle
 804 bool Compile::starts_bundle(const Node *n) const {
 805   return (_node_bundling_limit > n->_idx &&
 806           _node_bundling_base[n->_idx].starts_bundle());
 807 }
 808 
 809 //--------------------------Process_OopMap_Node--------------------------------
 810 void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
 811 
 812   // Handle special safepoint nodes for synchronization
 813   MachSafePointNode *sfn   = mach->as_MachSafePoint();
 814   MachCallNode      *mcall;
 815 
 816 #ifdef ENABLE_ZAP_DEAD_LOCALS
 817   assert( is_node_getting_a_safepoint(mach),  "logic does not match; false negative");
 818 #endif
 819 
 820   int safepoint_pc_offset = current_offset;
 821   bool is_method_handle_invoke = false;
 822   bool return_oop = false;
 823 
 824   // Add the safepoint in the DebugInfoRecorder
 825   if( !mach->is_MachCall() ) {
 826     mcall = NULL;
 827     debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
 828   } else {
 829     mcall = mach->as_MachCall();
 830 
 831     // Is the call a MethodHandle call?
 832     if (mcall->is_MachCallJava()) {
 833       if (mcall->as_MachCallJava()->_method_handle_invoke) {
 834         assert(has_method_handle_invokes(), "must have been set during call generation");
 835         is_method_handle_invoke = true;
 836       }
 837     }
 838 
 839     // Check if a call returns an object.
 840     if (mcall->return_value_is_used() &&
 841         mcall->tf()->range()->field_at(TypeFunc::Parms)->isa_ptr()) {
 842       return_oop = true;
 843     }
 844     safepoint_pc_offset += mcall->ret_addr_offset();
 845     debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
 846   }
 847 
 848   // Loop over the JVMState list to add scope information
 849   // Do not skip safepoints with a NULL method, they need monitor info
 850   JVMState* youngest_jvms = sfn->jvms();
 851   int max_depth = youngest_jvms->depth();
 852 
 853   // Allocate the object pool for scalar-replaced objects -- the map from
 854   // small-integer keys (which can be recorded in the local and ostack
 855   // arrays) to descriptions of the object state.
 856   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
 857 
 858   // Visit scopes from oldest to youngest.
 859   for (int depth = 1; depth <= max_depth; depth++) {
 860     JVMState* jvms = youngest_jvms->of_depth(depth);
 861     int idx;
 862     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
 863     // Safepoints that do not have method() set only provide oop-map and monitor info
 864     // to support GC; these do not support deoptimization.
 865     int num_locs = (method == NULL) ? 0 : jvms->loc_size();
 866     int num_exps = (method == NULL) ? 0 : jvms->stk_size();
 867     int num_mon  = jvms->nof_monitors();
 868     assert(method == NULL || jvms->bci() < 0 || num_locs == method->max_locals(),
 869            "JVMS local count must match that of the method");
 870 
 871     // Add Local and Expression Stack Information
 872 
 873     // Insert locals into the locarray
 874     GrowableArray<ScopeValue*> *locarray = new GrowableArray<ScopeValue*>(num_locs);
 875     for( idx = 0; idx < num_locs; idx++ ) {
 876       FillLocArray( idx, sfn, sfn->local(jvms, idx), locarray, objs );
 877     }
 878 
 879     // Insert expression stack entries into the exparray
 880     GrowableArray<ScopeValue*> *exparray = new GrowableArray<ScopeValue*>(num_exps);
 881     for( idx = 0; idx < num_exps; idx++ ) {
 882       FillLocArray( idx,  sfn, sfn->stack(jvms, idx), exparray, objs );
 883     }
 884 
 885     // Add in mappings of the monitors
 886     assert( !method ||
 887             !method->is_synchronized() ||
 888             method->is_native() ||
 889             num_mon > 0 ||
 890             !GenerateSynchronizationCode,
 891             "monitors must always exist for synchronized methods");
 892 
 893     // Build the growable array of ScopeValues for exp stack
 894     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
 895 
 896     // Loop over monitors and insert into array
 897     for (idx = 0; idx < num_mon; idx++) {
 898       // Grab the node that defines this monitor
 899       Node* box_node = sfn->monitor_box(jvms, idx);
 900       Node* obj_node = sfn->monitor_obj(jvms, idx);
 901 
 902       // Create ScopeValue for object
 903       ScopeValue *scval = NULL;
 904 
 905       if (obj_node->is_SafePointScalarObject()) {
 906         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
 907         scval = Compile::sv_for_node_id(objs, spobj->_idx);
 908         if (scval == NULL) {
 909           const Type *t = spobj->bottom_type();
 910           ciKlass* cik = t->is_oopptr()->klass();
 911           assert(cik->is_instance_klass() ||
 912                  cik->is_array_klass(), "Not supported allocation.");
 913           ObjectValue* sv = new ObjectValue(spobj->_idx,
 914                                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
 915           Compile::set_sv_for_object_node(objs, sv);
 916 
 917           uint first_ind = spobj->first_index(youngest_jvms);
 918           for (uint i = 0; i < spobj->n_fields(); i++) {
 919             Node* fld_node = sfn->in(first_ind+i);
 920             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
 921           }
 922           scval = sv;
 923         }
 924       } else if (!obj_node->is_Con()) {
 925         OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node);
 926         if( obj_node->bottom_type()->base() == Type::NarrowOop ) {
 927           scval = new_loc_value( _regalloc, obj_reg, Location::narrowoop );
 928         } else {
 929           scval = new_loc_value( _regalloc, obj_reg, Location::oop );
 930         }
 931       } else {
 932         const TypePtr *tp = obj_node->get_ptr_type();
 933         scval = new ConstantOopWriteValue(tp->is_oopptr()->const_oop()->constant_encoding());
 934       }
 935 
 936       OptoReg::Name box_reg = BoxLockNode::reg(box_node);
 937       Location basic_lock = Location::new_stk_loc(Location::normal,_regalloc->reg2offset(box_reg));
 938       bool eliminated = (box_node->is_BoxLock() && box_node->as_BoxLock()->is_eliminated());
 939       monarray->append(new MonitorValue(scval, basic_lock, eliminated));
 940     }
 941 
 942     // We dump the object pool first, since deoptimization reads it in first.
 943     debug_info()->dump_object_pool(objs);
 944 
 945     // Build first class objects to pass to scope
 946     DebugToken *locvals = debug_info()->create_scope_values(locarray);
 947     DebugToken *expvals = debug_info()->create_scope_values(exparray);
 948     DebugToken *monvals = debug_info()->create_monitor_values(monarray);
 949 
 950     // Make method available for all Safepoints
 951     ciMethod* scope_method = method ? method : _method;
 952     // Describe the scope here
 953     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
 954     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
 955     // Now we can describe the scope.
 956     debug_info()->describe_scope(safepoint_pc_offset, scope_method, jvms->bci(), jvms->should_reexecute(), is_method_handle_invoke, return_oop, locvals, expvals, monvals);
 957   } // End jvms loop
 958 
 959   // Mark the end of the scope set.
 960   debug_info()->end_safepoint(safepoint_pc_offset);
 961 }
 962 
 963 
 964 
 965 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
 966 class NonSafepointEmitter {
 967   Compile*  C;
 968   JVMState* _pending_jvms;
 969   int       _pending_offset;
 970 
 971   void emit_non_safepoint();
 972 
 973  public:
 974   NonSafepointEmitter(Compile* compile) {
 975     this->C = compile;
 976     _pending_jvms = NULL;
 977     _pending_offset = 0;
 978   }
 979 
 980   void observe_instruction(Node* n, int pc_offset) {
 981     if (!C->debug_info()->recording_non_safepoints())  return;
 982 
 983     Node_Notes* nn = C->node_notes_at(n->_idx);
 984     if (nn == NULL || nn->jvms() == NULL)  return;
 985     if (_pending_jvms != NULL &&
 986         _pending_jvms->same_calls_as(nn->jvms())) {
 987       // Repeated JVMS?  Stretch it up here.
 988       _pending_offset = pc_offset;
 989     } else {
 990       if (_pending_jvms != NULL &&
 991           _pending_offset < pc_offset) {
 992         emit_non_safepoint();
 993       }
 994       _pending_jvms = NULL;
 995       if (pc_offset > C->debug_info()->last_pc_offset()) {
 996         // This is the only way _pending_jvms can become non-NULL:
 997         _pending_jvms = nn->jvms();
 998         _pending_offset = pc_offset;
 999       }
1000     }
1001   }
1002 
1003   // Stay out of the way of real safepoints:
1004   void observe_safepoint(JVMState* jvms, int pc_offset) {
1005     if (_pending_jvms != NULL &&
1006         !_pending_jvms->same_calls_as(jvms) &&
1007         _pending_offset < pc_offset) {
1008       emit_non_safepoint();
1009     }
1010     _pending_jvms = NULL;
1011   }
1012 
1013   void flush_at_end() {
1014     if (_pending_jvms != NULL) {
1015       emit_non_safepoint();
1016     }
1017     _pending_jvms = NULL;
1018   }
1019 };
1020 
1021 void NonSafepointEmitter::emit_non_safepoint() {
1022   JVMState* youngest_jvms = _pending_jvms;
1023   int       pc_offset     = _pending_offset;
1024 
1025   // Clear it now:
1026   _pending_jvms = NULL;
1027 
1028   DebugInformationRecorder* debug_info = C->debug_info();
1029   assert(debug_info->recording_non_safepoints(), "sanity");
1030 
1031   debug_info->add_non_safepoint(pc_offset);
1032   int max_depth = youngest_jvms->depth();
1033 
1034   // Visit scopes from oldest to youngest.
1035   for (int depth = 1; depth <= max_depth; depth++) {
1036     JVMState* jvms = youngest_jvms->of_depth(depth);
1037     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
1038     assert(!jvms->should_reexecute() || depth==max_depth, "reexecute allowed only for the youngest");
1039     debug_info->describe_scope(pc_offset, method, jvms->bci(), jvms->should_reexecute());
1040   }
1041 
1042   // Mark the end of the scope set.
1043   debug_info->end_non_safepoint(pc_offset);
1044 }
1045 
1046 //------------------------------init_buffer------------------------------------
1047 CodeBuffer* Compile::init_buffer(uint* blk_starts) {
1048 
1049   // Set the initially allocated size
1050   int  code_req   = initial_code_capacity;
1051   int  locs_req   = initial_locs_capacity;
1052   int  stub_req   = TraceJumps ? initial_stub_capacity * 10 : initial_stub_capacity;
1053   int  const_req  = initial_const_capacity;
1054 
1055   int  pad_req    = NativeCall::instruction_size;
1056   // The extra spacing after the code is necessary on some platforms.
1057   // Sometimes we need to patch in a jump after the last instruction,
1058   // if the nmethod has been deoptimized.  (See 4932387, 4894843.)
1059 
1060   // Compute the byte offset where we can store the deopt pc.
1061   if (fixed_slots() != 0) {
1062     _orig_pc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_orig_pc_slot));
1063   }
1064 
1065   // Compute prolog code size
1066   _method_size = 0;
1067   _frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize;
1068 #if defined(IA64) && !defined(AIX)
1069   if (save_argument_registers()) {
1070     // 4815101: this is a stub with implicit and unknown precision fp args.
1071     // The usual spill mechanism can only generate stfd's in this case, which
1072     // doesn't work if the fp reg to spill contains a single-precision denorm.
1073     // Instead, we hack around the normal spill mechanism using stfspill's and
1074     // ldffill's in the MachProlog and MachEpilog emit methods.  We allocate
1075     // space here for the fp arg regs (f8-f15) we're going to thusly spill.
1076     //
1077     // If we ever implement 16-byte 'registers' == stack slots, we can
1078     // get rid of this hack and have SpillCopy generate stfspill/ldffill
1079     // instead of stfd/stfs/ldfd/ldfs.
1080     _frame_slots += 8*(16/BytesPerInt);
1081   }
1082 #endif
1083   assert(_frame_slots >= 0 && _frame_slots < 1000000, "sanity check");
1084 
1085   if (has_mach_constant_base_node()) {
1086     uint add_size = 0;
1087     // Fill the constant table.
1088     // Note:  This must happen before shorten_branches.
1089     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
1090       Block* b = _cfg->get_block(i);
1091 
1092       for (uint j = 0; j < b->number_of_nodes(); j++) {
1093         Node* n = b->get_node(j);
1094 
1095         // If the node is a MachConstantNode evaluate the constant
1096         // value section.
1097         if (n->is_MachConstant()) {
1098           MachConstantNode* machcon = n->as_MachConstant();
1099           machcon->eval_constant(C);
1100         } else if (n->is_Mach()) {
1101           // On Power there are more nodes that issue constants.
1102           add_size += (n->as_Mach()->ins_num_consts() * 8);
1103         }
1104       }
1105     }
1106 
1107     // Calculate the offsets of the constants and the size of the
1108     // constant table (including the padding to the next section).
1109     constant_table().calculate_offsets_and_size();
1110     const_req = constant_table().size() + add_size;
1111   }
1112 
1113   // Initialize the space for the BufferBlob used to find and verify
1114   // instruction size in MachNode::emit_size()
1115   init_scratch_buffer_blob(const_req);
1116   if (failing())  return NULL; // Out of memory
1117 
1118   // Pre-compute the length of blocks and replace
1119   // long branches with short if machine supports it.
1120   shorten_branches(blk_starts, code_req, locs_req, stub_req);
1121 
1122   // nmethod and CodeBuffer count stubs & constants as part of method's code.
1123   int exception_handler_req = size_exception_handler();
1124   int deopt_handler_req = size_deopt_handler();
1125   exception_handler_req += MAX_stubs_size; // add marginal slop for handler
1126   deopt_handler_req += MAX_stubs_size; // add marginal slop for handler
1127   stub_req += MAX_stubs_size;   // ensure per-stub margin
1128   code_req += MAX_inst_size;    // ensure per-instruction margin
1129 
1130   if (StressCodeBuffers)
1131     code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10;  // force expansion
1132 
1133   int total_req =
1134     const_req +
1135     code_req +
1136     pad_req +
1137     stub_req +
1138     exception_handler_req +
1139     deopt_handler_req;               // deopt handler
1140 
1141   if (has_method_handle_invokes())
1142     total_req += deopt_handler_req;  // deopt MH handler
1143 
1144   CodeBuffer* cb = code_buffer();
1145   cb->initialize(total_req, locs_req);
1146 
1147   // Have we run out of code space?
1148   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1149     C->record_failure("CodeCache is full");
1150     return NULL;
1151   }
1152   // Configure the code buffer.
1153   cb->initialize_consts_size(const_req);
1154   cb->initialize_stubs_size(stub_req);
1155   cb->initialize_oop_recorder(env()->oop_recorder());
1156 
1157   // fill in the nop array for bundling computations
1158   MachNode *_nop_list[Bundle::_nop_count];
1159   Bundle::initialize_nops(_nop_list, this);
1160 
1161   return cb;
1162 }
1163 
1164 //------------------------------fill_buffer------------------------------------
1165 void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
1166   // blk_starts[] contains offsets calculated during short branches processing,
1167   // offsets should not be increased during following steps.
1168 
1169   // Compute the size of first NumberOfLoopInstrToAlign instructions at head
1170   // of a loop. It is used to determine the padding for loop alignment.
1171   compute_loop_first_inst_sizes();
1172 
1173   // Create oopmap set.
1174   _oop_map_set = new OopMapSet();
1175 
1176   // !!!!! This preserves old handling of oopmaps for now
1177   debug_info()->set_oopmaps(_oop_map_set);
1178 
1179   uint nblocks  = _cfg->number_of_blocks();
1180   // Count and start of implicit null check instructions
1181   uint inct_cnt = 0;
1182   uint *inct_starts = NEW_RESOURCE_ARRAY(uint, nblocks+1);
1183 
1184   // Count and start of calls
1185   uint *call_returns = NEW_RESOURCE_ARRAY(uint, nblocks+1);
1186 
1187   uint  return_offset = 0;
1188   int nop_size = (new (this) MachNopNode())->size(_regalloc);
1189 
1190   int previous_offset = 0;
1191   int current_offset  = 0;
1192   int last_call_offset = -1;
1193   int last_avoid_back_to_back_offset = -1;
1194 #ifdef ASSERT
1195   uint* jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks);
1196   uint* jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
1197   uint* jmp_size   = NEW_RESOURCE_ARRAY(uint,nblocks);
1198   uint* jmp_rule   = NEW_RESOURCE_ARRAY(uint,nblocks);
1199 #endif
1200 
1201   // Create an array of unused labels, one for each basic block, if printing is enabled
1202 #ifndef PRODUCT
1203   int *node_offsets      = NULL;
1204   uint node_offset_limit = unique();
1205 
1206   if (print_assembly())
1207     node_offsets         = NEW_RESOURCE_ARRAY(int, node_offset_limit);
1208 #endif
1209 
1210   NonSafepointEmitter non_safepoints(this);  // emit non-safepoints lazily
1211 
1212   // Emit the constant table.
1213   if (has_mach_constant_base_node()) {
1214     constant_table().emit(*cb);
1215   }
1216 
1217   // Create an array of labels, one for each basic block
1218   Label *blk_labels = NEW_RESOURCE_ARRAY(Label, nblocks+1);
1219   for (uint i=0; i <= nblocks; i++) {
1220     blk_labels[i].init();
1221   }
1222 
1223   // ------------------
1224   // Now fill in the code buffer
1225   Node *delay_slot = NULL;
1226 
1227   for (uint i = 0; i < nblocks; i++) {
1228     Block* block = _cfg->get_block(i);
1229     Node* head = block->head();
1230 
1231     // If this block needs to start aligned (i.e, can be reached other
1232     // than by falling-thru from the previous block), then force the
1233     // start of a new bundle.
1234     if (Pipeline::requires_bundling() && starts_bundle(head)) {
1235       cb->flush_bundle(true);
1236     }
1237 
1238 #ifdef ASSERT
1239     if (!block->is_connector()) {
1240       stringStream st;
1241       block->dump_head(_cfg, &st);
1242       MacroAssembler(cb).block_comment(st.as_string());
1243     }
1244     jmp_target[i] = 0;
1245     jmp_offset[i] = 0;
1246     jmp_size[i]   = 0;
1247     jmp_rule[i]   = 0;
1248 #endif
1249     int blk_offset = current_offset;
1250 
1251     // Define the label at the beginning of the basic block
1252     MacroAssembler(cb).bind(blk_labels[block->_pre_order]);
1253 
1254     uint last_inst = block->number_of_nodes();
1255 
1256     // Emit block normally, except for last instruction.
1257     // Emit means "dump code bits into code buffer".
1258     for (uint j = 0; j<last_inst; j++) {
1259 
1260       // Get the node
1261       Node* n = block->get_node(j);
1262 
1263       // See if delay slots are supported
1264       if (valid_bundle_info(n) &&
1265           node_bundling(n)->used_in_unconditional_delay()) {
1266         assert(delay_slot == NULL, "no use of delay slot node");
1267         assert(n->size(_regalloc) == Pipeline::instr_unit_size(), "delay slot instruction wrong size");
1268 
1269         delay_slot = n;
1270         continue;
1271       }
1272 
1273       // If this starts a new instruction group, then flush the current one
1274       // (but allow split bundles)
1275       if (Pipeline::requires_bundling() && starts_bundle(n))
1276         cb->flush_bundle(false);
1277 
1278       // The following logic is duplicated in the code ifdeffed for
1279       // ENABLE_ZAP_DEAD_LOCALS which appears above in this file.  It
1280       // should be factored out.  Or maybe dispersed to the nodes?
1281 
1282       // Special handling for SafePoint/Call Nodes
1283       bool is_mcall = false;
1284       if (n->is_Mach()) {
1285         MachNode *mach = n->as_Mach();
1286         is_mcall = n->is_MachCall();
1287         bool is_sfn = n->is_MachSafePoint();
1288 
1289         // If this requires all previous instructions be flushed, then do so
1290         if (is_sfn || is_mcall || mach->alignment_required() != 1) {
1291           cb->flush_bundle(true);
1292           current_offset = cb->insts_size();
1293         }
1294 
1295         // A padding may be needed again since a previous instruction
1296         // could be moved to delay slot.
1297 
1298         // align the instruction if necessary
1299         int padding = mach->compute_padding(current_offset);
1300         // Make sure safepoint node for polling is distinct from a call's
1301         // return by adding a nop if needed.
1302         if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
1303           padding = nop_size;
1304         }
1305         if (padding == 0 && mach->avoid_back_to_back() &&
1306             current_offset == last_avoid_back_to_back_offset) {
1307           // Avoid back to back some instructions.
1308           padding = nop_size;
1309         }
1310 
1311         if(padding > 0) {
1312           assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
1313           int nops_cnt = padding / nop_size;
1314           MachNode *nop = new (this) MachNopNode(nops_cnt);
1315           block->insert_node(nop, j++);
1316           last_inst++;
1317           _cfg->map_node_to_block(nop, block);
1318           nop->emit(*cb, _regalloc);
1319           cb->flush_bundle(true);
1320           current_offset = cb->insts_size();
1321         }
1322 
1323         // Remember the start of the last call in a basic block
1324         if (is_mcall) {
1325           MachCallNode *mcall = mach->as_MachCall();
1326 
1327           // This destination address is NOT PC-relative
1328           mcall->method_set((intptr_t)mcall->entry_point());
1329 
1330           // Save the return address
1331           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1332 
1333           if (mcall->is_MachCallLeaf()) {
1334             is_mcall = false;
1335             is_sfn = false;
1336           }
1337         }
1338 
1339         // sfn will be valid whenever mcall is valid now because of inheritance
1340         if (is_sfn || is_mcall) {
1341 
1342           // Handle special safepoint nodes for synchronization
1343           if (!is_mcall) {
1344             MachSafePointNode *sfn = mach->as_MachSafePoint();
1345             // !!!!! Stubs only need an oopmap right now, so bail out
1346             if (sfn->jvms()->method() == NULL) {
1347               // Write the oopmap directly to the code blob??!!
1348 #             ifdef ENABLE_ZAP_DEAD_LOCALS
1349               assert( !is_node_getting_a_safepoint(sfn),  "logic does not match; false positive");
1350 #             endif
1351               continue;
1352             }
1353           } // End synchronization
1354 
1355           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1356                                            current_offset);
1357           Process_OopMap_Node(mach, current_offset);
1358         } // End if safepoint
1359 
1360         // If this is a null check, then add the start of the previous instruction to the list
1361         else if( mach->is_MachNullCheck() ) {
1362           inct_starts[inct_cnt++] = previous_offset;
1363         }
1364 
1365         // If this is a branch, then fill in the label with the target BB's label
1366         else if (mach->is_MachBranch()) {
1367           // This requires the TRUE branch target be in succs[0]
1368           uint block_num = block->non_connector_successor(0)->_pre_order;
1369 
1370           // Try to replace long branch if delay slot is not used,
1371           // it is mostly for back branches since forward branch's
1372           // distance is not updated yet.
1373           bool delay_slot_is_used = valid_bundle_info(n) &&
1374                                     node_bundling(n)->use_unconditional_delay();
1375           if (!delay_slot_is_used && mach->may_be_short_branch()) {
1376            assert(delay_slot == NULL, "not expecting delay slot node");
1377            int br_size = n->size(_regalloc);
1378             int offset = blk_starts[block_num] - current_offset;
1379             if (block_num >= i) {
1380               // Current and following block's offset are not
1381               // finalized yet, adjust distance by the difference
1382               // between calculated and final offsets of current block.
1383               offset -= (blk_starts[i] - blk_offset);
1384             }
1385             // In the following code a nop could be inserted before
1386             // the branch which will increase the backward distance.
1387             bool needs_padding = (current_offset == last_avoid_back_to_back_offset);
1388             if (needs_padding && offset <= 0)
1389               offset -= nop_size;
1390 
1391             if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
1392               // We've got a winner.  Replace this branch.
1393               MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
1394 
1395               // Update the jmp_size.
1396               int new_size = replacement->size(_regalloc);
1397               assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
1398               // Insert padding between avoid_back_to_back branches.
1399               if (needs_padding && replacement->avoid_back_to_back()) {
1400                 MachNode *nop = new (this) MachNopNode();
1401                 block->insert_node(nop, j++);
1402                 _cfg->map_node_to_block(nop, block);
1403                 last_inst++;
1404                 nop->emit(*cb, _regalloc);
1405                 cb->flush_bundle(true);
1406                 current_offset = cb->insts_size();
1407               }
1408 #ifdef ASSERT
1409               jmp_target[i] = block_num;
1410               jmp_offset[i] = current_offset - blk_offset;
1411               jmp_size[i]   = new_size;
1412               jmp_rule[i]   = mach->rule();
1413 #endif
1414               block->map_node(replacement, j);
1415               mach->subsume_by(replacement, C);
1416               n    = replacement;
1417               mach = replacement;
1418             }
1419           }
1420           mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num );
1421         } else if (mach->ideal_Opcode() == Op_Jump) {
1422           for (uint h = 0; h < block->_num_succs; h++) {
1423             Block* succs_block = block->_succs[h];
1424             for (uint j = 1; j < succs_block->num_preds(); j++) {
1425               Node* jpn = succs_block->pred(j);
1426               if (jpn->is_JumpProj() && jpn->in(0) == mach) {
1427                 uint block_num = succs_block->non_connector()->_pre_order;
1428                 Label *blkLabel = &blk_labels[block_num];
1429                 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1430               }
1431             }
1432           }
1433         }
1434 #ifdef ASSERT
1435         // Check that oop-store precedes the card-mark
1436         else if (mach->ideal_Opcode() == Op_StoreCM) {
1437           uint storeCM_idx = j;
1438           int count = 0;
1439           for (uint prec = mach->req(); prec < mach->len(); prec++) {
1440             Node *oop_store = mach->in(prec);  // Precedence edge
1441             if (oop_store == NULL) continue;
1442             count++;
1443             uint i4;
1444             for (i4 = 0; i4 < last_inst; ++i4) {
1445               if (block->get_node(i4) == oop_store) {
1446                 break;
1447               }
1448             }
1449             // Note: This test can provide a false failure if other precedence
1450             // edges have been added to the storeCMNode.
1451             assert(i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
1452           }
1453           assert(count > 0, "storeCM expects at least one precedence edge");
1454         }
1455 #endif
1456         else if (!n->is_Proj()) {
1457           // Remember the beginning of the previous instruction, in case
1458           // it's followed by a flag-kill and a null-check.  Happens on
1459           // Intel all the time, with add-to-memory kind of opcodes.
1460           previous_offset = current_offset;
1461         }
1462 
1463         // Not an else-if!
1464         // If this is a trap based cmp then add its offset to the list.
1465         if (mach->is_TrapBasedCheckNode()) {
1466           inct_starts[inct_cnt++] = current_offset;
1467         }
1468       }
1469 
1470       // Verify that there is sufficient space remaining
1471       cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1472       if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1473         C->record_failure("CodeCache is full");
1474         return;
1475       }
1476 
1477       // Save the offset for the listing
1478 #ifndef PRODUCT
1479       if (node_offsets && n->_idx < node_offset_limit)
1480         node_offsets[n->_idx] = cb->insts_size();
1481 #endif
1482 
1483       // "Normal" instruction case
1484       DEBUG_ONLY( uint instr_offset = cb->insts_size(); )
1485       n->emit(*cb, _regalloc);
1486       current_offset  = cb->insts_size();
1487 
1488 #ifdef ASSERT
1489       if (n->size(_regalloc) < (current_offset-instr_offset)) {
1490         n->dump();
1491         assert(false, "wrong size of mach node");
1492       }
1493 #endif
1494       non_safepoints.observe_instruction(n, current_offset);
1495 
1496       // mcall is last "call" that can be a safepoint
1497       // record it so we can see if a poll will directly follow it
1498       // in which case we'll need a pad to make the PcDesc sites unique
1499       // see  5010568. This can be slightly inaccurate but conservative
1500       // in the case that return address is not actually at current_offset.
1501       // This is a small price to pay.
1502 
1503       if (is_mcall) {
1504         last_call_offset = current_offset;
1505       }
1506 
1507       if (n->is_Mach() && n->as_Mach()->avoid_back_to_back()) {
1508         // Avoid back to back some instructions.
1509         last_avoid_back_to_back_offset = current_offset;
1510       }
1511 
1512       // See if this instruction has a delay slot
1513       if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
1514         assert(delay_slot != NULL, "expecting delay slot node");
1515 
1516         // Back up 1 instruction
1517         cb->set_insts_end(cb->insts_end() - Pipeline::instr_unit_size());
1518 
1519         // Save the offset for the listing
1520 #ifndef PRODUCT
1521         if (node_offsets && delay_slot->_idx < node_offset_limit)
1522           node_offsets[delay_slot->_idx] = cb->insts_size();
1523 #endif
1524 
1525         // Support a SafePoint in the delay slot
1526         if (delay_slot->is_MachSafePoint()) {
1527           MachNode *mach = delay_slot->as_Mach();
1528           // !!!!! Stubs only need an oopmap right now, so bail out
1529           if (!mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == NULL) {
1530             // Write the oopmap directly to the code blob??!!
1531 #           ifdef ENABLE_ZAP_DEAD_LOCALS
1532             assert( !is_node_getting_a_safepoint(mach),  "logic does not match; false positive");
1533 #           endif
1534             delay_slot = NULL;
1535             continue;
1536           }
1537 
1538           int adjusted_offset = current_offset - Pipeline::instr_unit_size();
1539           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1540                                            adjusted_offset);
1541           // Generate an OopMap entry
1542           Process_OopMap_Node(mach, adjusted_offset);
1543         }
1544 
1545         // Insert the delay slot instruction
1546         delay_slot->emit(*cb, _regalloc);
1547 
1548         // Don't reuse it
1549         delay_slot = NULL;
1550       }
1551 
1552     } // End for all instructions in block
1553 
1554     // If the next block is the top of a loop, pad this block out to align
1555     // the loop top a little. Helps prevent pipe stalls at loop back branches.
1556     if (i < nblocks-1) {
1557       Block *nb = _cfg->get_block(i + 1);
1558       int padding = nb->alignment_padding(current_offset);
1559       if( padding > 0 ) {
1560         MachNode *nop = new (this) MachNopNode(padding / nop_size);
1561         block->insert_node(nop, block->number_of_nodes());
1562         _cfg->map_node_to_block(nop, block);
1563         nop->emit(*cb, _regalloc);
1564         current_offset = cb->insts_size();
1565       }
1566     }
1567     // Verify that the distance for generated before forward
1568     // short branches is still valid.
1569     guarantee((int)(blk_starts[i+1] - blk_starts[i]) >= (current_offset - blk_offset), "shouldn't increase block size");
1570 
1571     // Save new block start offset
1572     blk_starts[i] = blk_offset;
1573   } // End of for all blocks
1574   blk_starts[nblocks] = current_offset;
1575 
1576   non_safepoints.flush_at_end();
1577 
1578   // Offset too large?
1579   if (failing())  return;
1580 
1581   // Define a pseudo-label at the end of the code
1582   MacroAssembler(cb).bind( blk_labels[nblocks] );
1583 
1584   // Compute the size of the first block
1585   _first_block_size = blk_labels[1].loc_pos() - blk_labels[0].loc_pos();
1586 
1587   assert(cb->insts_size() < 500000, "method is unreasonably large");
1588 
1589 #ifdef ASSERT
1590   for (uint i = 0; i < nblocks; i++) { // For all blocks
1591     if (jmp_target[i] != 0) {
1592       int br_size = jmp_size[i];
1593       int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_offset[i]);
1594       if (!_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset)) {
1595         tty->print_cr("target (%d) - jmp_offset(%d) = offset (%d), jump_size(%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_offset[i], offset, br_size, i, jmp_target[i]);
1596         assert(false, "Displacement too large for short jmp");
1597       }
1598     }
1599   }
1600 #endif
1601 
1602 #ifndef PRODUCT
1603   // Information on the size of the method, without the extraneous code
1604   Scheduling::increment_method_size(cb->insts_size());
1605 #endif
1606 
1607   // ------------------
1608   // Fill in exception table entries.
1609   FillExceptionTables(inct_cnt, call_returns, inct_starts, blk_labels);
1610 
1611   // Only java methods have exception handlers and deopt handlers
1612   if (_method) {
1613     // Emit the exception handler code.
1614     _code_offsets.set_value(CodeOffsets::Exceptions, emit_exception_handler(*cb));
1615     // Emit the deopt handler code.
1616     _code_offsets.set_value(CodeOffsets::Deopt, emit_deopt_handler(*cb));
1617 
1618     // Emit the MethodHandle deopt handler code (if required).
1619     if (has_method_handle_invokes()) {
1620       // We can use the same code as for the normal deopt handler, we
1621       // just need a different entry point address.
1622       _code_offsets.set_value(CodeOffsets::DeoptMH, emit_deopt_handler(*cb));
1623     }
1624   }
1625 
1626   // One last check for failed CodeBuffer::expand:
1627   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1628     C->record_failure("CodeCache is full");
1629     return;
1630   }
1631 
1632 #ifndef PRODUCT
1633   // Dump the assembly code, including basic-block numbers
1634   if (print_assembly()) {
1635     ttyLocker ttyl;  // keep the following output all in one block
1636     if (!VMThread::should_terminate()) {  // test this under the tty lock
1637       // This output goes directly to the tty, not the compiler log.
1638       // To enable tools to match it up with the compilation activity,
1639       // be sure to tag this tty output with the compile ID.
1640       if (xtty != NULL) {
1641         xtty->head("opto_assembly compile_id='%d'%s", compile_id(),
1642                    is_osr_compilation()    ? " compile_kind='osr'" :
1643                    "");
1644       }
1645       if (method() != NULL) {
1646         method()->print_metadata();
1647       }
1648       dump_asm(node_offsets, node_offset_limit);
1649       if (xtty != NULL) {
1650         xtty->tail("opto_assembly");
1651       }
1652     }
1653   }
1654 #endif
1655 
1656 }
1657 
1658 void Compile::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
1659   _inc_table.set_size(cnt);
1660 
1661   uint inct_cnt = 0;
1662   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
1663     Block* block = _cfg->get_block(i);
1664     Node *n = NULL;
1665     int j;
1666 
1667     // Find the branch; ignore trailing NOPs.
1668     for (j = block->number_of_nodes() - 1; j >= 0; j--) {
1669       n = block->get_node(j);
1670       if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
1671         break;
1672       }
1673     }
1674 
1675     // If we didn't find anything, continue
1676     if (j < 0) {
1677       continue;
1678     }
1679 
1680     // Compute ExceptionHandlerTable subtable entry and add it
1681     // (skip empty blocks)
1682     if (n->is_Catch()) {
1683 
1684       // Get the offset of the return from the call
1685       uint call_return = call_returns[block->_pre_order];
1686 #ifdef ASSERT
1687       assert( call_return > 0, "no call seen for this basic block" );
1688       while (block->get_node(--j)->is_MachProj()) ;
1689       assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
1690 #endif
1691       // last instruction is a CatchNode, find it's CatchProjNodes
1692       int nof_succs = block->_num_succs;
1693       // allocate space
1694       GrowableArray<intptr_t> handler_bcis(nof_succs);
1695       GrowableArray<intptr_t> handler_pcos(nof_succs);
1696       // iterate through all successors
1697       for (int j = 0; j < nof_succs; j++) {
1698         Block* s = block->_succs[j];
1699         bool found_p = false;
1700         for (uint k = 1; k < s->num_preds(); k++) {
1701           Node* pk = s->pred(k);
1702           if (pk->is_CatchProj() && pk->in(0) == n) {
1703             const CatchProjNode* p = pk->as_CatchProj();
1704             found_p = true;
1705             // add the corresponding handler bci & pco information
1706             if (p->_con != CatchProjNode::fall_through_index) {
1707               // p leads to an exception handler (and is not fall through)
1708               assert(s == _cfg->get_block(s->_pre_order), "bad numbering");
1709               // no duplicates, please
1710               if (!handler_bcis.contains(p->handler_bci())) {
1711                 uint block_num = s->non_connector()->_pre_order;
1712                 handler_bcis.append(p->handler_bci());
1713                 handler_pcos.append(blk_labels[block_num].loc_pos());
1714               }
1715             }
1716           }
1717         }
1718         assert(found_p, "no matching predecessor found");
1719         // Note:  Due to empty block removal, one block may have
1720         // several CatchProj inputs, from the same Catch.
1721       }
1722 
1723       // Set the offset of the return from the call
1724       _handler_table.add_subtable(call_return, &handler_bcis, NULL, &handler_pcos);
1725       continue;
1726     }
1727 
1728     // Handle implicit null exception table updates
1729     if (n->is_MachNullCheck()) {
1730       uint block_num = block->non_connector_successor(0)->_pre_order;
1731       _inc_table.append(inct_starts[inct_cnt++], blk_labels[block_num].loc_pos());
1732       continue;
1733     }
1734     // Handle implicit exception table updates: trap instructions.
1735     if (n->is_Mach() && n->as_Mach()->is_TrapBasedCheckNode()) {
1736       uint block_num = block->non_connector_successor(0)->_pre_order;
1737       _inc_table.append(inct_starts[inct_cnt++], blk_labels[block_num].loc_pos());
1738       continue;
1739     }
1740   } // End of for all blocks fill in exception table entries
1741 }
1742 
1743 // Static Variables
1744 #ifndef PRODUCT
1745 uint Scheduling::_total_nop_size = 0;
1746 uint Scheduling::_total_method_size = 0;
1747 uint Scheduling::_total_branches = 0;
1748 uint Scheduling::_total_unconditional_delays = 0;
1749 uint Scheduling::_total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1];
1750 #endif
1751 
1752 // Initializer for class Scheduling
1753 
1754 Scheduling::Scheduling(Arena *arena, Compile &compile)
1755   : _arena(arena),
1756     _cfg(compile.cfg()),
1757     _regalloc(compile.regalloc()),
1758     _reg_node(arena),
1759     _bundle_instr_count(0),
1760     _bundle_cycle_number(0),
1761     _scheduled(arena),
1762     _available(arena),
1763     _next_node(NULL),
1764     _bundle_use(0, 0, resource_count, &_bundle_use_elements[0]),
1765     _pinch_free_list(arena)
1766 #ifndef PRODUCT
1767   , _branches(0)
1768   , _unconditional_delays(0)
1769 #endif
1770 {
1771   // Create a MachNopNode
1772   _nop = new (&compile) MachNopNode();
1773 
1774   // Now that the nops are in the array, save the count
1775   // (but allow entries for the nops)
1776   _node_bundling_limit = compile.unique();
1777   uint node_max = _regalloc->node_regs_max_index();
1778 
1779   compile.set_node_bundling_limit(_node_bundling_limit);
1780 
1781   // This one is persistent within the Compile class
1782   _node_bundling_base = NEW_ARENA_ARRAY(compile.comp_arena(), Bundle, node_max);
1783 
1784   // Allocate space for fixed-size arrays
1785   _node_latency    = NEW_ARENA_ARRAY(arena, unsigned short, node_max);
1786   _uses            = NEW_ARENA_ARRAY(arena, short,          node_max);
1787   _current_latency = NEW_ARENA_ARRAY(arena, unsigned short, node_max);
1788 
1789   // Clear the arrays
1790   memset(_node_bundling_base, 0, node_max * sizeof(Bundle));
1791   memset(_node_latency,       0, node_max * sizeof(unsigned short));
1792   memset(_uses,               0, node_max * sizeof(short));
1793   memset(_current_latency,    0, node_max * sizeof(unsigned short));
1794 
1795   // Clear the bundling information
1796   memcpy(_bundle_use_elements, Pipeline_Use::elaborated_elements, sizeof(Pipeline_Use::elaborated_elements));
1797 
1798   // Get the last node
1799   Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1);
1800 
1801   _next_node = block->get_node(block->number_of_nodes() - 1);
1802 }
1803 
1804 #ifndef PRODUCT
1805 // Scheduling destructor
1806 Scheduling::~Scheduling() {
1807   _total_branches             += _branches;
1808   _total_unconditional_delays += _unconditional_delays;
1809 }
1810 #endif
1811 
1812 // Step ahead "i" cycles
1813 void Scheduling::step(uint i) {
1814 
1815   Bundle *bundle = node_bundling(_next_node);
1816   bundle->set_starts_bundle();
1817 
1818   // Update the bundle record, but leave the flags information alone
1819   if (_bundle_instr_count > 0) {
1820     bundle->set_instr_count(_bundle_instr_count);
1821     bundle->set_resources_used(_bundle_use.resourcesUsed());
1822   }
1823 
1824   // Update the state information
1825   _bundle_instr_count = 0;
1826   _bundle_cycle_number += i;
1827   _bundle_use.step(i);
1828 }
1829 
1830 void Scheduling::step_and_clear() {
1831   Bundle *bundle = node_bundling(_next_node);
1832   bundle->set_starts_bundle();
1833 
1834   // Update the bundle record
1835   if (_bundle_instr_count > 0) {
1836     bundle->set_instr_count(_bundle_instr_count);
1837     bundle->set_resources_used(_bundle_use.resourcesUsed());
1838 
1839     _bundle_cycle_number += 1;
1840   }
1841 
1842   // Clear the bundling information
1843   _bundle_instr_count = 0;
1844   _bundle_use.reset();
1845 
1846   memcpy(_bundle_use_elements,
1847     Pipeline_Use::elaborated_elements,
1848     sizeof(Pipeline_Use::elaborated_elements));
1849 }
1850 
1851 // Perform instruction scheduling and bundling over the sequence of
1852 // instructions in backwards order.
1853 void Compile::ScheduleAndBundle() {
1854 
1855   // Don't optimize this if it isn't a method
1856   if (!_method)
1857     return;
1858 
1859   // Don't optimize this if scheduling is disabled
1860   if (!do_scheduling())
1861     return;
1862 
1863   // Scheduling code works only with pairs (8 bytes) maximum.
1864   if (max_vector_size() > 8)
1865     return;
1866 
1867   NOT_PRODUCT( TracePhase t2("isched", &_t_instrSched, TimeCompiler); )
1868 
1869   // Create a data structure for all the scheduling information
1870   Scheduling scheduling(Thread::current()->resource_area(), *this);
1871 
1872   // Walk backwards over each basic block, computing the needed alignment
1873   // Walk over all the basic blocks
1874   scheduling.DoScheduling();
1875 }
1876 
1877 // Compute the latency of all the instructions.  This is fairly simple,
1878 // because we already have a legal ordering.  Walk over the instructions
1879 // from first to last, and compute the latency of the instruction based
1880 // on the latency of the preceding instruction(s).
1881 void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
1882 #ifndef PRODUCT
1883   if (_cfg->C->trace_opto_output())
1884     tty->print("# -> ComputeLocalLatenciesForward\n");
1885 #endif
1886 
1887   // Walk over all the schedulable instructions
1888   for( uint j=_bb_start; j < _bb_end; j++ ) {
1889 
1890     // This is a kludge, forcing all latency calculations to start at 1.
1891     // Used to allow latency 0 to force an instruction to the beginning
1892     // of the bb
1893     uint latency = 1;
1894     Node *use = bb->get_node(j);
1895     uint nlen = use->len();
1896 
1897     // Walk over all the inputs
1898     for ( uint k=0; k < nlen; k++ ) {
1899       Node *def = use->in(k);
1900       if (!def)
1901         continue;
1902 
1903       uint l = _node_latency[def->_idx] + use->latency(k);
1904       if (latency < l)
1905         latency = l;
1906     }
1907 
1908     _node_latency[use->_idx] = latency;
1909 
1910 #ifndef PRODUCT
1911     if (_cfg->C->trace_opto_output()) {
1912       tty->print("# latency %4d: ", latency);
1913       use->dump();
1914     }
1915 #endif
1916   }
1917 
1918 #ifndef PRODUCT
1919   if (_cfg->C->trace_opto_output())
1920     tty->print("# <- ComputeLocalLatenciesForward\n");
1921 #endif
1922 
1923 } // end ComputeLocalLatenciesForward
1924 
1925 // See if this node fits into the present instruction bundle
1926 bool Scheduling::NodeFitsInBundle(Node *n) {
1927   uint n_idx = n->_idx;
1928 
1929   // If this is the unconditional delay instruction, then it fits
1930   if (n == _unconditional_delay_slot) {
1931 #ifndef PRODUCT
1932     if (_cfg->C->trace_opto_output())
1933       tty->print("#     NodeFitsInBundle [%4d]: TRUE; is in unconditional delay slot\n", n->_idx);
1934 #endif
1935     return (true);
1936   }
1937 
1938   // If the node cannot be scheduled this cycle, skip it
1939   if (_current_latency[n_idx] > _bundle_cycle_number) {
1940 #ifndef PRODUCT
1941     if (_cfg->C->trace_opto_output())
1942       tty->print("#     NodeFitsInBundle [%4d]: FALSE; latency %4d > %d\n",
1943         n->_idx, _current_latency[n_idx], _bundle_cycle_number);
1944 #endif
1945     return (false);
1946   }
1947 
1948   const Pipeline *node_pipeline = n->pipeline();
1949 
1950   uint instruction_count = node_pipeline->instructionCount();
1951   if (node_pipeline->mayHaveNoCode() && n->size(_regalloc) == 0)
1952     instruction_count = 0;
1953   else if (node_pipeline->hasBranchDelay() && !_unconditional_delay_slot)
1954     instruction_count++;
1955 
1956   if (_bundle_instr_count + instruction_count > Pipeline::_max_instrs_per_cycle) {
1957 #ifndef PRODUCT
1958     if (_cfg->C->trace_opto_output())
1959       tty->print("#     NodeFitsInBundle [%4d]: FALSE; too many instructions: %d > %d\n",
1960         n->_idx, _bundle_instr_count + instruction_count, Pipeline::_max_instrs_per_cycle);
1961 #endif
1962     return (false);
1963   }
1964 
1965   // Don't allow non-machine nodes to be handled this way
1966   if (!n->is_Mach() && instruction_count == 0)
1967     return (false);
1968 
1969   // See if there is any overlap
1970   uint delay = _bundle_use.full_latency(0, node_pipeline->resourceUse());
1971 
1972   if (delay > 0) {
1973 #ifndef PRODUCT
1974     if (_cfg->C->trace_opto_output())
1975       tty->print("#     NodeFitsInBundle [%4d]: FALSE; functional units overlap\n", n_idx);
1976 #endif
1977     return false;
1978   }
1979 
1980 #ifndef PRODUCT
1981   if (_cfg->C->trace_opto_output())
1982     tty->print("#     NodeFitsInBundle [%4d]:  TRUE\n", n_idx);
1983 #endif
1984 
1985   return true;
1986 }
1987 
1988 Node * Scheduling::ChooseNodeToBundle() {
1989   uint siz = _available.size();
1990 
1991   if (siz == 0) {
1992 
1993 #ifndef PRODUCT
1994     if (_cfg->C->trace_opto_output())
1995       tty->print("#   ChooseNodeToBundle: NULL\n");
1996 #endif
1997     return (NULL);
1998   }
1999 
2000   // Fast path, if only 1 instruction in the bundle
2001   if (siz == 1) {
2002 #ifndef PRODUCT
2003     if (_cfg->C->trace_opto_output()) {
2004       tty->print("#   ChooseNodeToBundle (only 1): ");
2005       _available[0]->dump();
2006     }
2007 #endif
2008     return (_available[0]);
2009   }
2010 
2011   // Don't bother, if the bundle is already full
2012   if (_bundle_instr_count < Pipeline::_max_instrs_per_cycle) {
2013     for ( uint i = 0; i < siz; i++ ) {
2014       Node *n = _available[i];
2015 
2016       // Skip projections, we'll handle them another way
2017       if (n->is_Proj())
2018         continue;
2019 
2020       // This presupposed that instructions are inserted into the
2021       // available list in a legality order; i.e. instructions that
2022       // must be inserted first are at the head of the list
2023       if (NodeFitsInBundle(n)) {
2024 #ifndef PRODUCT
2025         if (_cfg->C->trace_opto_output()) {
2026           tty->print("#   ChooseNodeToBundle: ");
2027           n->dump();
2028         }
2029 #endif
2030         return (n);
2031       }
2032     }
2033   }
2034 
2035   // Nothing fits in this bundle, choose the highest priority
2036 #ifndef PRODUCT
2037   if (_cfg->C->trace_opto_output()) {
2038     tty->print("#   ChooseNodeToBundle: ");
2039     _available[0]->dump();
2040   }
2041 #endif
2042 
2043   return _available[0];
2044 }
2045 
2046 void Scheduling::AddNodeToAvailableList(Node *n) {
2047   assert( !n->is_Proj(), "projections never directly made available" );
2048 #ifndef PRODUCT
2049   if (_cfg->C->trace_opto_output()) {
2050     tty->print("#   AddNodeToAvailableList: ");
2051     n->dump();
2052   }
2053 #endif
2054 
2055   int latency = _current_latency[n->_idx];
2056 
2057   // Insert in latency order (insertion sort)
2058   uint i;
2059   for ( i=0; i < _available.size(); i++ )
2060     if (_current_latency[_available[i]->_idx] > latency)
2061       break;
2062 
2063   // Special Check for compares following branches
2064   if( n->is_Mach() && _scheduled.size() > 0 ) {
2065     int op = n->as_Mach()->ideal_Opcode();
2066     Node *last = _scheduled[0];
2067     if( last->is_MachIf() && last->in(1) == n &&
2068         ( op == Op_CmpI ||
2069           op == Op_CmpU ||
2070           op == Op_CmpP ||
2071           op == Op_CmpF ||
2072           op == Op_CmpD ||
2073           op == Op_CmpL ) ) {
2074 
2075       // Recalculate position, moving to front of same latency
2076       for ( i=0 ; i < _available.size(); i++ )
2077         if (_current_latency[_available[i]->_idx] >= latency)
2078           break;
2079     }
2080   }
2081 
2082   // Insert the node in the available list
2083   _available.insert(i, n);
2084 
2085 #ifndef PRODUCT
2086   if (_cfg->C->trace_opto_output())
2087     dump_available();
2088 #endif
2089 }
2090 
2091 void Scheduling::DecrementUseCounts(Node *n, const Block *bb) {
2092   for ( uint i=0; i < n->len(); i++ ) {
2093     Node *def = n->in(i);
2094     if (!def) continue;
2095     if( def->is_Proj() )        // If this is a machine projection, then
2096       def = def->in(0);         // propagate usage thru to the base instruction
2097 
2098     if(_cfg->get_block_for_node(def) != bb) { // Ignore if not block-local
2099       continue;
2100     }
2101 
2102     // Compute the latency
2103     uint l = _bundle_cycle_number + n->latency(i);
2104     if (_current_latency[def->_idx] < l)
2105       _current_latency[def->_idx] = l;
2106 
2107     // If this does not have uses then schedule it
2108     if ((--_uses[def->_idx]) == 0)
2109       AddNodeToAvailableList(def);
2110   }
2111 }
2112 
2113 void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
2114 #ifndef PRODUCT
2115   if (_cfg->C->trace_opto_output()) {
2116     tty->print("#   AddNodeToBundle: ");
2117     n->dump();
2118   }
2119 #endif
2120 
2121   // Remove this from the available list
2122   uint i;
2123   for (i = 0; i < _available.size(); i++)
2124     if (_available[i] == n)
2125       break;
2126   assert(i < _available.size(), "entry in _available list not found");
2127   _available.remove(i);
2128 
2129   // See if this fits in the current bundle
2130   const Pipeline *node_pipeline = n->pipeline();
2131   const Pipeline_Use& node_usage = node_pipeline->resourceUse();
2132 
2133   // Check for instructions to be placed in the delay slot. We
2134   // do this before we actually schedule the current instruction,
2135   // because the delay slot follows the current instruction.
2136   if (Pipeline::_branch_has_delay_slot &&
2137       node_pipeline->hasBranchDelay() &&
2138       !_unconditional_delay_slot) {
2139 
2140     uint siz = _available.size();
2141 
2142     // Conditional branches can support an instruction that
2143     // is unconditionally executed and not dependent by the
2144     // branch, OR a conditionally executed instruction if
2145     // the branch is taken.  In practice, this means that
2146     // the first instruction at the branch target is
2147     // copied to the delay slot, and the branch goes to
2148     // the instruction after that at the branch target
2149     if ( n->is_MachBranch() ) {
2150 
2151       assert( !n->is_MachNullCheck(), "should not look for delay slot for Null Check" );
2152       assert( !n->is_Catch(),         "should not look for delay slot for Catch" );
2153 
2154 #ifndef PRODUCT
2155       _branches++;
2156 #endif
2157 
2158       // At least 1 instruction is on the available list
2159       // that is not dependent on the branch
2160       for (uint i = 0; i < siz; i++) {
2161         Node *d = _available[i];
2162         const Pipeline *avail_pipeline = d->pipeline();
2163 
2164         // Don't allow safepoints in the branch shadow, that will
2165         // cause a number of difficulties
2166         if ( avail_pipeline->instructionCount() == 1 &&
2167             !avail_pipeline->hasMultipleBundles() &&
2168             !avail_pipeline->hasBranchDelay() &&
2169             Pipeline::instr_has_unit_size() &&
2170             d->size(_regalloc) == Pipeline::instr_unit_size() &&
2171             NodeFitsInBundle(d) &&
2172             !node_bundling(d)->used_in_delay()) {
2173 
2174           if (d->is_Mach() && !d->is_MachSafePoint()) {
2175             // A node that fits in the delay slot was found, so we need to
2176             // set the appropriate bits in the bundle pipeline information so
2177             // that it correctly indicates resource usage.  Later, when we
2178             // attempt to add this instruction to the bundle, we will skip
2179             // setting the resource usage.
2180             _unconditional_delay_slot = d;
2181             node_bundling(n)->set_use_unconditional_delay();
2182             node_bundling(d)->set_used_in_unconditional_delay();
2183             _bundle_use.add_usage(avail_pipeline->resourceUse());
2184             _current_latency[d->_idx] = _bundle_cycle_number;
2185             _next_node = d;
2186             ++_bundle_instr_count;
2187 #ifndef PRODUCT
2188             _unconditional_delays++;
2189 #endif
2190             break;
2191           }
2192         }
2193       }
2194     }
2195 
2196     // No delay slot, add a nop to the usage
2197     if (!_unconditional_delay_slot) {
2198       // See if adding an instruction in the delay slot will overflow
2199       // the bundle.
2200       if (!NodeFitsInBundle(_nop)) {
2201 #ifndef PRODUCT
2202         if (_cfg->C->trace_opto_output())
2203           tty->print("#  *** STEP(1 instruction for delay slot) ***\n");
2204 #endif
2205         step(1);
2206       }
2207 
2208       _bundle_use.add_usage(_nop->pipeline()->resourceUse());
2209       _next_node = _nop;
2210       ++_bundle_instr_count;
2211     }
2212 
2213     // See if the instruction in the delay slot requires a
2214     // step of the bundles
2215     if (!NodeFitsInBundle(n)) {
2216 #ifndef PRODUCT
2217         if (_cfg->C->trace_opto_output())
2218           tty->print("#  *** STEP(branch won't fit) ***\n");
2219 #endif
2220         // Update the state information
2221         _bundle_instr_count = 0;
2222         _bundle_cycle_number += 1;
2223         _bundle_use.step(1);
2224     }
2225   }
2226 
2227   // Get the number of instructions
2228   uint instruction_count = node_pipeline->instructionCount();
2229   if (node_pipeline->mayHaveNoCode() && n->size(_regalloc) == 0)
2230     instruction_count = 0;
2231 
2232   // Compute the latency information
2233   uint delay = 0;
2234 
2235   if (instruction_count > 0 || !node_pipeline->mayHaveNoCode()) {
2236     int relative_latency = _current_latency[n->_idx] - _bundle_cycle_number;
2237     if (relative_latency < 0)
2238       relative_latency = 0;
2239 
2240     delay = _bundle_use.full_latency(relative_latency, node_usage);
2241 
2242     // Does not fit in this bundle, start a new one
2243     if (delay > 0) {
2244       step(delay);
2245 
2246 #ifndef PRODUCT
2247       if (_cfg->C->trace_opto_output())
2248         tty->print("#  *** STEP(%d) ***\n", delay);
2249 #endif
2250     }
2251   }
2252 
2253   // If this was placed in the delay slot, ignore it
2254   if (n != _unconditional_delay_slot) {
2255 
2256     if (delay == 0) {
2257       if (node_pipeline->hasMultipleBundles()) {
2258 #ifndef PRODUCT
2259         if (_cfg->C->trace_opto_output())
2260           tty->print("#  *** STEP(multiple instructions) ***\n");
2261 #endif
2262         step(1);
2263       }
2264 
2265       else if (instruction_count + _bundle_instr_count > Pipeline::_max_instrs_per_cycle) {
2266 #ifndef PRODUCT
2267         if (_cfg->C->trace_opto_output())
2268           tty->print("#  *** STEP(%d >= %d instructions) ***\n",
2269             instruction_count + _bundle_instr_count,
2270             Pipeline::_max_instrs_per_cycle);
2271 #endif
2272         step(1);
2273       }
2274     }
2275 
2276     if (node_pipeline->hasBranchDelay() && !_unconditional_delay_slot)
2277       _bundle_instr_count++;
2278 
2279     // Set the node's latency
2280     _current_latency[n->_idx] = _bundle_cycle_number;
2281 
2282     // Now merge the functional unit information
2283     if (instruction_count > 0 || !node_pipeline->mayHaveNoCode())
2284       _bundle_use.add_usage(node_usage);
2285 
2286     // Increment the number of instructions in this bundle
2287     _bundle_instr_count += instruction_count;
2288 
2289     // Remember this node for later
2290     if (n->is_Mach())
2291       _next_node = n;
2292   }
2293 
2294   // It's possible to have a BoxLock in the graph and in the _bbs mapping but
2295   // not in the bb->_nodes array.  This happens for debug-info-only BoxLocks.
2296   // 'Schedule' them (basically ignore in the schedule) but do not insert them
2297   // into the block.  All other scheduled nodes get put in the schedule here.
2298   int op = n->Opcode();
2299   if( (op == Op_Node && n->req() == 0) || // anti-dependence node OR
2300       (op != Op_Node &&         // Not an unused antidepedence node and
2301        // not an unallocated boxlock
2302        (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
2303 
2304     // Push any trailing projections
2305     if( bb->get_node(bb->number_of_nodes()-1) != n ) {
2306       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2307         Node *foi = n->fast_out(i);
2308         if( foi->is_Proj() )
2309           _scheduled.push(foi);
2310       }
2311     }
2312 
2313     // Put the instruction in the schedule list
2314     _scheduled.push(n);
2315   }
2316 
2317 #ifndef PRODUCT
2318   if (_cfg->C->trace_opto_output())
2319     dump_available();
2320 #endif
2321 
2322   // Walk all the definitions, decrementing use counts, and
2323   // if a definition has a 0 use count, place it in the available list.
2324   DecrementUseCounts(n,bb);
2325 }
2326 
2327 // This method sets the use count within a basic block.  We will ignore all
2328 // uses outside the current basic block.  As we are doing a backwards walk,
2329 // any node we reach that has a use count of 0 may be scheduled.  This also
2330 // avoids the problem of cyclic references from phi nodes, as long as phi
2331 // nodes are at the front of the basic block.  This method also initializes
2332 // the available list to the set of instructions that have no uses within this
2333 // basic block.
2334 void Scheduling::ComputeUseCount(const Block *bb) {
2335 #ifndef PRODUCT
2336   if (_cfg->C->trace_opto_output())
2337     tty->print("# -> ComputeUseCount\n");
2338 #endif
2339 
2340   // Clear the list of available and scheduled instructions, just in case
2341   _available.clear();
2342   _scheduled.clear();
2343 
2344   // No delay slot specified
2345   _unconditional_delay_slot = NULL;
2346 
2347 #ifdef ASSERT
2348   for( uint i=0; i < bb->number_of_nodes(); i++ )
2349     assert( _uses[bb->get_node(i)->_idx] == 0, "_use array not clean" );
2350 #endif
2351 
2352   // Force the _uses count to never go to zero for unscheduable pieces
2353   // of the block
2354   for( uint k = 0; k < _bb_start; k++ )
2355     _uses[bb->get_node(k)->_idx] = 1;
2356   for( uint l = _bb_end; l < bb->number_of_nodes(); l++ )
2357     _uses[bb->get_node(l)->_idx] = 1;
2358 
2359   // Iterate backwards over the instructions in the block.  Don't count the
2360   // branch projections at end or the block header instructions.
2361   for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
2362     Node *n = bb->get_node(j);
2363     if( n->is_Proj() ) continue; // Projections handled another way
2364 
2365     // Account for all uses
2366     for ( uint k = 0; k < n->len(); k++ ) {
2367       Node *inp = n->in(k);
2368       if (!inp) continue;
2369       assert(inp != n, "no cycles allowed" );
2370       if (_cfg->get_block_for_node(inp) == bb) { // Block-local use?
2371         if (inp->is_Proj()) { // Skip through Proj's
2372           inp = inp->in(0);
2373         }
2374         ++_uses[inp->_idx];     // Count 1 block-local use
2375       }
2376     }
2377 
2378     // If this instruction has a 0 use count, then it is available
2379     if (!_uses[n->_idx]) {
2380       _current_latency[n->_idx] = _bundle_cycle_number;
2381       AddNodeToAvailableList(n);
2382     }
2383 
2384 #ifndef PRODUCT
2385     if (_cfg->C->trace_opto_output()) {
2386       tty->print("#   uses: %3d: ", _uses[n->_idx]);
2387       n->dump();
2388     }
2389 #endif
2390   }
2391 
2392 #ifndef PRODUCT
2393   if (_cfg->C->trace_opto_output())
2394     tty->print("# <- ComputeUseCount\n");
2395 #endif
2396 }
2397 
2398 // This routine performs scheduling on each basic block in reverse order,
2399 // using instruction latencies and taking into account function unit
2400 // availability.
2401 void Scheduling::DoScheduling() {
2402 #ifndef PRODUCT
2403   if (_cfg->C->trace_opto_output())
2404     tty->print("# -> DoScheduling\n");
2405 #endif
2406 
2407   Block *succ_bb = NULL;
2408   Block *bb;
2409 
2410   // Walk over all the basic blocks in reverse order
2411   for (int i = _cfg->number_of_blocks() - 1; i >= 0; succ_bb = bb, i--) {
2412     bb = _cfg->get_block(i);
2413 
2414 #ifndef PRODUCT
2415     if (_cfg->C->trace_opto_output()) {
2416       tty->print("#  Schedule BB#%03d (initial)\n", i);
2417       for (uint j = 0; j < bb->number_of_nodes(); j++) {
2418         bb->get_node(j)->dump();
2419       }
2420     }
2421 #endif
2422 
2423     // On the head node, skip processing
2424     if (bb == _cfg->get_root_block()) {
2425       continue;
2426     }
2427 
2428     // Skip empty, connector blocks
2429     if (bb->is_connector())
2430       continue;
2431 
2432     // If the following block is not the sole successor of
2433     // this one, then reset the pipeline information
2434     if (bb->_num_succs != 1 || bb->non_connector_successor(0) != succ_bb) {
2435 #ifndef PRODUCT
2436       if (_cfg->C->trace_opto_output()) {
2437         tty->print("*** bundle start of next BB, node %d, for %d instructions\n",
2438                    _next_node->_idx, _bundle_instr_count);
2439       }
2440 #endif
2441       step_and_clear();
2442     }
2443 
2444     // Leave untouched the starting instruction, any Phis, a CreateEx node
2445     // or Top.  bb->get_node(_bb_start) is the first schedulable instruction.
2446     _bb_end = bb->number_of_nodes()-1;
2447     for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
2448       Node *n = bb->get_node(_bb_start);
2449       // Things not matched, like Phinodes and ProjNodes don't get scheduled.
2450       // Also, MachIdealNodes do not get scheduled
2451       if( !n->is_Mach() ) continue;     // Skip non-machine nodes
2452       MachNode *mach = n->as_Mach();
2453       int iop = mach->ideal_Opcode();
2454       if( iop == Op_CreateEx ) continue; // CreateEx is pinned
2455       if( iop == Op_Con ) continue;      // Do not schedule Top
2456       if( iop == Op_Node &&     // Do not schedule PhiNodes, ProjNodes
2457           mach->pipeline() == MachNode::pipeline_class() &&
2458           !n->is_SpillCopy() )  // Breakpoints, Prolog, etc
2459         continue;
2460       break;                    // Funny loop structure to be sure...
2461     }
2462     // Compute last "interesting" instruction in block - last instruction we
2463     // might schedule.  _bb_end points just after last schedulable inst.  We
2464     // normally schedule conditional branches (despite them being forced last
2465     // in the block), because they have delay slots we can fill.  Calls all
2466     // have their delay slots filled in the template expansions, so we don't
2467     // bother scheduling them.
2468     Node *last = bb->get_node(_bb_end);
2469     // Ignore trailing NOPs.
2470     while (_bb_end > 0 && last->is_Mach() &&
2471            last->as_Mach()->ideal_Opcode() == Op_Con) {
2472       last = bb->get_node(--_bb_end);
2473     }
2474     assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
2475     if( last->is_Catch() ||
2476        // Exclude unreachable path case when Halt node is in a separate block.
2477        (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
2478       // There must be a prior call.  Skip it.
2479       while( !bb->get_node(--_bb_end)->is_MachCall() ) {
2480         assert( bb->get_node(_bb_end)->is_MachProj(), "skipping projections after expected call" );
2481       }
2482     } else if( last->is_MachNullCheck() ) {
2483       // Backup so the last null-checked memory instruction is
2484       // outside the schedulable range. Skip over the nullcheck,
2485       // projection, and the memory nodes.
2486       Node *mem = last->in(1);
2487       do {
2488         _bb_end--;
2489       } while (mem != bb->get_node(_bb_end));
2490     } else {
2491       // Set _bb_end to point after last schedulable inst.
2492       _bb_end++;
2493     }
2494 
2495     assert( _bb_start <= _bb_end, "inverted block ends" );
2496 
2497     // Compute the register antidependencies for the basic block
2498     ComputeRegisterAntidependencies(bb);
2499     if (_cfg->C->failing())  return;  // too many D-U pinch points
2500 
2501     // Compute intra-bb latencies for the nodes
2502     ComputeLocalLatenciesForward(bb);
2503 
2504     // Compute the usage within the block, and set the list of all nodes
2505     // in the block that have no uses within the block.
2506     ComputeUseCount(bb);
2507 
2508     // Schedule the remaining instructions in the block
2509     while ( _available.size() > 0 ) {
2510       Node *n = ChooseNodeToBundle();
2511       guarantee(n != NULL, "no nodes available");
2512       AddNodeToBundle(n,bb);
2513     }
2514 
2515     assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
2516 #ifdef ASSERT
2517     for( uint l = _bb_start; l < _bb_end; l++ ) {
2518       Node *n = bb->get_node(l);
2519       uint m;
2520       for( m = 0; m < _bb_end-_bb_start; m++ )
2521         if( _scheduled[m] == n )
2522           break;
2523       assert( m < _bb_end-_bb_start, "instruction missing in schedule" );
2524     }
2525 #endif
2526 
2527     // Now copy the instructions (in reverse order) back to the block
2528     for ( uint k = _bb_start; k < _bb_end; k++ )
2529       bb->map_node(_scheduled[_bb_end-k-1], k);
2530 
2531 #ifndef PRODUCT
2532     if (_cfg->C->trace_opto_output()) {
2533       tty->print("#  Schedule BB#%03d (final)\n", i);
2534       uint current = 0;
2535       for (uint j = 0; j < bb->number_of_nodes(); j++) {
2536         Node *n = bb->get_node(j);
2537         if( valid_bundle_info(n) ) {
2538           Bundle *bundle = node_bundling(n);
2539           if (bundle->instr_count() > 0 || bundle->flags() > 0) {
2540             tty->print("*** Bundle: ");
2541             bundle->dump();
2542           }
2543           n->dump();
2544         }
2545       }
2546     }
2547 #endif
2548 #ifdef ASSERT
2549   verify_good_schedule(bb,"after block local scheduling");
2550 #endif
2551   }
2552 
2553 #ifndef PRODUCT
2554   if (_cfg->C->trace_opto_output())
2555     tty->print("# <- DoScheduling\n");
2556 #endif
2557 
2558   // Record final node-bundling array location
2559   _regalloc->C->set_node_bundling_base(_node_bundling_base);
2560 
2561 } // end DoScheduling
2562 
2563 // Verify that no live-range used in the block is killed in the block by a
2564 // wrong DEF.  This doesn't verify live-ranges that span blocks.
2565 
2566 // Check for edge existence.  Used to avoid adding redundant precedence edges.
2567 static bool edge_from_to( Node *from, Node *to ) {
2568   for( uint i=0; i<from->len(); i++ )
2569     if( from->in(i) == to )
2570       return true;
2571   return false;
2572 }
2573 
2574 #ifdef ASSERT
2575 void Scheduling::verify_do_def( Node *n, OptoReg::Name def, const char *msg ) {
2576   // Check for bad kills
2577   if( OptoReg::is_valid(def) ) { // Ignore stores & control flow
2578     Node *prior_use = _reg_node[def];
2579     if( prior_use && !edge_from_to(prior_use,n) ) {
2580       tty->print("%s = ",OptoReg::as_VMReg(def)->name());
2581       n->dump();
2582       tty->print_cr("...");
2583       prior_use->dump();
2584       assert(edge_from_to(prior_use,n),msg);
2585     }
2586     _reg_node.map(def,NULL); // Kill live USEs
2587   }
2588 }
2589 
2590 void Scheduling::verify_good_schedule( Block *b, const char *msg ) {
2591 
2592   // Zap to something reasonable for the verify code
2593   _reg_node.clear();
2594 
2595   // Walk over the block backwards.  Check to make sure each DEF doesn't
2596   // kill a live value (other than the one it's supposed to).  Add each
2597   // USE to the live set.
2598   for( uint i = b->number_of_nodes()-1; i >= _bb_start; i-- ) {
2599     Node *n = b->get_node(i);
2600     int n_op = n->Opcode();
2601     if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
2602       // Fat-proj kills a slew of registers
2603       RegMask rm = n->out_RegMask();// Make local copy
2604       while( rm.is_NotEmpty() ) {
2605         OptoReg::Name kill = rm.find_first_elem();
2606         rm.Remove(kill);
2607         verify_do_def( n, kill, msg );
2608       }
2609     } else if( n_op != Op_Node ) { // Avoid brand new antidependence nodes
2610       // Get DEF'd registers the normal way
2611       verify_do_def( n, _regalloc->get_reg_first(n), msg );
2612       verify_do_def( n, _regalloc->get_reg_second(n), msg );
2613     }
2614 
2615     // Now make all USEs live
2616     for( uint i=1; i<n->req(); i++ ) {
2617       Node *def = n->in(i);
2618       assert(def != 0, "input edge required");
2619       OptoReg::Name reg_lo = _regalloc->get_reg_first(def);
2620       OptoReg::Name reg_hi = _regalloc->get_reg_second(def);
2621       if( OptoReg::is_valid(reg_lo) ) {
2622         assert(!_reg_node[reg_lo] || edge_from_to(_reg_node[reg_lo],def), msg);
2623         _reg_node.map(reg_lo,n);
2624       }
2625       if( OptoReg::is_valid(reg_hi) ) {
2626         assert(!_reg_node[reg_hi] || edge_from_to(_reg_node[reg_hi],def), msg);
2627         _reg_node.map(reg_hi,n);
2628       }
2629     }
2630 
2631   }
2632 
2633   // Zap to something reasonable for the Antidependence code
2634   _reg_node.clear();
2635 }
2636 #endif
2637 
2638 // Conditionally add precedence edges.  Avoid putting edges on Projs.
2639 static void add_prec_edge_from_to( Node *from, Node *to ) {
2640   if( from->is_Proj() ) {       // Put precedence edge on Proj's input
2641     assert( from->req() == 1 && (from->len() == 1 || from->in(1)==0), "no precedence edges on projections" );
2642     from = from->in(0);
2643   }
2644   if( from != to &&             // No cycles (for things like LD L0,[L0+4] )
2645       !edge_from_to( from, to ) ) // Avoid duplicate edge
2646     from->add_prec(to);
2647 }
2648 
2649 void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def ) {
2650   if( !OptoReg::is_valid(def_reg) ) // Ignore stores & control flow
2651     return;
2652 
2653   Node *pinch = _reg_node[def_reg]; // Get pinch point
2654   if ((pinch == NULL) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet?
2655       is_def ) {    // Check for a true def (not a kill)
2656     _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point
2657     return;
2658   }
2659 
2660   Node *kill = def;             // Rename 'def' to more descriptive 'kill'
2661   debug_only( def = (Node*)0xdeadbeef; )
2662 
2663   // After some number of kills there _may_ be a later def
2664   Node *later_def = NULL;
2665 
2666   // Finding a kill requires a real pinch-point.
2667   // Check for not already having a pinch-point.
2668   // Pinch points are Op_Node's.
2669   if( pinch->Opcode() != Op_Node ) { // Or later-def/kill as pinch-point?
2670     later_def = pinch;            // Must be def/kill as optimistic pinch-point
2671     if ( _pinch_free_list.size() > 0) {
2672       pinch = _pinch_free_list.pop();
2673     } else {
2674       pinch = new (_cfg->C) Node(1); // Pinch point to-be
2675     }
2676     if (pinch->_idx >= _regalloc->node_regs_max_index()) {
2677       _cfg->C->record_method_not_compilable("too many D-U pinch points");
2678       return;
2679     }
2680     _cfg->map_node_to_block(pinch, b);      // Pretend it's valid in this block (lazy init)
2681     _reg_node.map(def_reg,pinch); // Record pinch-point
2682     //_regalloc->set_bad(pinch->_idx); // Already initialized this way.
2683     if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
2684       pinch->init_req(0, _cfg->C->top());     // set not NULL for the next call
2685       add_prec_edge_from_to(later_def,pinch); // Add edge from kill to pinch
2686       later_def = NULL;           // and no later def
2687     }
2688     pinch->set_req(0,later_def);  // Hook later def so we can find it
2689   } else {                        // Else have valid pinch point
2690     if( pinch->in(0) )            // If there is a later-def
2691       later_def = pinch->in(0);   // Get it
2692   }
2693 
2694   // Add output-dependence edge from later def to kill
2695   if( later_def )               // If there is some original def
2696     add_prec_edge_from_to(later_def,kill); // Add edge from def to kill
2697 
2698   // See if current kill is also a use, and so is forced to be the pinch-point.
2699   if( pinch->Opcode() == Op_Node ) {
2700     Node *uses = kill->is_Proj() ? kill->in(0) : kill;
2701     for( uint i=1; i<uses->req(); i++ ) {
2702       if( _regalloc->get_reg_first(uses->in(i)) == def_reg ||
2703           _regalloc->get_reg_second(uses->in(i)) == def_reg ) {
2704         // Yes, found a use/kill pinch-point
2705         pinch->set_req(0,NULL);  //
2706         pinch->replace_by(kill); // Move anti-dep edges up
2707         pinch = kill;
2708         _reg_node.map(def_reg,pinch);
2709         return;
2710       }
2711     }
2712   }
2713 
2714   // Add edge from kill to pinch-point
2715   add_prec_edge_from_to(kill,pinch);
2716 }
2717 
2718 void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
2719   if( !OptoReg::is_valid(use_reg) ) // Ignore stores & control flow
2720     return;
2721   Node *pinch = _reg_node[use_reg]; // Get pinch point
2722   // Check for no later def_reg/kill in block
2723   if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b &&
2724       // Use has to be block-local as well
2725       _cfg->get_block_for_node(use) == b) {
2726     if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
2727         pinch->req() == 1 ) {   // pinch not yet in block?
2728       pinch->del_req(0);        // yank pointer to later-def, also set flag
2729       // Insert the pinch-point in the block just after the last use
2730       b->insert_node(pinch, b->find_node(use) + 1);
2731       _bb_end++;                // Increase size scheduled region in block
2732     }
2733 
2734     add_prec_edge_from_to(pinch,use);
2735   }
2736 }
2737 
2738 // We insert antidependences between the reads and following write of
2739 // allocated registers to prevent illegal code motion. Hopefully, the
2740 // number of added references should be fairly small, especially as we
2741 // are only adding references within the current basic block.
2742 void Scheduling::ComputeRegisterAntidependencies(Block *b) {
2743 
2744 #ifdef ASSERT
2745   verify_good_schedule(b,"before block local scheduling");
2746 #endif
2747 
2748   // A valid schedule, for each register independently, is an endless cycle
2749   // of: a def, then some uses (connected to the def by true dependencies),
2750   // then some kills (defs with no uses), finally the cycle repeats with a new
2751   // def.  The uses are allowed to float relative to each other, as are the
2752   // kills.  No use is allowed to slide past a kill (or def).  This requires
2753   // antidependencies between all uses of a single def and all kills that
2754   // follow, up to the next def.  More edges are redundant, because later defs
2755   // & kills are already serialized with true or antidependencies.  To keep
2756   // the edge count down, we add a 'pinch point' node if there's more than
2757   // one use or more than one kill/def.
2758 
2759   // We add dependencies in one bottom-up pass.
2760 
2761   // For each instruction we handle it's DEFs/KILLs, then it's USEs.
2762 
2763   // For each DEF/KILL, we check to see if there's a prior DEF/KILL for this
2764   // register.  If not, we record the DEF/KILL in _reg_node, the
2765   // register-to-def mapping.  If there is a prior DEF/KILL, we insert a
2766   // "pinch point", a new Node that's in the graph but not in the block.
2767   // We put edges from the prior and current DEF/KILLs to the pinch point.
2768   // We put the pinch point in _reg_node.  If there's already a pinch point
2769   // we merely add an edge from the current DEF/KILL to the pinch point.
2770 
2771   // After doing the DEF/KILLs, we handle USEs.  For each used register, we
2772   // put an edge from the pinch point to the USE.
2773 
2774   // To be expedient, the _reg_node array is pre-allocated for the whole
2775   // compilation.  _reg_node is lazily initialized; it either contains a NULL,
2776   // or a valid def/kill/pinch-point, or a leftover node from some prior
2777   // block.  Leftover node from some prior block is treated like a NULL (no
2778   // prior def, so no anti-dependence needed).  Valid def is distinguished by
2779   // it being in the current block.
2780   bool fat_proj_seen = false;
2781   uint last_safept = _bb_end-1;
2782   Node* end_node         = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : NULL;
2783   Node* last_safept_node = end_node;
2784   for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
2785     Node *n = b->get_node(i);
2786     int is_def = n->outcnt();   // def if some uses prior to adding precedence edges
2787     if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
2788       // Fat-proj kills a slew of registers
2789       // This can add edges to 'n' and obscure whether or not it was a def,
2790       // hence the is_def flag.
2791       fat_proj_seen = true;
2792       RegMask rm = n->out_RegMask();// Make local copy
2793       while( rm.is_NotEmpty() ) {
2794         OptoReg::Name kill = rm.find_first_elem();
2795         rm.Remove(kill);
2796         anti_do_def( b, n, kill, is_def );
2797       }
2798     } else {
2799       // Get DEF'd registers the normal way
2800       anti_do_def( b, n, _regalloc->get_reg_first(n), is_def );
2801       anti_do_def( b, n, _regalloc->get_reg_second(n), is_def );
2802     }
2803 
2804     // Kill projections on a branch should appear to occur on the
2805     // branch, not afterwards, so grab the masks from the projections
2806     // and process them.
2807     if (n->is_MachBranch() || n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_Jump) {
2808       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2809         Node* use = n->fast_out(i);
2810         if (use->is_Proj()) {
2811           RegMask rm = use->out_RegMask();// Make local copy
2812           while( rm.is_NotEmpty() ) {
2813             OptoReg::Name kill = rm.find_first_elem();
2814             rm.Remove(kill);
2815             anti_do_def( b, n, kill, false );
2816           }
2817         }
2818       }
2819     }
2820 
2821     // Check each register used by this instruction for a following DEF/KILL
2822     // that must occur afterward and requires an anti-dependence edge.
2823     for( uint j=0; j<n->req(); j++ ) {
2824       Node *def = n->in(j);
2825       if( def ) {
2826         assert( !def->is_MachProj() || def->ideal_reg() != MachProjNode::fat_proj, "" );
2827         anti_do_use( b, n, _regalloc->get_reg_first(def) );
2828         anti_do_use( b, n, _regalloc->get_reg_second(def) );
2829       }
2830     }
2831     // Do not allow defs of new derived values to float above GC
2832     // points unless the base is definitely available at the GC point.
2833 
2834     Node *m = b->get_node(i);
2835 
2836     // Add precedence edge from following safepoint to use of derived pointer
2837     if( last_safept_node != end_node &&
2838         m != last_safept_node) {
2839       for (uint k = 1; k < m->req(); k++) {
2840         const Type *t = m->in(k)->bottom_type();
2841         if( t->isa_oop_ptr() &&
2842             t->is_ptr()->offset() != 0 ) {
2843           last_safept_node->add_prec( m );
2844           break;
2845         }
2846       }
2847     }
2848 
2849     if( n->jvms() ) {           // Precedence edge from derived to safept
2850       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
2851       if( b->get_node(last_safept) != last_safept_node ) {
2852         last_safept = b->find_node(last_safept_node);
2853       }
2854       for( uint j=last_safept; j > i; j-- ) {
2855         Node *mach = b->get_node(j);
2856         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
2857           mach->add_prec( n );
2858       }
2859       last_safept = i;
2860       last_safept_node = m;
2861     }
2862   }
2863 
2864   if (fat_proj_seen) {
2865     // Garbage collect pinch nodes that were not consumed.
2866     // They are usually created by a fat kill MachProj for a call.
2867     garbage_collect_pinch_nodes();
2868   }
2869 }
2870 
2871 // Garbage collect pinch nodes for reuse by other blocks.
2872 //
2873 // The block scheduler's insertion of anti-dependence
2874 // edges creates many pinch nodes when the block contains
2875 // 2 or more Calls.  A pinch node is used to prevent a
2876 // combinatorial explosion of edges.  If a set of kills for a
2877 // register is anti-dependent on a set of uses (or defs), rather
2878 // than adding an edge in the graph between each pair of kill
2879 // and use (or def), a pinch is inserted between them:
2880 //
2881 //            use1   use2  use3
2882 //                \   |   /
2883 //                 \  |  /
2884 //                  pinch
2885 //                 /  |  \
2886 //                /   |   \
2887 //            kill1 kill2 kill3
2888 //
2889 // One pinch node is created per register killed when
2890 // the second call is encountered during a backwards pass
2891 // over the block.  Most of these pinch nodes are never
2892 // wired into the graph because the register is never
2893 // used or def'ed in the block.
2894 //
2895 void Scheduling::garbage_collect_pinch_nodes() {
2896 #ifndef PRODUCT
2897     if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:");
2898 #endif
2899     int trace_cnt = 0;
2900     for (uint k = 0; k < _reg_node.Size(); k++) {
2901       Node* pinch = _reg_node[k];
2902       if ((pinch != NULL) && pinch->Opcode() == Op_Node &&
2903           // no predecence input edges
2904           (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
2905         cleanup_pinch(pinch);
2906         _pinch_free_list.push(pinch);
2907         _reg_node.map(k, NULL);
2908 #ifndef PRODUCT
2909         if (_cfg->C->trace_opto_output()) {
2910           trace_cnt++;
2911           if (trace_cnt > 40) {
2912             tty->print("\n");
2913             trace_cnt = 0;
2914           }
2915           tty->print(" %d", pinch->_idx);
2916         }
2917 #endif
2918       }
2919     }
2920 #ifndef PRODUCT
2921     if (_cfg->C->trace_opto_output()) tty->print("\n");
2922 #endif
2923 }
2924 
2925 // Clean up a pinch node for reuse.
2926 void Scheduling::cleanup_pinch( Node *pinch ) {
2927   assert (pinch && pinch->Opcode() == Op_Node && pinch->req() == 1, "just checking");
2928 
2929   for (DUIterator_Last imin, i = pinch->last_outs(imin); i >= imin; ) {
2930     Node* use = pinch->last_out(i);
2931     uint uses_found = 0;
2932     for (uint j = use->req(); j < use->len(); j++) {
2933       if (use->in(j) == pinch) {
2934         use->rm_prec(j);
2935         uses_found++;
2936       }
2937     }
2938     assert(uses_found > 0, "must be a precedence edge");
2939     i -= uses_found;    // we deleted 1 or more copies of this edge
2940   }
2941   // May have a later_def entry
2942   pinch->set_req(0, NULL);
2943 }
2944 
2945 #ifndef PRODUCT
2946 
2947 void Scheduling::dump_available() const {
2948   tty->print("#Availist  ");
2949   for (uint i = 0; i < _available.size(); i++)
2950     tty->print(" N%d/l%d", _available[i]->_idx,_current_latency[_available[i]->_idx]);
2951   tty->cr();
2952 }
2953 
2954 // Print Scheduling Statistics
2955 void Scheduling::print_statistics() {
2956   // Print the size added by nops for bundling
2957   tty->print("Nops added %d bytes to total of %d bytes",
2958     _total_nop_size, _total_method_size);
2959   if (_total_method_size > 0)
2960     tty->print(", for %.2f%%",
2961       ((double)_total_nop_size) / ((double) _total_method_size) * 100.0);
2962   tty->print("\n");
2963 
2964   // Print the number of branch shadows filled
2965   if (Pipeline::_branch_has_delay_slot) {
2966     tty->print("Of %d branches, %d had unconditional delay slots filled",
2967       _total_branches, _total_unconditional_delays);
2968     if (_total_branches > 0)
2969       tty->print(", for %.2f%%",
2970         ((double)_total_unconditional_delays) / ((double)_total_branches) * 100.0);
2971     tty->print("\n");
2972   }
2973 
2974   uint total_instructions = 0, total_bundles = 0;
2975 
2976   for (uint i = 1; i <= Pipeline::_max_instrs_per_cycle; i++) {
2977     uint bundle_count   = _total_instructions_per_bundle[i];
2978     total_instructions += bundle_count * i;
2979     total_bundles      += bundle_count;
2980   }
2981 
2982   if (total_bundles > 0)
2983     tty->print("Average ILP (excluding nops) is %.2f\n",
2984       ((double)total_instructions) / ((double)total_bundles));
2985 }
2986 #endif