src/hotspot/share/opto/output.cpp (old version)

  54 
  55 // Convert Nodes to instruction bits and pass off to the VM
  56 void Compile::Output() {
  57   // RootNode goes
  58   assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
  59 
  60   // The number of new nodes (mostly MachNop) is proportional to
  61   // the number of java calls and inner loops which are aligned.
  62   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
  63                             C->inner_loops()*(OptoLoopAlignment-1)),
  64                            "out of nodes before code generation" ) ) {
  65     return;
  66   }
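
A minimal standalone sketch of the node-budget arithmetic in the bailout
check above. The constants here are assumptions for illustration;
NodeLimitFudgeFactor and OptoLoopAlignment are VM globals whose real
defaults vary by platform.

    #include <cstdio>

    int main() {
      const int NodeLimitFudgeFactor = 2000; // assumed value
      const int OptoLoopAlignment    = 16;   // assumed value
      int java_calls  = 40;                  // hypothetical method profile
      int inner_loops = 5;
      // ~3 extra nodes per Java call, plus up to OptoLoopAlignment-1
      // bytes of MachNop padding for each aligned inner loop.
      int budget = NodeLimitFudgeFactor + java_calls * 3
                 + inner_loops * (OptoLoopAlignment - 1);
      printf("extra node budget: %d\n", budget); // 2195
    }
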
  67   // Make sure I can find the Start Node
  68   Block *entry = _cfg->get_block(1);
  69   Block *broot = _cfg->get_root_block();
  70 
  71   const StartNode *start = entry->head()->as_Start();
  72 
  73   // Replace StartNode with prolog
  74   MachPrologNode *prolog = new MachPrologNode();

  75   entry->map_node(prolog, 0);
  76   _cfg->map_node_to_block(prolog, entry);
  77   _cfg->unmap_node_from_block(start); // start is no longer in any block
  78 
  79   // Virtual methods need an unverified entry point
  80 
  81   if( is_osr_compilation() ) {
  82     if( PoisonOSREntry ) {
  83       // TODO: Should use a ShouldNotReachHereNode...
  84       _cfg->insert( broot, 0, new MachBreakpointNode() );
  85     }
  86   } else {
  87     if( _method && !_method->flags().is_static() ) {
  88       // Insert unvalidated entry point
  89       _cfg->insert( broot, 0, new MachUEPNode() );

  90     }
  91 
  92   }
  93 
  94   // Break before main entry point
  95   if ((_method && C->directive()->BreakAtExecuteOption) ||
  96       (OptoBreakpoint && is_method_compilation())       ||
  97       (OptoBreakpointOSR && is_osr_compilation())       ||
  98       (OptoBreakpointC2R && !_method)                   ) {
  99     // checking for _method means that OptoBreakpoint does not apply to
 100     // runtime stubs or frame converters
 101     _cfg->insert( entry, 1, new MachBreakpointNode() );
 102   }
 103 
 104   // Insert epilogs before every return
 105   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 106     Block* block = _cfg->get_block(i);
 107     if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point?
 108       Node* m = block->end();
 109       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 110         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 111         block->add_inst(epilog);
 112         _cfg->map_node_to_block(epilog, block);
 113       }
 114     }
 115   }
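
The boolean passed to MachEpilogNode above decides whether the epilog
performs a safepoint poll: only blocks that end in a normal Op_Return get
one. A tiny sketch of that flag, using a stand-in opcode enum:

    #include <cstdio>

    enum Opcode { Op_Return, Op_TailCall, Op_Halt }; // stand-ins

    int main() {
      Opcode end_op = Op_Return;               // hypothetical block end
      bool do_polling = (end_op == Op_Return); // the MachEpilogNode ctor arg
      printf("do_polling=%d\n", do_polling);
    }
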
 116 
 117   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
 118   blk_starts[0] = 0;
 119 
 120   // Initialize code buffer and process short branches.
 121   CodeBuffer* cb = init_buffer(blk_starts);
 122 
 123   if (cb == NULL || failing()) {
 124     return;
 125   }
 126 
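
blk_starts records each block's starting offset in the code buffer;
init_buffer fills it in as a running sum of worst-case per-block size
estimates. A self-contained sketch of that prefix-sum shape, with made-up
sizes:

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<unsigned> blk_size = {32, 48, 16, 64}; // hypothetical estimates
      std::vector<unsigned> blk_starts(blk_size.size() + 1);
      blk_starts[0] = 0;
      for (size_t i = 0; i < blk_size.size(); i++)
        blk_starts[i + 1] = blk_starts[i] + blk_size[i];
      for (unsigned s : blk_starts)
        printf("%u ", s); // 0 32 80 96 160
      printf("\n");
    }
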

 127   ScheduleAndBundle();
 128 
 129 #ifndef PRODUCT
 130   if (trace_opto_output()) {
 131     tty->print("\n---- After ScheduleAndBundle ----\n");
 132     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 133       tty->print("\nBB#%03d:\n", i);
 134       Block* block = _cfg->get_block(i);
 135       for (uint j = 0; j < block->number_of_nodes(); j++) {
 136         Node* n = block->get_node(j);
 137         OptoReg::Name reg = _regalloc->get_reg_first(n);
 138         tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
 139         n->dump();
 140       }
 141     }
 142   }
 143 #endif
 144 
 145   if (failing()) {
 146     return;


 271 
 272     // Sum all instruction sizes to compute block size
 273     uint last_inst = block->number_of_nodes();
 274     uint blk_size = 0;
 275     for (uint j = 0; j < last_inst; j++) {
 276       Node* nj = block->get_node(j);
 277       // Handle machine instruction nodes
 278       if (nj->is_Mach()) {
 279         MachNode *mach = nj->as_Mach();
 280         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 281         reloc_size += mach->reloc();
 282         if (mach->is_MachCall()) {
 283           // add size information for trampoline stub
 284           // class CallStubImpl is platform-specific and defined in the *.ad files.
 285           stub_size  += CallStubImpl::size_call_trampoline();
 286           reloc_size += CallStubImpl::reloc_call_trampoline();
 287 
 288           MachCallNode *mcall = mach->as_MachCall();
 289           // This destination address is NOT PC-relative
 290 

 291           mcall->method_set((intptr_t)mcall->entry_point());

 292 
 293           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 294             stub_size  += CompiledStaticCall::to_interp_stub_size();
 295             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 296 #if INCLUDE_AOT
 297             stub_size  += CompiledStaticCall::to_aot_stub_size();
 298             reloc_size += CompiledStaticCall::reloc_to_aot_stub();
 299 #endif
 300           }
 301         } else if (mach->is_MachSafePoint()) {
 302           // If call/safepoint are adjacent, account for possible
 303           // nop to disambiguate the two safepoints.
 304           // ScheduleAndBundle() can rearrange nodes in a block,
 305           // check for all offsets inside this block.
 306           if (last_call_adr >= blk_starts[i]) {
 307             blk_size += nop_size;
 308           }
 309         }
 310         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 311           // Nop is inserted between "avoid back to back" instructions.


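
The (alignment_required() - 1) * addr_unit() term above reserves worst-case
padding for every instruction that requires alignment, since final offsets
are not known yet at this point. A worked example with assumed values:

    #include <cstdio>

    int main() {
      int alignment_required = 16; // assumed: instruction wants 16-byte alignment
      int addr_unit          = 1;  // assumed: relocation unit of one byte
      int worst_case = (alignment_required - 1) * addr_unit;
      printf("reserve up to %d bytes of padding\n", worst_case); // 15
    }
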
 709     break;
 710   }
 711 }
 712 
 713 // Determine if this node starts a bundle
 714 bool Compile::starts_bundle(const Node *n) const {
 715   return (_node_bundling_limit > n->_idx &&
 716           _node_bundling_base[n->_idx].starts_bundle());
 717 }
 718 
 719 //--------------------------Process_OopMap_Node--------------------------------
 720 void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
 721 
 722   // Handle special safepoint nodes for synchronization
 723   MachSafePointNode *sfn   = mach->as_MachSafePoint();
 724   MachCallNode      *mcall;
 725 
 726   int safepoint_pc_offset = current_offset;
 727   bool is_method_handle_invoke = false;
 728   bool return_oop = false;

 729 
 730   // Add the safepoint in the DebugInfoRecorder
 731   if( !mach->is_MachCall() ) {
 732     mcall = NULL;
 733     debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
 734   } else {
 735     mcall = mach->as_MachCall();
 736 
 737     // Is the call a MethodHandle call?
 738     if (mcall->is_MachCallJava()) {
 739       if (mcall->as_MachCallJava()->_method_handle_invoke) {
 740         assert(has_method_handle_invokes(), "must have been set during call generation");
 741         is_method_handle_invoke = true;
 742       }
 743     }
 744 
 745     // Check if a call returns an object.
 746     if (mcall->returns_pointer()) {
 747       return_oop = true;
 748     }



 749     safepoint_pc_offset += mcall->ret_addr_offset();
 750     debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
 751   }
 752 
 753   // Loop over the JVMState list to add scope information
 754   // Do not skip safepoints with a NULL method, they need monitor info
 755   JVMState* youngest_jvms = sfn->jvms();
 756   int max_depth = youngest_jvms->depth();
 757 
 758   // Allocate the object pool for scalar-replaced objects -- the map from
 759   // small-integer keys (which can be recorded in the local and ostack
 760   // arrays) to descriptions of the object state.
 761   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
 762 
 763   // Visit scopes from oldest to youngest.
 764   for (int depth = 1; depth <= max_depth; depth++) {
 765     JVMState* jvms = youngest_jvms->of_depth(depth);
 766     int idx;
 767     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
 768     // Safepoints that do not have method() set only provide oop-map and monitor info


 843       bool eliminated = (box_node->is_BoxLock() && box_node->as_BoxLock()->is_eliminated());
 844       monarray->append(new MonitorValue(scval, basic_lock, eliminated));
 845     }
 846 
 847     // We dump the object pool first, since deoptimization reads it in first.
 848     debug_info()->dump_object_pool(objs);
 849 
 850     // Build first class objects to pass to scope
 851     DebugToken *locvals = debug_info()->create_scope_values(locarray);
 852     DebugToken *expvals = debug_info()->create_scope_values(exparray);
 853     DebugToken *monvals = debug_info()->create_monitor_values(monarray);
 854 
 855     // Make method available for all Safepoints
 856     ciMethod* scope_method = method ? method : _method;
 857     // Describe the scope here
 858     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
 859     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
 860     // Now we can describe the scope.
 861     methodHandle null_mh;
 862     bool rethrow_exception = false;
 863     debug_info()->describe_scope(safepoint_pc_offset, null_mh, scope_method, jvms->bci(), jvms->should_reexecute(), rethrow_exception, is_method_handle_invoke, return_oop, locvals, expvals, monvals);
 864   } // End jvms loop
 865 
 866   // Mark the end of the scope set.
 867   debug_info()->end_safepoint(safepoint_pc_offset);
 868 }
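
The depth loop above visits inlining scopes from the outermost caller
(depth 1) up to the youngest frame (max_depth), so deoptimization can
rebuild interpreter frames oldest-first. A minimal model of that chain
walk; this JVMState is a stand-in, not the HotSpot class:

    #include <cstdio>

    struct JVMState {
      int bci;
      JVMState* caller;
      int depth() const { return caller ? caller->depth() + 1 : 1; }
      const JVMState* of_depth(int d) const {
        return (depth() == d) ? this : caller->of_depth(d);
      }
    };

    int main() {
      JVMState outer{10, nullptr}, mid{42, &outer}, inner{7, &mid};
      for (int d = 1; d <= inner.depth(); d++)
        printf("depth %d: bci %d\n", d, inner.of_depth(d)->bci);
    }
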
 869 
 870 
 871 
 872 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
 873 class NonSafepointEmitter {
 874   Compile*  C;
 875   JVMState* _pending_jvms;
 876   int       _pending_offset;
 877 
 878   void emit_non_safepoint();
 879 
 880  public:
 881   NonSafepointEmitter(Compile* compile) {
 882     this->C = compile;
 883     _pending_jvms = NULL;


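
Although the class body is cut off here, its shape is the familiar
pending/flush pattern: remember only the most recent JVMState and offset,
and emit a non-safepoint debug record lazily once the covered PC range is
closed. A sketch of that pattern under assumed semantics (all names
hypothetical):

    #include <cstdio>

    struct PendingEmitter {
      int pending_offset = -1;
      void observe(int offset) { pending_offset = offset; } // overwrite, emit later
      void flush_at(int pc) {
        if (pending_offset >= 0 && pending_offset < pc)
          printf("emit non-safepoint record at %d\n", pending_offset);
        pending_offset = -1;
      }
    };

    int main() {
      PendingEmitter e;
      e.observe(8);
      e.observe(12);  // only the newest pending state survives
      e.flush_at(16); // emits the record at offset 12
    }
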
 952 }
 953 
 954 //------------------------------init_buffer------------------------------------
 955 CodeBuffer* Compile::init_buffer(uint* blk_starts) {
 956 
 957   // Set the initially allocated size
 958   int  code_req   = initial_code_capacity;
 959   int  locs_req   = initial_locs_capacity;
 960   int  stub_req   = initial_stub_capacity;
 961   int  const_req  = initial_const_capacity;
 962 
 963   int  pad_req    = NativeCall::instruction_size;
 964   // The extra spacing after the code is necessary on some platforms.
 965   // Sometimes we need to patch in a jump after the last instruction,
 966   // if the nmethod has been deoptimized.  (See 4932387, 4894843.)
 967 
 968   // Compute the byte offset where we can store the deopt pc.
 969   if (fixed_slots() != 0) {
 970     _orig_pc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_orig_pc_slot));
 971   }

 972 
 973   // Compute prolog code size
 974   _method_size = 0;
 975   _frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize;
 976 #if defined(IA64) && !defined(AIX)
 977   if (save_argument_registers()) {
 978     // 4815101: this is a stub with implicit and unknown precision fp args.
 979     // The usual spill mechanism can only generate stfd's in this case, which
 980     // doesn't work if the fp reg to spill contains a single-precision denorm.
 981     // Instead, we hack around the normal spill mechanism using stfspill's and
 982     // ldffill's in the MachProlog and MachEpilog emit methods.  We allocate
 983     // space here for the fp arg regs (f8-f15) we're going to thusly spill.
 984     //
 985     // If we ever implement 16-byte 'registers' == stack slots, we can
 986     // get rid of this hack and have SpillCopy generate stfspill/ldffill
 987     // instead of stfd/stfs/ldfd/ldfs.
 988     _frame_slots += 8*(16/BytesPerInt);
 989   }
 990 #endif
 991   assert(_frame_slots >= 0 && _frame_slots < 1000000, "sanity check");

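
Two pieces of arithmetic worth spelling out: a stack slot is one 32-bit
word, and the IA64 workaround reserves eight 16-byte fp spill areas, i.e.
8*(16/BytesPerInt) = 32 extra slots. A standalone check:

    #include <cstdio>

    int main() {
      const int BytesPerInt = 4;               // a stack slot is one 32-bit word
      int extra_slots = 8 * (16 / BytesPerInt);
      printf("fp-arg spill area: %d slots (%d bytes)\n",
             extra_slots, extra_slots * BytesPerInt); // 32 slots, 128 bytes
    }
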

1216           int nops_cnt = padding / nop_size;
1217           MachNode *nop = new MachNopNode(nops_cnt);
1218           block->insert_node(nop, j++);
1219           last_inst++;
1220           _cfg->map_node_to_block(nop, block);
1221           // Ensure enough space.
1222           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1223           if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1224             C->record_failure("CodeCache is full");
1225             return;
1226           }
1227           nop->emit(*cb, _regalloc);
1228           cb->flush_bundle(true);
1229           current_offset = cb->insts_size();
1230         }
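
For the padding block above, the number of filler instructions is simply
padding / nop_size. A worked example, assuming 1-byte nops and a 16-byte
target boundary:

    #include <cstdio>

    int main() {
      int current_offset = 37, alignment = 16, nop_size = 1; // assumed values
      int padding  = (alignment - (current_offset % alignment)) % alignment;
      int nops_cnt = padding / nop_size;
      printf("padding=%d nops=%d\n", padding, nops_cnt); // padding=11 nops=11
    }
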
1231 
1232         // Remember the start of the last call in a basic block
1233         if (is_mcall) {
1234           MachCallNode *mcall = mach->as_MachCall();
1235 

1236           // This destination address is NOT PC-relative
1237           mcall->method_set((intptr_t)mcall->entry_point());

1238 
1239           // Save the return address
1240           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1241 
1242           if (mcall->is_MachCallLeaf()) {
1243             is_mcall = false;
1244             is_sfn = false;
1245           }
1246         }
1247 
1248         // sfn will be valid whenever mcall is valid now because of inheritance
1249         if (is_sfn || is_mcall) {
1250 
1251           // Handle special safepoint nodes for synchronization
1252           if (!is_mcall) {
1253             MachSafePointNode *sfn = mach->as_MachSafePoint();
1254             // !!!!! Stubs only need an oopmap right now, so bail out
1255             if (sfn->jvms()->method() == NULL) {
1256               // Write the oopmap directly to the code blob??!!
1257               continue;

src/hotspot/share/opto/output.cpp (new version)

  54 
  55 // Convert Nodes to instruction bits and pass off to the VM
  56 void Compile::Output() {
  57   // RootNode goes
  58   assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
  59 
  60   // The number of new nodes (mostly MachNop) is proportional to
  61   // the number of java calls and inner loops which are aligned.
  62   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
  63                             C->inner_loops()*(OptoLoopAlignment-1)),
  64                            "out of nodes before code generation" ) ) {
  65     return;
  66   }
  67   // Make sure I can find the Start Node
  68   Block *entry = _cfg->get_block(1);
  69   Block *broot = _cfg->get_root_block();
  70 
  71   const StartNode *start = entry->head()->as_Start();
  72 
  73   // Replace StartNode with prolog
  74   Label verified_entry;
  75   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
  76   entry->map_node(prolog, 0);
  77   _cfg->map_node_to_block(prolog, entry);
  78   _cfg->unmap_node_from_block(start); // start is no longer in any block
  79 
  80   // Virtual methods need an unverified entry point
  81   if (is_osr_compilation()) {
  82     if (PoisonOSREntry) {

  83       // TODO: Should use a ShouldNotReachHereNode...
  84       _cfg->insert( broot, 0, new MachBreakpointNode() );
  85     }
  86   } else {
  87     if (_method && !_method->is_static()) {
  88       // Insert unvalidated entry point
  89       _cfg->insert(broot, 0, new MachUEPNode());
  90     }
  91     if (_method && _method->has_scalarized_args()) {
  92       // Add entry point to unpack all value type arguments
  93       _cfg->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
  94       if (!_method->is_static()) {
  95         // Add verified/unverified entry points to only unpack value type receiver at interface calls
  96         _cfg->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ true));
  97         _cfg->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
  98       }
  99     }

 100   }
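
Since every _cfg->insert(broot, 0, ...) call above prepends, the entry
points end up in reverse order of insertion. A small model of the
resulting node order for a non-static method with scalarized arguments
(the labels are descriptive, not class names):

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> broot;
      broot.insert(broot.begin(), "MachUEPNode");
      broot.insert(broot.begin(), "VEP(verified, all args)");
      broot.insert(broot.begin(), "VEP(verified, receiver only)");
      broot.insert(broot.begin(), "VEP(unverified, receiver only)");
      for (size_t i = 0; i < broot.size(); i++)
        printf("broot[%zu] = %s\n", i, broot[i].c_str());
    }

This ordering is consistent with the offset computation further down,
which reads broot->get_node(0) as the unverified receiver-only VEP and
broot->get_node(1) as the verified receiver-only VEP.
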
 101 
 102   // Break before main entry point
 103   if ((_method && C->directive()->BreakAtExecuteOption) ||
 104       (OptoBreakpoint && is_method_compilation())       ||
 105       (OptoBreakpointOSR && is_osr_compilation())       ||
 106       (OptoBreakpointC2R && !_method)                   ) {
 107     // checking for _method means that OptoBreakpoint does not apply to
 108     // runtime stubs or frame converters
 109     _cfg->insert( entry, 1, new MachBreakpointNode() );
 110   }
 111 
 112   // Insert epilogs before every return
 113   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 114     Block* block = _cfg->get_block(i);
 115     if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point?
 116       Node* m = block->end();
 117       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 118         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 119         block->add_inst(epilog);
 120         _cfg->map_node_to_block(epilog, block);
 121       }
 122     }
 123   }
 124 
 125   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
 126   blk_starts[0] = 0;
 127 
 128   // Initialize code buffer and process short branches.
 129   CodeBuffer* cb = init_buffer(blk_starts);
 130 
 131   if (cb == NULL || failing()) {
 132     return;
 133   }
 134 
 135   if (_method && _method->has_scalarized_args()) {
 136     // Compute the offsets of the entry points required by the value type calling convention
 137     if (!_method->is_static()) {
 138       uint vep_ro_size  = ((MachVEPNode*)broot->get_node(0))->size(_regalloc);
 139       uint vvep_ro_size = ((MachVEPNode*)broot->get_node(1))->size(_regalloc);
 140       _code_offsets.set_value(CodeOffsets::Verified_Value_Entry_RO, vep_ro_size);
 141       _code_offsets.set_value(CodeOffsets::Verified_Value_Entry, vep_ro_size + vvep_ro_size);
 142     } else {
 143       _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
 144       _code_offsets.set_value(CodeOffsets::Verified_Value_Entry, 0);
 145     }
 146   }
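
A worked example of the offset bookkeeping above, with hypothetical stub
sizes: each entry point begins where the preceding stub ends, so
Verified_Value_Entry_RO sits after the unverified stub and
Verified_Value_Entry after both receiver-only stubs.

    #include <cstdio>

    int main() {
      unsigned vep_ro_size  = 16; // hypothetical size of the unverified RO stub
      unsigned vvep_ro_size = 24; // hypothetical size of the verified RO stub
      unsigned verified_value_entry_ro = vep_ro_size;                // 16
      unsigned verified_value_entry    = vep_ro_size + vvep_ro_size; // 40
      printf("RO entry at %u, verified entry at %u\n",
             verified_value_entry_ro, verified_value_entry);
    }
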
 147 
 148   ScheduleAndBundle();
 149 
 150 #ifndef PRODUCT
 151   if (trace_opto_output()) {
 152     tty->print("\n---- After ScheduleAndBundle ----\n");
 153     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 154       tty->print("\nBB#%03d:\n", i);
 155       Block* block = _cfg->get_block(i);
 156       for (uint j = 0; j < block->number_of_nodes(); j++) {
 157         Node* n = block->get_node(j);
 158         OptoReg::Name reg = _regalloc->get_reg_first(n);
 159         tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
 160         n->dump();
 161       }
 162     }
 163   }
 164 #endif
 165 
 166   if (failing()) {
 167     return;


 292 
 293     // Sum all instruction sizes to compute block size
 294     uint last_inst = block->number_of_nodes();
 295     uint blk_size = 0;
 296     for (uint j = 0; j < last_inst; j++) {
 297       Node* nj = block->get_node(j);
 298       // Handle machine instruction nodes
 299       if (nj->is_Mach()) {
 300         MachNode *mach = nj->as_Mach();
 301         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 302         reloc_size += mach->reloc();
 303         if (mach->is_MachCall()) {
 304           // add size information for trampoline stub
 305           // class CallStubImpl is platform-specific and defined in the *.ad files.
 306           stub_size  += CallStubImpl::size_call_trampoline();
 307           reloc_size += CallStubImpl::reloc_call_trampoline();
 308 
 309           MachCallNode *mcall = mach->as_MachCall();
 310           // This destination address is NOT PC-relative
 311 
 312           if (mcall->entry_point() != NULL) {
 313             mcall->method_set((intptr_t)mcall->entry_point());
 314           }
 315 
 316           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 317             stub_size  += CompiledStaticCall::to_interp_stub_size();
 318             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 319 #if INCLUDE_AOT
 320             stub_size  += CompiledStaticCall::to_aot_stub_size();
 321             reloc_size += CompiledStaticCall::reloc_to_aot_stub();
 322 #endif
 323           }
 324         } else if (mach->is_MachSafePoint()) {
 325           // If call/safepoint are adjacent, account for possible
 326           // nop to disambiguate the two safepoints.
 327           // ScheduleAndBundle() can rearrange nodes in a block,
 328           // check for all offsets inside this block.
 329           if (last_call_adr >= blk_starts[i]) {
 330             blk_size += nop_size;
 331           }
 332         }
 333         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 334           // Nop is inserted between "avoid back to back" instructions.


 732     break;
 733   }
 734 }
 735 
 736 // Determine if this node starts a bundle
 737 bool Compile::starts_bundle(const Node *n) const {
 738   return (_node_bundling_limit > n->_idx &&
 739           _node_bundling_base[n->_idx].starts_bundle());
 740 }
 741 
 742 //--------------------------Process_OopMap_Node--------------------------------
 743 void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
 744 
 745   // Handle special safepoint nodes for synchronization
 746   MachSafePointNode *sfn   = mach->as_MachSafePoint();
 747   MachCallNode      *mcall;
 748 
 749   int safepoint_pc_offset = current_offset;
 750   bool is_method_handle_invoke = false;
 751   bool return_oop = false;
 752   bool return_vt = false;
 753 
 754   // Add the safepoint in the DebugInfoRecorder
 755   if( !mach->is_MachCall() ) {
 756     mcall = NULL;
 757     debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
 758   } else {
 759     mcall = mach->as_MachCall();
 760 
 761     // Is the call a MethodHandle call?
 762     if (mcall->is_MachCallJava()) {
 763       if (mcall->as_MachCallJava()->_method_handle_invoke) {
 764         assert(has_method_handle_invokes(), "must have been set during call generation");
 765         is_method_handle_invoke = true;
 766       }
 767     }
 768 
 769     // Check if a call returns an object.
 770     if (mcall->returns_pointer() || mcall->returns_vt()) {
 771       return_oop = true;
 772     }
 773     if (mcall->returns_vt()) {
 774       return_vt = true;
 775     }
 776     safepoint_pc_offset += mcall->ret_addr_offset();
 777     debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
 778   }
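
Note the flag logic above: a call that returns a value type sets
return_oop as well, presumably so the runtime still treats the returned
buffer as a potential heap oop, with return_vt recorded in addition. As a
truth-table sketch:

    #include <cstdio>

    int main() {
      bool returns_pointer = false, returns_vt = true; // hypothetical call
      bool return_oop = returns_pointer || returns_vt; // vt implies oop
      bool return_vt  = returns_vt;
      printf("return_oop=%d return_vt=%d\n", return_oop, return_vt); // 1 1
    }
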
 779 
 780   // Loop over the JVMState list to add scope information
 781   // Do not skip safepoints with a NULL method, they need monitor info
 782   JVMState* youngest_jvms = sfn->jvms();
 783   int max_depth = youngest_jvms->depth();
 784 
 785   // Allocate the object pool for scalar-replaced objects -- the map from
 786   // small-integer keys (which can be recorded in the local and ostack
 787   // arrays) to descriptions of the object state.
 788   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
 789 
 790   // Visit scopes from oldest to youngest.
 791   for (int depth = 1; depth <= max_depth; depth++) {
 792     JVMState* jvms = youngest_jvms->of_depth(depth);
 793     int idx;
 794     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
 795     // Safepoints that do not have method() set only provide oop-map and monitor info


 870       bool eliminated = (box_node->is_BoxLock() && box_node->as_BoxLock()->is_eliminated());
 871       monarray->append(new MonitorValue(scval, basic_lock, eliminated));
 872     }
 873 
 874     // We dump the object pool first, since deoptimization reads it in first.
 875     debug_info()->dump_object_pool(objs);
 876 
 877     // Build first class objects to pass to scope
 878     DebugToken *locvals = debug_info()->create_scope_values(locarray);
 879     DebugToken *expvals = debug_info()->create_scope_values(exparray);
 880     DebugToken *monvals = debug_info()->create_monitor_values(monarray);
 881 
 882     // Make method available for all Safepoints
 883     ciMethod* scope_method = method ? method : _method;
 884     // Describe the scope here
 885     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
 886     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
 887     // Now we can describe the scope.
 888     methodHandle null_mh;
 889     bool rethrow_exception = false;
 890     debug_info()->describe_scope(safepoint_pc_offset, null_mh, scope_method, jvms->bci(), jvms->should_reexecute(), rethrow_exception, is_method_handle_invoke, return_oop, return_vt, locvals, expvals, monvals);
 891   } // End jvms loop
 892 
 893   // Mark the end of the scope set.
 894   debug_info()->end_safepoint(safepoint_pc_offset);
 895 }
 896 
 897 
 898 
 899 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
 900 class NonSafepointEmitter {
 901   Compile*  C;
 902   JVMState* _pending_jvms;
 903   int       _pending_offset;
 904 
 905   void emit_non_safepoint();
 906 
 907  public:
 908   NonSafepointEmitter(Compile* compile) {
 909     this->C = compile;
 910     _pending_jvms = NULL;


 979 }
 980 
 981 //------------------------------init_buffer------------------------------------
 982 CodeBuffer* Compile::init_buffer(uint* blk_starts) {
 983 
 984   // Set the initially allocated size
 985   int  code_req   = initial_code_capacity;
 986   int  locs_req   = initial_locs_capacity;
 987   int  stub_req   = initial_stub_capacity;
 988   int  const_req  = initial_const_capacity;
 989 
 990   int  pad_req    = NativeCall::instruction_size;
 991   // The extra spacing after the code is necessary on some platforms.
 992   // Sometimes we need to patch in a jump after the last instruction,
 993   // if the nmethod has been deoptimized.  (See 4932387, 4894843.)
 994 
 995   // Compute the byte offset where we can store the deopt pc.
 996   if (fixed_slots() != 0) {
 997     _orig_pc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_orig_pc_slot));
 998   }
 999   if (C->needs_stack_repair()) {
1000     // Compute the byte offset of the stack increment value
1001     _sp_inc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_sp_inc_slot));
1002   }
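
needs_stack_repair() reserves a stack slot holding the frame's SP
increment; its byte offset is derived the same way as the orig-pc slot,
via reg2offset(stack2reg(slot)). A sketch of the slot-to-byte conversion,
assuming 4-byte stack slots:

    #include <cstdio>

    int main() {
      const int stack_slot_size = 4; // assumed: one 32-bit word per slot
      int sp_inc_slot = 6;           // hypothetical slot index
      printf("sp_inc at byte offset %d\n", sp_inc_slot * stack_slot_size); // 24
    }
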
1003 
1004   // Compute prolog code size
1005   _method_size = 0;
1006   _frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize;
1007 #if defined(IA64) && !defined(AIX)
1008   if (save_argument_registers()) {
1009     // 4815101: this is a stub with implicit and unknown precision fp args.
1010     // The usual spill mechanism can only generate stfd's in this case, which
1011     // doesn't work if the fp reg to spill contains a single-precision denorm.
1012     // Instead, we hack around the normal spill mechanism using stfspill's and
1013     // ldffill's in the MachProlog and MachEpilog emit methods.  We allocate
1014     // space here for the fp arg regs (f8-f15) we're going to thusly spill.
1015     //
1016     // If we ever implement 16-byte 'registers' == stack slots, we can
1017     // get rid of this hack and have SpillCopy generate stfspill/ldffill
1018     // instead of stfd/stfs/ldfd/ldfs.
1019     _frame_slots += 8*(16/BytesPerInt);
1020   }
1021 #endif
1022   assert(_frame_slots >= 0 && _frame_slots < 1000000, "sanity check");


1247           int nops_cnt = padding / nop_size;
1248           MachNode *nop = new MachNopNode(nops_cnt);
1249           block->insert_node(nop, j++);
1250           last_inst++;
1251           _cfg->map_node_to_block(nop, block);
1252           // Ensure enough space.
1253           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1254           if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1255             C->record_failure("CodeCache is full");
1256             return;
1257           }
1258           nop->emit(*cb, _regalloc);
1259           cb->flush_bundle(true);
1260           current_offset = cb->insts_size();
1261         }
1262 
1263         // Remember the start of the last call in a basic block
1264         if (is_mcall) {
1265           MachCallNode *mcall = mach->as_MachCall();
1266 
1267           if (mcall->entry_point() != NULL) {
1268             // This destination address is NOT PC-relative
1269             mcall->method_set((intptr_t)mcall->entry_point());
1270           }
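
Unlike the old version, the patched code only calls method_set() when
entry_point() is non-NULL, i.e. when the call's destination is already
known at emission time; a NULL entry point is presumably resolved and
patched in later. A minimal sketch of that guard (method_set here is a
stand-in):

    #include <cstdio>
    #include <cstdint>

    static void method_set(intptr_t dest) { // stand-in for the real call
      printf("record destination %p\n", (void*)dest);
    }

    int main() {
      intptr_t entry_point = 0; // hypothetical: target not yet known
      if (entry_point != 0) {
        method_set(entry_point);
      } else {
        printf("destination unknown; left for later patching\n");
      }
    }
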
1271 
1272           // Save the return address
1273           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1274 
1275           if (mcall->is_MachCallLeaf()) {
1276             is_mcall = false;
1277             is_sfn = false;
1278           }
1279         }
1280 
1281         // sfn will be valid whenever mcall is valid now because of inheritance
1282         if (is_sfn || is_mcall) {
1283 
1284           // Handle special safepoint nodes for synchronization
1285           if (!is_mcall) {
1286             MachSafePointNode *sfn = mach->as_MachSafePoint();
1287             // !!!!! Stubs only need an oopmap right now, so bail out
1288             if (sfn->jvms()->method() == NULL) {
1289               // Write the oopmap directly to the code blob??!!
1290               continue;

