src/hotspot/share/opto/output.cpp

  54 
  55 // Convert Nodes to instruction bits and pass off to the VM
  56 void Compile::Output() {
  57   // RootNode goes
  58   assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
  59 
  60   // The number of new nodes (mostly MachNop) is proportional to
  61   // the number of java calls and inner loops which are aligned.
  62   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
  63                             C->inner_loops()*(OptoLoopAlignment-1)),
  64                            "out of nodes before code generation" ) ) {
  65     return;
  66   }
  67   // Make sure I can find the Start Node
  68   Block *entry = _cfg->get_block(1);
  69   Block *broot = _cfg->get_root_block();
  70 
  71   const StartNode *start = entry->head()->as_Start();
  72 
  73   // Replace StartNode with prolog
  74   MachPrologNode *prolog = new MachPrologNode();
  75   entry->map_node(prolog, 0);
  76   _cfg->map_node_to_block(prolog, entry);
  77   _cfg->unmap_node_from_block(start); // start is no longer in any block
  78 
  79   // Virtual methods need an unverified entry point
  80   if (is_osr_compilation()) {
  81     if (PoisonOSREntry) {
  82       // TODO: Should use a ShouldNotReachHereNode...
  83       _cfg->insert( broot, 0, new MachBreakpointNode() );
  84     }
  85   } else {
  86     if (_method && !_method->flags().is_static()) {
  87       // Insert unvalidated entry point
  88       _cfg->insert(broot, 0, new MachUEPNode());
  89     }
  90   }
  91 
  92   // Break before main entry point
  93   if ((_method && C->directive()->BreakAtExecuteOption) ||
  94       (OptoBreakpoint && is_method_compilation())       ||
  95       (OptoBreakpointOSR && is_osr_compilation())       ||
  96       (OptoBreakpointC2R && !_method)                   ) {
  97     // checking for _method means that OptoBreakpoint does not apply to
  98     // runtime stubs or frame converters
  99     _cfg->insert( entry, 1, new MachBreakpointNode() );
 100   }
 101 
 102   // Insert epilogs before every return
 103   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 104     Block* block = _cfg->get_block(i);
 105     if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point?
 106       Node* m = block->end();
 107       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 108         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 109         block->add_inst(epilog);
 110         _cfg->map_node_to_block(epilog, block);
 111       }
 112     }
 113   }
 114 
 115   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
 116   blk_starts[0] = 0;
 117 
 118   // Initialize code buffer and process short branches.
 119   CodeBuffer* cb = init_buffer(blk_starts);
 120 
 121   if (cb == NULL || failing()) {
 122     return;
 123   }
 124 
 125   ScheduleAndBundle();
 126 
 127 #ifndef PRODUCT
 128   if (trace_opto_output()) {
 129     tty->print("\n---- After ScheduleAndBundle ----\n");
 130     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 131       tty->print("\nBB#%03d:\n", i);
 132       Block* block = _cfg->get_block(i);
 133       for (uint j = 0; j < block->number_of_nodes(); j++) {
 134         Node* n = block->get_node(j);
 135         OptoReg::Name reg = _regalloc->get_reg_first(n);
 136         tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
 137         n->dump();
 138       }
 139     }
 140   }
 141 #endif
 142 
 143   if (failing()) {
 144     return;


 955   debug_info->end_non_safepoint(pc_offset);
 956 }
 957 
 958 //------------------------------init_buffer------------------------------------
 959 CodeBuffer* Compile::init_buffer(uint* blk_starts) {
 960 
 961   // Set the initially allocated size
 962   int  code_req   = initial_code_capacity;
 963   int  locs_req   = initial_locs_capacity;
 964   int  stub_req   = initial_stub_capacity;
 965   int  const_req  = initial_const_capacity;
 966 
 967   int  pad_req    = NativeCall::instruction_size;
 968   // The extra spacing after the code is necessary on some platforms.
 969   // Sometimes we need to patch in a jump after the last instruction,
 970   // if the nmethod has been deoptimized.  (See 4932387, 4894843.)
 971 
 972   // Compute the byte offset where we can store the deopt pc.
 973   if (fixed_slots() != 0) {
 974     _orig_pc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_orig_pc_slot));
 975   }
 976 
 977   // Compute prolog code size
 978   _method_size = 0;
 979   _frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize;
 980 #if defined(IA64) && !defined(AIX)
 981   if (save_argument_registers()) {
 982     // 4815101: this is a stub with implicit and unknown precision fp args.
 983     // The usual spill mechanism can only generate stfd's in this case, which
 984     // doesn't work if the fp reg to spill contains a single-precision denorm.
 985     // Instead, we hack around the normal spill mechanism using stfspill's and
 986     // ldffill's in the MachProlog and MachEpilog emit methods.  We allocate
 987     // space here for the fp arg regs (f8-f15) we're going to thusly spill.
 988     //
 989     // If we ever implement 16-byte 'registers' == stack slots, we can
 990     // get rid of this hack and have SpillCopy generate stfspill/ldffill
 991     // instead of stfd/stfs/ldfd/ldfs.
 992     _frame_slots += 8*(16/BytesPerInt);
 993   }
 994 #endif




  54 
  55 // Convert Nodes to instruction bits and pass off to the VM
  56 void Compile::Output() {
  57   // RootNode goes
  58   assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
  59 
  60   // The number of new nodes (mostly MachNop) is proportional to
  61   // the number of java calls and inner loops which are aligned.
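       // (For example, with OptoLoopAlignment == 16, a method with 10 Java calls
       // and 2 aligned inner loops budgets NodeLimitFudgeFactor + 10*3 + 2*15
       // additional nodes before bailing out here.)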
  62   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
  63                             C->inner_loops()*(OptoLoopAlignment-1)),
  64                            "out of nodes before code generation" ) ) {
  65     return;
  66   }
  67   // Make sure I can find the Start Node
  68   Block *entry = _cfg->get_block(1);
  69   Block *broot = _cfg->get_root_block();
  70 
  71   const StartNode *start = entry->head()->as_Start();
  72 
  73   // Replace StartNode with prolog
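       // The verified_entry label is shared with the value type entry point
       // inserted below, giving that entry a branch target into the normal
       // prolog code once it has dealt with its unscalarized arguments.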
  74   Label verified_entry;
  75   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
  76   entry->map_node(prolog, 0);
  77   _cfg->map_node_to_block(prolog, entry);
  78   _cfg->unmap_node_from_block(start); // start is no longer in any block
  79 
  80   // Virtual methods need an unverified entry point
  81   bool has_value_entry = false;
  82   if (is_osr_compilation()) {
  83     if (PoisonOSREntry) {
  84       // TODO: Should use a ShouldNotReachHereNode...
  85       _cfg->insert( broot, 0, new MachBreakpointNode() );
  86     }
  87   } else {
  88     if (_method && !_method->flags().is_static()) {
  89       // Insert unvalidated entry point
  90       _cfg->insert(broot, 0, new MachUEPNode());
  91     }
  92     if (_method && _method->get_Method()->has_scalarized_args()) {
  93       // Insert value type entry point
  94       _cfg->insert(broot, 0, new MachVVEPNode(&verified_entry));
  95       has_value_entry = true;
  96     }
  97   }
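       // Note the resulting layout of the root block: the value type entry point
       // (when present) comes first, then the unverified entry point for
       // non-static methods; the prolog in the entry block follows them.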
  98 
  99   // Break before main entry point
 100   if ((_method && C->directive()->BreakAtExecuteOption) ||
 101       (OptoBreakpoint && is_method_compilation())       ||
 102       (OptoBreakpointOSR && is_osr_compilation())       ||
 103       (OptoBreakpointC2R && !_method)                   ) {
 104     // checking for _method means that OptoBreakpoint does not apply to
 105     // runtime stubs or frame converters
 106     _cfg->insert( entry, 1, new MachBreakpointNode() );
 107   }
 108 
 109   // Insert epilogs before every return
 110   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 111     Block* block = _cfg->get_block(i);
 112     if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point?
 113       Node* m = block->end();
 114       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 115         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 116         block->add_inst(epilog);
 117         _cfg->map_node_to_block(epilog, block);
 118       }
 119     }
 120   }
 121 
 122   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
 123   blk_starts[0] = 0;
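       // blk_starts[i] will hold the code buffer offset at which block i starts;
       // the entries are filled in while the buffer is sized and short branches
       // are processed below.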
 124 
 125   // Initialize code buffer and process short branches.
 126   CodeBuffer* cb = init_buffer(blk_starts);
 127 
 128   if (cb == NULL || failing()) {
 129     return;
 130   }
 131 
 132   if (has_value_entry) {
 133     // We added an entry point for unscalarized value types
 134     // Compute offset of "normal" entry point
 135     _code_offsets.set_value(CodeOffsets::Verified_Value_Entry, 0);
 136     uint entry_offset = -1; // will be patched later
 137     if (!_method->flags().is_static()) {
 138       MachVVEPNode* vvep = (MachVVEPNode*)broot->get_node(0);
 139       entry_offset = vvep->size(_regalloc);
 140     }
 141     _code_offsets.set_value(CodeOffsets::Entry, entry_offset);
 142   }
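       // The value type entry point sits at code offset 0, so for non-static
       // methods the "normal" entry (the unverified entry point) begins right
       // after it; for static methods the offset is patched in later.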
 143 
 144   ScheduleAndBundle();
 145 
 146 #ifndef PRODUCT
 147   if (trace_opto_output()) {
 148     tty->print("\n---- After ScheduleAndBundle ----\n");
 149     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 150       tty->print("\nBB#%03d:\n", i);
 151       Block* block = _cfg->get_block(i);
 152       for (uint j = 0; j < block->number_of_nodes(); j++) {
 153         Node* n = block->get_node(j);
 154         OptoReg::Name reg = _regalloc->get_reg_first(n);
 155         tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
 156         n->dump();
 157       }
 158     }
 159   }
 160 #endif
 161 
 162   if (failing()) {
 163     return;


 974   debug_info->end_non_safepoint(pc_offset);
 975 }
 976 
 977 //------------------------------init_buffer------------------------------------
 978 CodeBuffer* Compile::init_buffer(uint* blk_starts) {
 979 
 980   // Set the initially allocated size
 981   int  code_req   = initial_code_capacity;
 982   int  locs_req   = initial_locs_capacity;
 983   int  stub_req   = initial_stub_capacity;
 984   int  const_req  = initial_const_capacity;
 985 
 986   int  pad_req    = NativeCall::instruction_size;
 987   // The extra spacing after the code is necessary on some platforms.
 988   // Sometimes we need to patch in a jump after the last instruction,
 989   // if the nmethod has been deoptimized.  (See 4932387, 4894843.)
 990 
 991   // Compute the byte offset where we can store the deopt pc.
 992   if (fixed_slots() != 0) {
 993     _orig_pc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_orig_pc_slot));
 994   }
 995   if (C->needs_stack_repair()) {
 996     // Compute the byte offset of the stack increment value
 997     _sp_inc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_sp_inc_slot));
 998   }
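       // As with the deopt pc slot above, this turns a reserved fixed stack slot
       // into a byte offset from the frame so the stack increment value can be
       // located at runtime.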
 999 
1000   // Compute prolog code size
1001   _method_size = 0;
1002   _frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize;
1003 #if defined(IA64) && !defined(AIX)
1004   if (save_argument_registers()) {
1005     // 4815101: this is a stub with implicit and unknown precision fp args.
1006     // The usual spill mechanism can only generate stfd's in this case, which
1007     // doesn't work if the fp reg to spill contains a single-precision denorm.
1008     // Instead, we hack around the normal spill mechanism using stfspill's and
1009     // ldffill's in the MachProlog and MachEpilog emit methods.  We allocate
1010     // space here for the fp arg regs (f8-f15) we're going to thusly spill.
1011     //
1012     // If we ever implement 16-byte 'registers' == stack slots, we can
1013     // get rid of this hack and have SpillCopy generate stfspill/ldffill
1014     // instead of stfd/stfs/ldfd/ldfs.
1015     _frame_slots += 8*(16/BytesPerInt);
1016   }
1017 #endif

