/*
 * Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

class Arena;
class Bundle;
class Block;
class Block_Array;
class Node;
class Node_Array;
class Node_List;
class PhaseCFG;
class PhaseChaitin;
class PhaseRegAlloc;
class Pipeline_Use_Element;
class Pipeline_Use;

#ifndef PRODUCT
#define DEBUG_ARG(x) , x
#else
#define DEBUG_ARG(x)
#endif
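
// DEBUG_ARG splices an extra, debug-only argument into a parameter list or a
// call site without duplicating the declaration. A hedged sketch (the names
// below are illustrative, not from this file):
//
//   void emit_nop(uint count DEBUG_ARG(const char *reason));
//   ...
//   emit_nop(2 DEBUG_ARG("pad bundle to issue width"));
//
// In PRODUCT builds the macro expands to nothing, so the trailing parameter
// and argument disappear entirely.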

// Define the initial sizes for allocation of the resizable code buffer
enum {
  initial_code_capacity  =  16 * 1024,
  initial_stub_capacity  =   4 * 1024,
  initial_const_capacity =   4 * 1024,
  initial_locs_capacity  =   3 * 1024
};
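
// A hedged sketch of how such estimates are typically consumed (the exact
// buffer interface is assumed here, not taken from this file): the output
// phase reserves roughly this much space up front and lets the buffer grow
// on demand when a large method exceeds the estimate, e.g.:
//
//   code_buffer->initialize(initial_code_capacity, initial_locs_capacity);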

//------------------------------Scheduling----------------------------------
// This class contains all the information necessary to implement instruction
// scheduling and bundling.
class Scheduling {

private:
  // Arena to use
  Arena *_arena;

  // Control-Flow Graph info
  PhaseCFG *_cfg;

  // Register Allocation info
  PhaseRegAlloc *_regalloc;

  // Number of nodes in the method
  uint _node_bundling_limit;

  // List of scheduled nodes. Generated in reverse order
  Node_List _scheduled;

  // List of nodes currently available to be scheduled
  Node_List _available;

  // Mapping from node (index) to basic block
  Block_Array& _bbs;

  // For each instruction beginning a bundle, the number of following
  // nodes to be bundled with it.
  Bundle *_node_bundling_base;

  // Mapping from register to Node
  Node_List _reg_node;

  // Free list for pinch nodes.
  Node_List _pinch_free_list;

  // Latency from the beginning of the containing basic block (base 1)
  // for each node.
  unsigned short *_node_latency;

  // Number of uses of this node within the containing basic block.
  short *_uses;

  // Schedulable portion of current block.  Skips Region/Phi/CreateEx up
  // front, branch+proj at end.  Also skips Catch/CProj (same as
  // branch-at-end), plus just-prior exception-throwing call.
  uint _bb_start, _bb_end;
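
  // A hedged illustration (node kinds only; this layout is made up, not taken
  // from this file): for a block laid out as
  //   Region, Phi, Phi, AddI, LoadI, StoreI, If, IfTrue, IfFalse
  // the window [_bb_start, _bb_end) would cover AddI..StoreI, leaving the
  // leading Region/Phi group and the trailing branch with its projections
  // where they are.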

  // Latency from the end of the basic block as scheduled
  unsigned short *_current_latency;

  // Remember the next node
  Node *_next_node;

  // Use this for an unconditional branch delay slot
  Node *_unconditional_delay_slot;

  // Pointer to a Nop
  MachNopNode *_nop;

  // Length of the current bundle, in instructions
  uint _bundle_instr_count;

  // Current Cycle number, for computing latencies and bundling
  uint _bundle_cycle_number;

  // Bundle information
  Pipeline_Use_Element _bundle_use_elements[resource_count];
  Pipeline_Use         _bundle_use;

  // Dump the available list
  void dump_available() const;

public:
  Scheduling(Arena *arena, Compile &compile);

  // Destructor
  NOT_PRODUCT( ~Scheduling(); )

  // Step ahead "i" cycles
  void step(uint i);

  // Step ahead 1 cycle, and clear the bundle state (for example,
  // at a branch target)
  void step_and_clear();

  Bundle* node_bundling(const Node *n) {
    assert(valid_bundle_info(n), "oob");
    return (&_node_bundling_base[n->_idx]);
  }

  bool valid_bundle_info(const Node *n) const {
    return (_node_bundling_limit > n->_idx);
  }

  bool starts_bundle(const Node *n) const {
    return (_node_bundling_limit > n->_idx && _node_bundling_base[n->_idx].starts_bundle());
  }
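
  // A hedged usage sketch (the caller shown is assumed, not part of this
  // class): a debug printer walking the scheduled nodes of a block would
  // typically query these accessors per node, e.g.:
  //
  //   if (valid_bundle_info(n) && starts_bundle(n))
  //     tty->print("+");                 // mark the first instruction of a bundle
  //   Bundle *bundle = node_bundling(n); // then inspect bundle-local details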

  // Do the scheduling
  void DoScheduling();

  // Compute the local latencies walking forward over the list of
  // nodes for a basic block
  void ComputeLocalLatenciesForward(const Block *bb);

  // Compute the register antidependencies within a basic block
  void ComputeRegisterAntidependencies(Block *bb);
  void verify_do_def( Node *n, OptoReg::Name def, const char *msg );
  void verify_good_schedule( Block *b, const char *msg );
  void anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def );
  void anti_do_use( Block *b, Node *use, OptoReg::Name use_reg );

  // Add a node to the current bundle
  void AddNodeToBundle(Node *n, const Block *bb);

  // Add a node to the list of available nodes
  void AddNodeToAvailableList(Node *n);

  // Compute the local use count for the nodes in a block, and make
  // instructions that have no uses within the block initially available
  void ComputeUseCount(const Block *bb);

  // Choose an instruction from the available list to add to the bundle
  Node * ChooseNodeToBundle();

  // See if this Node fits into the currently accumulating bundle
  bool NodeFitsInBundle(Node *n);

  // Decrement the use count for a node
  void DecrementUseCounts(Node *n, const Block *bb);

  // Garbage collect pinch nodes for reuse by other blocks.
  void garbage_collect_pinch_nodes();
  // Clean up a pinch node for reuse (helper for above).
  void cleanup_pinch( Node *pinch );

  // Information for statistics gathering
#ifndef PRODUCT
private:
  // Gather information on size of nops relative to total
  uint _branches, _unconditional_delays;

  static uint _total_nop_size, _total_method_size;
  static uint _total_branches, _total_unconditional_delays;
  static uint _total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1];

public:
  static void print_statistics();

  static void increment_instructions_per_bundle(uint i) {
    _total_instructions_per_bundle[i]++;
  }

  static void increment_nop_size(uint s) {
    _total_nop_size += s;
  }

  static void increment_method_size(uint s) {
    _total_method_size += s;
  }
#endif

};
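
// A minimal driver sketch (assumed caller code, not part of this header):
// the pass is constructed over a resource arena for the current compilation
// and run once, after register allocation, roughly as:
//
//   Scheduling scheduling(Thread::current()->resource_area(), *this);
//   scheduling.DoScheduling();
//
// DoScheduling walks the blocks, fills bundles from the available list, and
// records the result in the per-node Bundle array queried through the
// accessors above.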