/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_OUTPUT_HPP
#define SHARE_VM_OPTO_OUTPUT_HPP

#include "opto/block.hpp"
#include "opto/node.hpp"
#if defined AD_MD_HPP
# include AD_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif

class Arena;
class Bundle;
class Block;
class Block_Array;
class Compile;
class MachNopNode;
class Node;
class Node_Array;
class Node_List;
class PhaseCFG;
class PhaseChaitin;
class PhaseRegAlloc;
class Pipeline_Use_Element;
class Pipeline_Use;

#ifndef PRODUCT
#define DEBUG_ARG(x) , x
#else
#define DEBUG_ARG(x)
#endif

// Define the initial sizes for allocation of the resizable code buffer
enum {
  initial_code_capacity  =  16 * 1024,
  initial_stub_capacity  =   4 * 1024,
  initial_const_capacity =   4 * 1024,
  initial_locs_capacity  =   3 * 1024
};

//------------------------------Scheduling----------------------------------
// This class contains all the information necessary to implement instruction
// scheduling and bundling.
class Scheduling {

private:
  // Arena to use
  Arena *_arena;

  // Control-Flow Graph info
  PhaseCFG *_cfg;

  // Register Allocation info
  PhaseRegAlloc *_regalloc;

  // Number of nodes in the method
  uint _node_bundling_limit;

  // List of scheduled nodes. Generated in reverse order
  Node_List _scheduled;

  // List of nodes currently available for choosing for scheduling
  Node_List _available;

  // For each instruction beginning a bundle, the number of following
  // nodes to be bundled with it.
  Bundle *_node_bundling_base;

  // Mapping from register to Node
  Node_List _reg_node;

  // Free list for pinch nodes.
  Node_List _pinch_free_list;

  // Latency from the beginning of the containing basic block (base 1)
  // for each node.
  unsigned short *_node_latency;

  // Number of uses of this node within the containing basic block.
  short *_uses;

  // Schedulable portion of current block.  Skips Region/Phi/CreateEx up
  // front, branch+proj at end.  Also skips Catch/CProj (same as
  // branch-at-end), plus just-prior exception-throwing call.
  uint _bb_start, _bb_end;

  // Latency from the end of the basic block as scheduled
  unsigned short *_current_latency;

  // Remember the next node
  Node *_next_node;

  // Use this for an unconditional branch delay slot
  Node *_unconditional_delay_slot;

  // Pointer to a Nop
  MachNopNode *_nop;

  // Length of the current bundle, in instructions
  uint _bundle_instr_count;

  // Current Cycle number, for computing latencies and bundling
  uint _bundle_cycle_number;

  // Bundle information
  Pipeline_Use_Element _bundle_use_elements[resource_count];
  Pipeline_Use         _bundle_use;

  // Dump the available list
  void dump_available() const;

public:
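  // Constructor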
  Scheduling(Arena *arena, Compile &compile);

  // Destructor
  NOT_PRODUCT( ~Scheduling(); )

  // Step ahead "i" cycles
  void step(uint i);

  // Step ahead 1 cycle, and clear the bundle state (for example,
  // at a branch target)
  void step_and_clear();

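  // Return the bundle information for the given node; asserts the index is in range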
  Bundle* node_bundling(const Node *n) {
    assert(valid_bundle_info(n), "oob");
    return (&_node_bundling_base[n->_idx]);
  }

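  // Check whether bundle information exists for the given node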
  bool valid_bundle_info(const Node *n) const {
    return (_node_bundling_limit > n->_idx);
  }

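  // Check whether the given node begins a new bundle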
  bool starts_bundle(const Node *n) const {
    return (_node_bundling_limit > n->_idx && _node_bundling_base[n->_idx].starts_bundle());
  }

  // Do the scheduling
  void DoScheduling();

  // Compute the local latencies walking forward over the list of
  // nodes for a basic block
  void ComputeLocalLatenciesForward(const Block *bb);

  // Compute the register antidependencies within a basic block
  void ComputeRegisterAntidependencies(Block *bb);
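  // Debug checks that the computed schedule respects register dependencies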
  void verify_do_def( Node *n, OptoReg::Name def, const char *msg );
  void verify_good_schedule( Block *b, const char *msg );
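  // Helpers for ComputeRegisterAntidependencies: record a def or use of a
  // register within a block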
  void anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def );
  void anti_do_use( Block *b, Node *use, OptoReg::Name use_reg );

  // Add a node to the current bundle
  void AddNodeToBundle(Node *n, const Block *bb);

  // Add a node to the list of available nodes
  void AddNodeToAvailableList(Node *n);

  // Compute the local use count for the nodes in a block, and compute
  // the list of instructions with no uses in the block as available
  void ComputeUseCount(const Block *bb);

  // Choose an instruction from the available list to add to the bundle
  Node * ChooseNodeToBundle();

  // See if this Node fits into the currently accumulating bundle
  bool NodeFitsInBundle(Node *n);

  // Decrement the use count for a node
  void DecrementUseCounts(Node *n, const Block *bb);

  // Garbage collect pinch nodes for reuse by other blocks.
  void garbage_collect_pinch_nodes();
  // Clean up a pinch node for reuse (helper for above).
  void cleanup_pinch( Node *pinch );

  // Information for statistics gathering
#ifndef PRODUCT
private:
  // Counts of branches and of filled unconditional delay slots for this compile
  uint _branches, _unconditional_delays;

  // Cumulative statistics across all compiles: size of nops relative to total
  // method size, branch and delay-slot counts, and the distribution of
  // instructions per bundle
  static uint _total_nop_size, _total_method_size;
  static uint _total_branches, _total_unconditional_delays;
  static uint _total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1];

public:
  static void print_statistics();

  static void increment_instructions_per_bundle(uint i) {
    _total_instructions_per_bundle[i]++;
  }

  static void increment_nop_size(uint s) {
    _total_nop_size += s;
  }

  static void increment_method_size(uint s) {
    _total_method_size += s;
  }
#endif

};

#endif // SHARE_VM_OPTO_OUTPUT_HPP