/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_C1_C1_LINEARSCAN_HPP
#define SHARE_VM_C1_C1_LINEARSCAN_HPP

#include "c1/c1_FpuStackSim.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_IR.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "utilities/macros.hpp"

class DebugInfoCache;
class FpuStackAllocator;
class IRScopeDebugInfo;
class Interval;
class IntervalWalker;
class LIRGenerator;
class LinearScan;
class MoveResolver;
class Range;

typedef GrowableArray<Interval*> IntervalArray;
typedef GrowableArray<Interval*> IntervalList;
typedef GrowableArray<IntervalList*> IntervalsList;
typedef GrowableArray<ScopeValue*> ScopeValueArray;
typedef GrowableArray<LIR_OpList*> LIR_OpListStack;

enum IntervalUseKind {
  // priority of use kinds must be ascending
  noUse = 0,
  loopEndMarker = 1,
  shouldHaveRegister = 2,
  mustHaveRegister = 3,

  firstValidKind = 1,
  lastValidKind = 3
};

enum IntervalKind {
  fixedKind = 0,  // interval pre-colored by LIR_Generator
  anyKind   = 1,  // no register/memory allocated by LIR_Generator
  nofKinds,
  firstKind = fixedKind
};


// during linear scan an interval is in one of four states
enum IntervalState {
  unhandledState = 0, // unhandled state (not processed yet)
  activeState   = 1,  // live and is in a physical register
  inactiveState = 2,  // in a lifetime hole and is in a physical register
  handledState  = 3,  // spilled or no longer live
  invalidState = -1
};


enum IntervalSpillState {
  noDefinitionFound,  // starting state of calculation: no definition found yet
  oneDefinitionFound, // one definition has already been found.
                      // Note: two consecutive definitions are treated as one (e.g. consecutive move and add because of two-operand LIR form)
                      // the position of this definition is stored in _definition_pos
  oneMoveInserted,    // one spill move has already been inserted.
  storeAtDefinition,  // the interval should be stored immediately after its definition because otherwise
                      // there would be multiple redundant stores
  startInMemory,      // the interval starts in memory (e.g. method parameter), so a store is never necessary
  noOptimization      // the interval has more than one definition (e.g. resulting from phi moves), so stores to memory are not optimized
};
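// Illustrative sketch (an assumption about typical use, not a statement from this file):
// for a virtual register with a single definition that later has to be spilled, the
// state is expected to progress roughly as
//   noDefinitionFound -> oneDefinitionFound -> oneMoveInserted -> storeAtDefinition
// which lets eliminate_spill_moves() keep only the store right after the definition
// and drop later redundant register->stack moves.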


#define for_each_interval_kind(kind) \
  for (IntervalKind kind = firstKind; kind < nofKinds; kind = (IntervalKind)(kind + 1))

#define for_each_visitor_mode(mode) \
  for (LIR_OpVisitState::OprMode mode = LIR_OpVisitState::firstMode; mode < LIR_OpVisitState::numModes; mode = (LIR_OpVisitState::OprMode)(mode + 1))
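// Usage sketch (illustrative only; the loop body is a made-up example, not code from
// this file):
//
//   for_each_interval_kind(kind) {
//     _unhandled_first[kind] = Interval::end();   // e.g. reset one list per kind
//   }
//
// expands to a plain for-loop that iterates kind over fixedKind and anyKind.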


class LinearScan : public CompilationResourceObj {
  // declare classes used by LinearScan as friends because they
  // need a wide variety of functions declared here
  //
  // Only the small interface to the rest of the compiler is public
  friend class Interval;
  friend class IntervalWalker;
  friend class LinearScanWalker;
  friend class FpuStackAllocator;
  friend class MoveResolver;
  friend class LinearScanStatistic;
  friend class LinearScanTimers;
  friend class RegisterVerifier;

 public:
  enum {
    any_reg = -1,
    nof_cpu_regs = pd_nof_cpu_regs_linearscan,
    nof_fpu_regs = pd_nof_fpu_regs_linearscan,
    nof_xmm_regs = pd_nof_xmm_regs_linearscan,
    nof_regs = nof_cpu_regs + nof_fpu_regs + nof_xmm_regs
  };

 private:
  Compilation*              _compilation;
  IR*                       _ir;
  LIRGenerator*             _gen;
  FrameMap*                 _frame_map;

  BlockList                 _cached_blocks;     // cached list with all blocks in linear-scan order (only correct as long as the original list remains unchanged)
  int                       _num_virtual_regs;  // number of virtual registers (without new registers introduced because of splitting intervals)
  bool                      _has_fpu_registers; // true if this method uses any floating point registers (and so fpu stack allocation is necessary)
  int                       _num_calls;         // total number of calls in this method
  int                       _max_spills;        // number of stack slots used for intervals allocated to memory
  int                       _unused_spill_slot; // unused spill slot for a single-word value because of alignment of a double-word value

  IntervalList              _intervals;         // mapping from register number to interval
  IntervalList*             _new_intervals_from_allocation; // list with all intervals created during allocation when an existing interval is split
  IntervalArray*            _sorted_intervals;  // intervals sorted by Interval::from()
  bool                      _needs_full_resort; // set to true if an Interval::from() is changed and _sorted_intervals must be resorted

  LIR_OpArray               _lir_ops;           // mapping from LIR_Op id to LIR_Op node
  BlockBeginArray           _block_of_op;       // mapping from LIR_Op id to the BlockBegin containing this instruction
  ResourceBitMap            _has_info;          // bit set for each LIR_Op id that has a CodeEmitInfo
  ResourceBitMap            _has_call;          // bit set for each LIR_Op id that destroys all caller save registers
  BitMap2D                  _interval_in_loop;  // 2D bit map: bit (virtual register, loop) is set if the register is used inside the loop

  // cached debug info to prevent multiple creation of same object
  // TODO: cached scope values for registers could be static
  ScopeValueArray           _scope_value_cache;

  static ConstantOopWriteValue* _oop_null_scope_value;
  static ConstantIntValue*    _int_m1_scope_value;
  static ConstantIntValue*    _int_0_scope_value;
  static ConstantIntValue*    _int_1_scope_value;
  static ConstantIntValue*    _int_2_scope_value;

  // accessors
  IR*           ir() const                       { return _ir; }
  Compilation*  compilation() const              { return _compilation; }
  LIRGenerator* gen() const                      { return _gen; }
  FrameMap*     frame_map() const                { return _frame_map; }

  // unified bailout support
  void          bailout(const char* msg) const   { compilation()->bailout(msg); }
  bool          bailed_out() const               { return compilation()->bailed_out(); }

  // access to block list (sorted in linear scan order)
  int           block_count() const              { assert(_cached_blocks.length() == ir()->linear_scan_order()->length(), "invalid cached block list"); return _cached_blocks.length(); }
  BlockBegin*   block_at(int idx) const          { assert(_cached_blocks.at(idx) == ir()->linear_scan_order()->at(idx), "invalid cached block list");   return _cached_blocks.at(idx); }

  int           num_virtual_regs() const         { return _num_virtual_regs; }
  // size of live_in and live_out sets of BasicBlocks (BitMap needs rounded size for iteration)
  int           live_set_size() const            { return align_up(_num_virtual_regs, BitsPerWord); }
  bool          has_fpu_registers() const        { return _has_fpu_registers; }
  int           num_loops() const                { return ir()->num_loops(); }
  bool          is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); }

  // handling of fpu stack allocation (platform dependent, needed for debug information generation)
#ifdef X86
  FpuStackAllocator* _fpu_stack_allocator;
  bool use_fpu_stack_allocation() const          { return UseSSE < 2 && has_fpu_registers(); }
#else
  bool use_fpu_stack_allocation() const          { return false; }
#endif


  // access to interval list
  int           interval_count() const           { return _intervals.length(); }
  Interval*     interval_at(int reg_num) const   { return _intervals.at(reg_num); }

  IntervalList* new_intervals_from_allocation() const { return _new_intervals_from_allocation; }

  // access to LIR_Ops and Blocks indexed by op_id
  int          max_lir_op_id() const                { assert(_lir_ops.length() > 0, "no operations"); return (_lir_ops.length() - 1) << 1; }
  LIR_Op*      lir_op_with_id(int op_id) const      { assert(op_id >= 0 && op_id <= max_lir_op_id() && op_id % 2 == 0, "op_id out of range or not even"); return _lir_ops.at(op_id >> 1); }
  BlockBegin*  block_of_op_with_id(int op_id) const { assert(_block_of_op.length() > 0 && op_id >= 0 && op_id <= max_lir_op_id() + 1, "op_id out of range"); return _block_of_op.at(op_id >> 1); }

  bool is_block_begin(int op_id)                    { return op_id == 0 || block_of_op_with_id(op_id) != block_of_op_with_id(op_id - 1); }
  bool covers_block_begin(int op_id_1, int op_id_2) { return block_of_op_with_id(op_id_1) != block_of_op_with_id(op_id_2); }

  bool has_call(int op_id)                          { assert(op_id % 2 == 0, "must be even"); return _has_call.at(op_id >> 1); }
  bool has_info(int op_id)                          { assert(op_id % 2 == 0, "must be even"); return _has_info.at(op_id >> 1); }
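  // Note on op_id numbering (a summary derived from the accessors above, not added
  // behaviour): op_ids are expected to be the even numbers 0, 2, 4, ... - hence the
  // op_id % 2 == 0 asserts and the op_id >> 1 indexing - so the odd positions in
  // between stay free, e.g. for moves that are inserted later without renumbering.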


  // functions for converting LIR-Operands to register numbers
  static bool is_valid_reg_num(int reg_num)         { return reg_num >= 0; }
  static int  reg_num(LIR_Opr opr);
  static int  reg_numHi(LIR_Opr opr);

  // functions for classification of intervals
  static bool is_precolored_interval(const Interval* i);
  static bool is_virtual_interval(const Interval* i);

  static bool is_precolored_cpu_interval(const Interval* i);
  static bool is_virtual_cpu_interval(const Interval* i);
  static bool is_precolored_fpu_interval(const Interval* i);
  static bool is_virtual_fpu_interval(const Interval* i);

  static bool is_in_fpu_register(const Interval* i);
  static bool is_oop_interval(const Interval* i);


  // General helper functions
  int         allocate_spill_slot(bool double_word);
  void        assign_spill_slot(Interval* it);
  void        propagate_spill_slots();

  Interval*   create_interval(int reg_num);
  void        append_interval(Interval* it);
  void        copy_register_flags(Interval* from, Interval* to);

  // platform dependent functions
  static bool is_processed_reg_num(int reg_num);
  static int  num_physical_regs(BasicType type);
  static bool requires_adjacent_regs(BasicType type);
  static bool is_caller_save(int assigned_reg);

  // spill move optimization: eliminate moves from register to stack if
  // stack slot is known to be correct
  void        change_spill_definition_pos(Interval* interval, int def_pos);
  void        change_spill_state(Interval* interval, int spill_pos);
  static bool must_store_at_definition(const Interval* i);
  void        eliminate_spill_moves();

  // Phase 1: number all instructions in all blocks
  void number_instructions();

  // Phase 2: compute local live sets separately for each block
  // (sets live_gen and live_kill for each block)
  //
  // helper methods used by compute_local_live_sets()
  void set_live_gen_kill(Value value, LIR_Op* op, BitMap& live_gen, BitMap& live_kill);

  void compute_local_live_sets();

  // Phase 3: perform a backward dataflow analysis to compute global live sets
  // (sets live_in and live_out for each block)
  void compute_global_live_sets();


  // Phase 4: build intervals
  // (fills the list _intervals)
  //
  // helper methods used by build_intervals()
  void add_use (Value value, int from, int to, IntervalUseKind use_kind);

  void add_def (LIR_Opr opr, int def_pos,      IntervalUseKind use_kind);
  void add_use (LIR_Opr opr, int from, int to, IntervalUseKind use_kind);
  void add_temp(LIR_Opr opr, int temp_pos,     IntervalUseKind use_kind);

  void add_def (int reg_num, int def_pos,      IntervalUseKind use_kind, BasicType type);
  void add_use (int reg_num, int from, int to, IntervalUseKind use_kind, BasicType type);
  void add_temp(int reg_num, int temp_pos,     IntervalUseKind use_kind, BasicType type);

  // Add platform dependent kills for particular LIR ops.  Can be used
  // to add platform dependent behaviour for some operations.
  void pd_add_temps(LIR_Op* op);

  IntervalUseKind use_kind_of_output_operand(LIR_Op* op, LIR_Opr opr);
  IntervalUseKind use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr);
  void handle_method_arguments(LIR_Op* op);
  void handle_doubleword_moves(LIR_Op* op);
  void add_register_hints(LIR_Op* op);

  void build_intervals();


  // Phase 5: actual register allocation
  // (Uses LinearScanWalker)
  //
  // helper functions for building a sorted list of intervals
  NOT_PRODUCT(bool is_sorted(IntervalArray* intervals);)
  static int interval_cmp(Interval** a, Interval** b);
  void add_to_list(Interval** first, Interval** prev, Interval* interval);
  void create_unhandled_lists(Interval** list1, Interval** list2, bool (is_list1)(const Interval* i), bool (is_list2)(const Interval* i));

  void sort_intervals_before_allocation();
  void sort_intervals_after_allocation();
  void allocate_registers();


  // Phase 6: resolve data flow
  // (insert moves at edges between blocks if intervals have been split)
  //
  // helper functions for resolve_data_flow()
  Interval* split_child_at_op_id(Interval* interval, int op_id, LIR_OpVisitState::OprMode mode);
  Interval* interval_at_block_begin(BlockBegin* block, int reg_num);
  Interval* interval_at_block_end(BlockBegin* block, int reg_num);
  Interval* interval_at_op_id(int reg_num, int op_id);
  void resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
  void resolve_find_insert_pos(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
  void resolve_data_flow();

  void resolve_exception_entry(BlockBegin* block, int reg_num, MoveResolver &move_resolver);
  void resolve_exception_entry(BlockBegin* block, MoveResolver &move_resolver);
  void resolve_exception_edge(XHandler* handler, int throwing_op_id, int reg_num, Phi* phi, MoveResolver &move_resolver);
  void resolve_exception_edge(XHandler* handler, int throwing_op_id, MoveResolver &move_resolver);
  void resolve_exception_handlers();

  // Phase 7: assign register numbers back to LIR
  // (includes computation of debug information and oop maps)
  //
  // helper functions for assign_reg_num()
  VMReg vm_reg_for_interval(Interval* interval);
  VMReg vm_reg_for_operand(LIR_Opr opr);

  static LIR_Opr operand_for_interval(Interval* interval);
  static LIR_Opr calc_operand_for_interval(const Interval* interval);
  LIR_Opr       canonical_spill_opr(Interval* interval);

  LIR_Opr color_lir_opr(LIR_Opr opr, int id, LIR_OpVisitState::OprMode);

  // methods used for oop map computation
  IntervalWalker* init_compute_oop_maps();
  OopMap*         compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo* info, bool is_call_site);
  void            compute_oop_map(IntervalWalker* iw, const LIR_OpVisitState &visitor, LIR_Op* op);

  // methods used for debug information computation
  void init_compute_debug_info();

  MonitorValue*  location_for_monitor_index(int monitor_index);
  LocationValue* location_for_name(int name, Location::Type loc_type);
  void set_oop(OopMap* map, VMReg name) {
    if (map->legal_vm_reg_name(name)) {
      map->set_oop(name);
    } else {
      bailout("illegal oopMap register name");
    }
  }

  int append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
  int append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
  int append_scope_value(int op_id, Value value, GrowableArray<ScopeValue*>* scope_values);

  IRScopeDebugInfo* compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state);
  void compute_debug_info(CodeEmitInfo* info, int op_id);

  void assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw);
  void assign_reg_num();


  // Phase 8: fpu stack allocation
  // (Used only on x86 when fpu operands are present)
  void allocate_fpu_stack();


  // helper functions for printing state
#ifndef PRODUCT
  static void print_bitmap(BitMap& bitmap);
  void        print_intervals(const char* label);
  void        print_lir(int level, const char* label, bool hir_valid = true);
#endif

#ifdef ASSERT
  // verification functions for allocation
  // (check that all intervals have a correct register and that no registers are overwritten)
  void verify();
  void verify_intervals();
  void verify_no_oops_in_fixed_intervals();
  void verify_constants();
  void verify_registers();
#endif

 public:
  // creation
  LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map);

  // main entry function: perform linear scan register allocation
  void             do_linear_scan();

  // accessors used by Compilation
  int         max_spills()  const { return _max_spills; }
  int         num_calls() const   { assert(_num_calls >= 0, "not set"); return _num_calls; }

  // entry functions for printing
#ifndef PRODUCT
  static void print_statistics();
  static void print_timers(double total);
#endif
};


// Helper class for ordering moves that are inserted at the same position in the LIR.
// When moves between registers are inserted, it is important that the moves are
// ordered such that no register is overwritten. So moves from register to stack
// are processed before moves from stack to register. When moves have circular
// dependencies, a temporary stack slot is used to break the cycle.
// The same logic is needed in the LinearScanWalker and in LinearScan during resolve_data_flow,
// so it is factored out into this separate class.
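// Illustrative example (a hedged sketch, not taken from the implementation): with the
// pending mappings r1 -> r2 and r2 -> r1, neither move can be emitted first without
// clobbering the other one's source, so one value is saved to a temporary stack slot
// first, e.g.
//   stack_tmp <- r1;   r1 <- r2;   r2 <- stack_tmp;
// Non-circular mappings are simply emitted in an order where a register is only
// overwritten after all pending moves that read it have been emitted.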
class MoveResolver: public StackObj {
 private:
  LinearScan*      _allocator;

  LIR_List*        _insert_list;
  int              _insert_idx;
  LIR_InsertionBuffer _insertion_buffer; // buffer where moves are inserted

  IntervalList     _mapping_from;
  LIR_OprList      _mapping_from_opr;
  IntervalList     _mapping_to;
  bool             _multiple_reads_allowed;
  int              _register_blocked[LinearScan::nof_regs];

  int  register_blocked(int reg)                    { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); return _register_blocked[reg]; }
  void set_register_blocked(int reg, int direction) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); assert(direction == 1 || direction == -1, "out of bounds"); _register_blocked[reg] += direction; }

  void block_registers(Interval* it);
  void unblock_registers(Interval* it);
  bool save_to_process_move(Interval* from, Interval* to);

  void create_insertion_buffer(LIR_List* list);
  void append_insertion_buffer();
  void insert_move(Interval* from_interval, Interval* to_interval);
  void insert_move(LIR_Opr from_opr, Interval* to_interval);

  DEBUG_ONLY(void verify_before_resolve();)
  void resolve_mappings();
 public:
  MoveResolver(LinearScan* allocator);

  DEBUG_ONLY(void check_empty();)
  void set_multiple_reads_allowed() { _multiple_reads_allowed = true; }
  void set_insert_position(LIR_List* insert_list, int insert_idx);
  void move_insert_position(LIR_List* insert_list, int insert_idx);
  void add_mapping(Interval* from, Interval* to);
  void add_mapping(LIR_Opr from, Interval* to);
  void resolve_and_append_moves();

  LinearScan* allocator()   { return _allocator; }
  bool has_mappings()       { return _mapping_from.length() > 0; }
};


class Range : public CompilationResourceObj {
  friend class Interval;

 private:
  static Range*    _end;       // sentinel (from == to == max_jint)

  int              _from;      // from (inclusive)
  int              _to;        // to (exclusive)
  Range*           _next;      // linear list of Ranges

  // used only by class Interval, so hide them
  bool             intersects(Range* r) const    { return intersects_at(r) != -1; }
  int              intersects_at(Range* r) const;

 public:
  Range(int from, int to, Range* next);

  static void      initialize(Arena* arena);
  static Range*    end()                         { return _end; }

  int              from() const                  { return _from; }
  int              to()   const                  { return _to; }
  Range*           next() const                  { return _next; }
  void             set_from(int from)            { _from = from; }
  void             set_to(int to)                { _to = to; }
  void             set_next(Range* next)         { _next = next; }

  // for testing
  void             print(outputStream* out = tty) const PRODUCT_RETURN;
};


// Interval is an ordered list of disjoint ranges.

// For pre-colored double word LIR_Oprs, one interval is created for
// the low word register and one is created for the high word register.
// On Intel, only one interval is created for FPU double registers.  At
// all times assigned_reg contains the reg. number of the physical
// register.

// For LIR_Oprs in virtual registers a single interval can represent
// single and double word values.  When a physical register is
// assigned to the interval, assigned_reg contains the
// phys. reg. number and, for double word values, assigned_regHi the
// phys. reg. number of the high word if there is any.  For spilled
// intervals assigned_reg contains the stack index.  assigned_regHi is
// always -1.
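//
// Illustrative example (a sketch, not taken from the implementation): a virtual T_LONG
// interval that ends up in two CPU registers carries both register numbers in
// assigned_reg and assigned_regHi; if the same interval is spilled instead,
// assigned_reg holds the stack slot index and assigned_regHi is -1 (any_reg).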

class Interval : public CompilationResourceObj {
 private:
  static Interval* _end;          // sentinel (interval with only range Range::end())

  int              _reg_num;
  BasicType        _type;         // valid only for virtual registers
  Range*           _first;        // sorted list of Ranges
  intStack         _use_pos_and_kinds; // sorted list of use-positions and their corresponding use-kinds

  Range*           _current;      // interval iteration: the current Range
  Interval*        _next;         // interval iteration: sorted list of Intervals (ends with sentinel)
  IntervalState    _state;        // interval iteration: the set this interval currently belongs to


  int              _assigned_reg;
  int              _assigned_regHi;

  int              _cached_to;    // cached value: to of last range (-1: not cached)
  LIR_Opr          _cached_opr;
  VMReg            _cached_vm_reg;

  Interval*        _split_parent;           // the original interval this interval was derived from
  IntervalList     _split_children;         // list of all intervals that are split off from this interval (only available for split parents)
  Interval*        _current_split_child;    // the split child that was most recently active or inactive (always stored in split parents)

  int              _canonical_spill_slot;   // the stack slot where all split parts of this interval are spilled to (always stored in split parents)
  bool             _insert_move_when_activated; // true if a move is inserted between _current_split_child and this interval when this interval becomes active for the first time
  IntervalSpillState _spill_state;          // for spill move optimization
  int              _spill_definition_pos;   // position where the interval is defined (if defined only once)
  Interval*        _register_hint;          // this interval should be in the same register as the hint interval

  int              calc_to();
  Interval*        new_split_child();
 public:
  Interval(int reg_num);

  static void      initialize(Arena* arena);
  static Interval* end()                         { return _end; }

  // accessors
  int              reg_num() const               { return _reg_num; }
  void             set_reg_num(int r)            { assert(_reg_num == -1, "cannot change reg_num"); _reg_num = r; }
  BasicType        type() const                  { assert(_reg_num == -1 || _reg_num >= LIR_OprDesc::vreg_base, "cannot access type for fixed interval"); return _type; }
  void             set_type(BasicType type)      { assert(_reg_num < LIR_OprDesc::vreg_base || _type == T_ILLEGAL || _type == type, "overwriting existing type"); _type = type; }

  Range*           first() const                 { return _first; }
  int              from() const                  { return _first->from(); }
  int              to()                          { if (_cached_to == -1) _cached_to = calc_to(); assert(_cached_to == calc_to(), "invalid cached value"); return _cached_to; }
  int              num_use_positions() const     { return _use_pos_and_kinds.length() / 2; }

  Interval*        next() const                  { return _next; }
  Interval**       next_addr()                   { return &_next; }
  void             set_next(Interval* next)      { _next = next; }

  int              assigned_reg() const          { return _assigned_reg; }
  int              assigned_regHi() const        { return _assigned_regHi; }
  void             assign_reg(int reg)           { _assigned_reg = reg; _assigned_regHi = LinearScan::any_reg; }
  void             assign_reg(int reg,int regHi) { _assigned_reg = reg; _assigned_regHi = regHi; }

  Interval*        register_hint(bool search_split_child = true) const; // calculation needed
  void             set_register_hint(Interval* i) { _register_hint = i; }

  int              state() const                 { return _state; }
  void             set_state(IntervalState s)    { _state = s; }

  // access to split parent and split children
  bool             is_split_parent() const       { return _split_parent == this; }
  bool             is_split_child() const        { return _split_parent != this; }
  Interval*        split_parent() const          { assert(_split_parent->is_split_parent(), "must be"); return _split_parent; }
  Interval*        split_child_at_op_id(int op_id, LIR_OpVisitState::OprMode mode);
  Interval*        split_child_before_op_id(int op_id);
  bool             split_child_covers(int op_id, LIR_OpVisitState::OprMode mode);
  DEBUG_ONLY(void  check_split_children();)

  // information stored in split parent, but available for all children
  int              canonical_spill_slot() const            { return split_parent()->_canonical_spill_slot; }
  void             set_canonical_spill_slot(int slot)      { assert(split_parent()->_canonical_spill_slot == -1, "overwriting existing value"); split_parent()->_canonical_spill_slot = slot; }
  Interval*        current_split_child() const             { return split_parent()->_current_split_child; }
  void             make_current_split_child()              { split_parent()->_current_split_child = this; }

  bool             insert_move_when_activated() const      { return _insert_move_when_activated; }
  void             set_insert_move_when_activated(bool b)  { _insert_move_when_activated = b; }

  // for spill optimization
  IntervalSpillState spill_state() const         { return split_parent()->_spill_state; }
  int              spill_definition_pos() const  { return split_parent()->_spill_definition_pos; }
  void             set_spill_state(IntervalSpillState state) {  assert(state >= spill_state(), "state cannot decrease"); split_parent()->_spill_state = state; }
  void             set_spill_definition_pos(int pos) { assert(spill_definition_pos() == -1, "cannot set the position twice"); split_parent()->_spill_definition_pos = pos; }
  // returns true if this interval has a shadow copy on the stack that is always correct
  bool             always_in_memory() const      { return split_parent()->_spill_state == storeAtDefinition || split_parent()->_spill_state == startInMemory; }

  // caching of values that take time to compute and are used multiple times
  LIR_Opr          cached_opr() const            { return _cached_opr; }
  VMReg            cached_vm_reg() const         { return _cached_vm_reg; }
  void             set_cached_opr(LIR_Opr opr)   { _cached_opr = opr; }
  void             set_cached_vm_reg(VMReg reg)  { _cached_vm_reg = reg; }

  // access to use positions
  int    first_usage(IntervalUseKind min_use_kind) const;           // id of the first operation requiring this interval in a register
  int    next_usage(IntervalUseKind min_use_kind, int from) const;  // id of next usage seen from the given position
  int    next_usage_exact(IntervalUseKind exact_use_kind, int from) const;
  int    previous_usage(IntervalUseKind min_use_kind, int from) const;

  // manipulating intervals
  void   add_use_pos(int pos, IntervalUseKind use_kind);
  void   add_range(int from, int to);
  Interval* split(int split_pos);
  Interval* split_from_start(int split_pos);
  void remove_first_use_pos()                    { _use_pos_and_kinds.trunc_to(_use_pos_and_kinds.length() - 2); }

  // test intersection
  bool   covers(int op_id, LIR_OpVisitState::OprMode mode) const;
  bool   has_hole_between(int from, int to);
  bool   intersects(Interval* i) const           { return _first->intersects(i->_first); }
  int    intersects_at(Interval* i) const        { return _first->intersects_at(i->_first); }

  // range iteration
  void   rewind_range()                          { _current = _first; }
  void   next_range()                            { assert(this != _end, "not allowed on sentinel"); _current = _current->next(); }
  int    current_from() const                    { return _current->from(); }
  int    current_to() const                      { return _current->to(); }
  bool   current_at_end() const                  { return _current == Range::end(); }
  bool   current_intersects(Interval* it)        { return _current->intersects(it->_current); }
  int    current_intersects_at(Interval* it)     { return _current->intersects_at(it->_current); }

  // printing
  void print(outputStream* out = tty) const      PRODUCT_RETURN;
};


class IntervalWalker : public CompilationResourceObj {
 protected:
  Compilation*     _compilation;
  LinearScan*      _allocator;

  Interval*        _unhandled_first[nofKinds];  // sorted list of intervals not yet live at the current position
  Interval*        _active_first   [nofKinds];  // sorted list of intervals live at the current position
  Interval*        _inactive_first [nofKinds];  // sorted list of intervals in a lifetime hole at the current position

  Interval*        _current;                     // the current interval coming from unhandled list
  int              _current_position;            // the current position (intercept point through the intervals)
  IntervalKind     _current_kind;                // the kind (fixedKind or anyKind) of the current interval


  Compilation*     compilation() const               { return _compilation; }
  LinearScan*      allocator() const                 { return _allocator; }

  // unified bailout support
  void             bailout(const char* msg) const    { compilation()->bailout(msg); }
  bool             bailed_out() const                { return compilation()->bailed_out(); }

  void check_bounds(IntervalKind kind) { assert(kind >= fixedKind && kind <= anyKind, "invalid interval_kind"); }

  Interval** unhandled_first_addr(IntervalKind kind) { check_bounds(kind); return &_unhandled_first[kind]; }
  Interval** active_first_addr(IntervalKind kind)    { check_bounds(kind); return &_active_first[kind]; }
  Interval** inactive_first_addr(IntervalKind kind)  { check_bounds(kind); return &_inactive_first[kind]; }

  void append_unsorted(Interval** first, Interval* interval);
  void append_sorted(Interval** first, Interval* interval);
  void append_to_unhandled(Interval** list, Interval* interval);

  bool remove_from_list(Interval** list, Interval* i);
  void remove_from_list(Interval* i);

  void next_interval();
  Interval*        current() const               { return _current; }
  IntervalKind     current_kind() const          { return _current_kind; }

  void walk_to(IntervalState state, int from);

  // activate_current() is called when an unhandled interval becomes active (in current(), current_kind()).
  // Return false if current() should not be moved to the active interval list.
  // It is safe to append current to any interval list but the unhandled list.
  virtual bool activate_current() { return true; }

  // interval_moved() is called whenever an interval moves from one interval list to another.
  // In the implementation of this method it is prohibited to move the interval to any list.
  virtual void interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to);

 public:
  IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);

  Interval* unhandled_first(IntervalKind kind)   { check_bounds(kind); return _unhandled_first[kind]; }
  Interval* active_first(IntervalKind kind)      { check_bounds(kind); return _active_first[kind]; }
  Interval* inactive_first(IntervalKind kind)    { check_bounds(kind); return _inactive_first[kind]; }

  // active contains the intervals that are live after the lir_op
  void walk_to(int lir_op_id);
  // active contains the intervals that are live before the lir_op
  void walk_before(int lir_op_id)  { walk_to(lir_op_id-1); }
  // walk through all intervals
  void walk()                      { walk_to(max_jint); }

  int current_position()           { return _current_position; }
};


// The actual linear scan register allocator
class LinearScanWalker : public IntervalWalker {
  enum {
    any_reg = LinearScan::any_reg
  };

 private:
  int              _first_reg;       // the reg. number of the first phys. register
  int              _last_reg;        // the reg. number of the last phys. register
  int              _num_phys_regs;   // required by current interval
  bool             _adjacent_regs;   // whether the lo/hi words of phys. regs must be adjacent

  int              _use_pos[LinearScan::nof_regs];
  int              _block_pos[LinearScan::nof_regs];
  IntervalList*    _spill_intervals[LinearScan::nof_regs];

  MoveResolver     _move_resolver;   // for ordering spill moves

  // accessors mapped to same functions in class LinearScan
  int         block_count() const      { return allocator()->block_count(); }
  BlockBegin* block_at(int idx) const  { return allocator()->block_at(idx); }
  BlockBegin* block_of_op_with_id(int op_id) const { return allocator()->block_of_op_with_id(op_id); }

  void init_use_lists(bool only_process_use_pos);
  void exclude_from_use(int reg);
  void exclude_from_use(Interval* i);
  void set_use_pos(int reg, Interval* i, int use_pos, bool only_process_use_pos);
  void set_use_pos(Interval* i, int use_pos, bool only_process_use_pos);
  void set_block_pos(int reg, Interval* i, int block_pos);
  void set_block_pos(Interval* i, int block_pos);

  void free_exclude_active_fixed();
  void free_exclude_active_any();
  void free_collect_inactive_fixed(Interval* cur);
  void free_collect_inactive_any(Interval* cur);
  void free_collect_unhandled(IntervalKind kind, Interval* cur);
  void spill_exclude_active_fixed();
  void spill_block_unhandled_fixed(Interval* cur);
  void spill_block_inactive_fixed(Interval* cur);
  void spill_collect_active_any();
  void spill_collect_inactive_any(Interval* cur);

  void insert_move(int op_id, Interval* src_it, Interval* dst_it);
  int  find_optimal_split_pos(BlockBegin* min_block, BlockBegin* max_block, int max_split_pos);
  int  find_optimal_split_pos(Interval* it, int min_split_pos, int max_split_pos, bool do_loop_optimization);
  void split_before_usage(Interval* it, int min_split_pos, int max_split_pos);
  void split_for_spilling(Interval* it);
  void split_stack_interval(Interval* it);
  void split_when_partial_register_available(Interval* it, int register_available_until);
  void split_and_spill_interval(Interval* it);

  int  find_free_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split);
  int  find_free_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split);
  bool alloc_free_reg(Interval* cur);

  int  find_locked_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split);
  int  find_locked_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split);
  void split_and_spill_intersecting_intervals(int reg, int regHi);
  void alloc_locked_reg(Interval* cur);

  bool no_allocation_possible(Interval* cur);
  void update_phys_reg_range(bool requires_cpu_register);
  void init_vars_for_alloc(Interval* cur);
  bool pd_init_regs_for_alloc(Interval* cur);

  void combine_spilled_intervals(Interval* cur);
  bool is_move(LIR_Op* op, Interval* from, Interval* to);

  bool activate_current();

 public:
  LinearScanWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);

  // must be called when all intervals are allocated
  void             finish_allocation()           { _move_resolver.resolve_and_append_moves(); }
};



/*
When a block has more than one predecessor, and all predecessors end with
the same sequence of move-instructions, then these moves can be placed once
at the beginning of the block instead of once in every predecessor.

Similarly, when a block has more than one successor, then equal sequences of
moves at the beginning of the successors can be placed once at the end of
the block. But because the moves must be inserted before all branch
instructions, this works only when there is exactly one conditional branch
at the end of the block (because the moves must be inserted before all
branches, but after all compares).

This optimization affects all kinds of moves (reg->reg, reg->stack and
stack->reg). Because this optimization works best when a block contains only
a few moves, it has a big impact on the number of blocks that end up
completely empty.
*/
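// Illustrative example (a hedged sketch, not taken from a real compilation): if both
// predecessors B1 and B2 of a block B3 end with the identical sequence
//   move r1 -> stack_slot_0
//   move r2 -> r3
// then the sequence is removed from the ends of B1 and B2 and placed once at the
// beginning of B3.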
class EdgeMoveOptimizer : public StackObj {
 private:
  // the class maintains a list with the LIR instruction lists of all
  // successors (or predecessors) and the current index into each of these lists
  LIR_OpListStack _edge_instructions;
  intStack        _edge_instructions_idx;

  void init_instructions();
  void append_instructions(LIR_OpList* instructions, int instructions_idx);
  LIR_Op* instruction_at(int edge);
  void remove_cur_instruction(int edge, bool decrement_index);

  bool operations_different(LIR_Op* op1, LIR_Op* op2);

  void optimize_moves_at_block_end(BlockBegin* cur);
  void optimize_moves_at_block_begin(BlockBegin* cur);

  EdgeMoveOptimizer();

 public:
  static void optimize(BlockList* code);
};



class ControlFlowOptimizer : public StackObj {
 private:
  BlockList _original_preds;

  enum {
    ShortLoopSize = 5
  };
  void reorder_short_loop(BlockList* code, BlockBegin* header_block, int header_idx);
  void reorder_short_loops(BlockList* code);

  bool can_delete_block(BlockBegin* cur);
  void substitute_branch_target(BlockBegin* cur, BlockBegin* target_from, BlockBegin* target_to);
  void delete_empty_blocks(BlockList* code);

  void delete_unnecessary_jumps(BlockList* code);
  void delete_jumps_to_return(BlockList* code);

  DEBUG_ONLY(void verify(BlockList* code);)

  ControlFlowOptimizer();
 public:
  static void optimize(BlockList* code);
};


#ifndef PRODUCT

// Helper class for collecting statistics of LinearScan
class LinearScanStatistic : public StackObj {
 public:
  enum Counter {
    // general counters
    counter_method,
    counter_fpu_method,
    counter_loop_method,
    counter_exception_method,
    counter_loop,
    counter_block,
    counter_loop_block,
    counter_exception_block,
    counter_interval,
    counter_fixed_interval,
    counter_range,
    counter_fixed_range,
    counter_use_pos,
    counter_fixed_use_pos,
    counter_spill_slots,
    blank_line_1,

    // counter for classes of lir instructions
    counter_instruction,
    counter_label,
    counter_entry,
    counter_return,
    counter_call,
    counter_move,
    counter_cmp,
    counter_cond_branch,
    counter_uncond_branch,
    counter_stub_branch,
    counter_alu,
    counter_alloc,
    counter_sync,
    counter_throw,
    counter_unwind,
    counter_typecheck,
    counter_fpu_stack,
    counter_misc_inst,
    counter_other_inst,
    blank_line_2,

    // counter for different types of moves
    counter_move_total,
    counter_move_reg_reg,
    counter_move_reg_stack,
    counter_move_stack_reg,
    counter_move_stack_stack,
    counter_move_reg_mem,
    counter_move_mem_reg,
    counter_move_const_any,

    number_of_counters,
    invalid_counter = -1
  };

 private:
  int _counters_sum[number_of_counters];
  int _counters_max[number_of_counters];

  void inc_counter(Counter idx, int value = 1) { _counters_sum[idx] += value; }

  const char* counter_name(int counter_idx);
  Counter base_counter(int counter_idx);

  void sum_up(LinearScanStatistic &method_statistic);
  void collect(LinearScan* allocator);

 public:
  LinearScanStatistic();
  void print(const char* title);
  static void compute(LinearScan* allocator, LinearScanStatistic &global_statistic);
};


// Helper class for collecting compilation time of LinearScan
class LinearScanTimers : public StackObj {
 public:
  enum Timer {
    timer_do_nothing,
    timer_number_instructions,
    timer_compute_local_live_sets,
    timer_compute_global_live_sets,
    timer_build_intervals,
    timer_sort_intervals_before,
    timer_allocate_registers,
    timer_resolve_data_flow,
    timer_sort_intervals_after,
    timer_eliminate_spill_moves,
    timer_assign_reg_num,
    timer_allocate_fpu_stack,
    timer_optimize_lir,

    number_of_timers
  };

 private:
  elapsedTimer _timers[number_of_timers];
  const char*  timer_name(int idx);

 public:
  LinearScanTimers();

  void begin_method();                     // called for each method when register allocation starts
  void end_method(LinearScan* allocator);  // called for each method when register allocation completed
  void print(double total_time);           // called before termination of VM to print global summary

  elapsedTimer* timer(int idx) { return &(_timers[idx]); }
};


#endif // ifndef PRODUCT

// Pick up platform-dependent implementation details
#include CPU_HEADER(c1_LinearScan)

#endif // SHARE_VM_C1_C1_LINEARSCAN_HPP