/*
 * Copyright 2005-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_c1_LinearScan.cpp.incl"


#ifndef PRODUCT

  static LinearScanStatistic _stat_before_alloc;
  static LinearScanStatistic _stat_after_asign;
  static LinearScanStatistic _stat_final;

  static LinearScanTimers _total_timer;

  // helper macro for short definition of timer
  #define TIME_LINEAR_SCAN(timer_name)  TraceTime _block_timer("", _total_timer.timer(LinearScanTimers::timer_name), TimeLinearScan || TimeEachLinearScan, Verbose);

  // helper macro for short definition of trace-output inside code
  #define TRACE_LINEAR_SCAN(level, code)       \
    if (TraceLinearScanLevel >= level) {       \
      code;                                    \
    }

#else

  #define TIME_LINEAR_SCAN(timer_name)
  #define TRACE_LINEAR_SCAN(level, code)

#endif

// Map BasicType to spill size in 32-bit words, matching VMReg's notion of words
#ifdef _LP64
static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 0, 1, -1};
#else
static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1};
#endif
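
// Example (illustrative; indices follow the BasicType enum ordering): a T_INT
// value spills to one 32-bit word and a T_LONG to two on all platforms, while
// oop types (T_OBJECT, T_ARRAY) occupy two words only under _LP64, where
// pointers are 64 bits wide.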


// Implementation of LinearScan

LinearScan::LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map)
 : _compilation(ir->compilation())
 , _ir(ir)
 , _gen(gen)
 , _frame_map(frame_map)
 , _num_virtual_regs(gen->max_virtual_register_number())
 , _has_fpu_registers(false)
 , _num_calls(-1)
 , _max_spills(0)
 , _unused_spill_slot(-1)
 , _intervals(0)   // initialized later with correct length
 , _new_intervals_from_allocation(new IntervalList())
 , _sorted_intervals(NULL)
 , _lir_ops(0)     // initialized later with correct length
 , _block_of_op(0) // initialized later with correct length
 , _has_info(0)
 , _has_call(0)
 , _scope_value_cache(0) // initialized later with correct length
 , _interval_in_loop(0, 0) // initialized later with correct length
 , _cached_blocks(*ir->linear_scan_order())
#ifdef X86
 , _fpu_stack_allocator(NULL)
#endif
{
  // note: to use more than one instance of LinearScan at a time, this function call
  //       has to be moved somewhere outside of this constructor
  Interval::initialize();

  assert(this->ir() != NULL,          "check if valid");
  assert(this->compilation() != NULL, "check if valid");
  assert(this->gen() != NULL,         "check if valid");
  assert(this->frame_map() != NULL,   "check if valid");
}


// ********** functions for converting LIR-Operands to register numbers
//
// Emulate a flat register file comprising physical integer registers,
// physical floating-point registers and virtual registers, in that order.
// Virtual registers already have appropriate numbers, since the first virtual
// register number (vreg_base) equals the number of physical registers.
// Returns -1 for the hi word if opr is a single-word operand.
//
// Note: the inverse operation (calculating an operand for register numbers)
//       is done in calc_operand_for_interval()
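//
// A numbering sketch (register counts are platform dependent, so the concrete
// values here are only an assumed example): with 8 cpu and 8 fpu registers,
// cpu registers map to 0..7, fpu registers to 8..15, and virtual registers
// start at vreg_base == 16; reg_num() of a virtual register is then simply
// its vreg_number().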

int LinearScan::reg_num(LIR_Opr opr) {
  assert(opr->is_register(), "should not call this otherwise");

  if (opr->is_virtual_register()) {
    assert(opr->vreg_number() >= nof_regs, "found a virtual register with a fixed-register number");
    return opr->vreg_number();
  } else if (opr->is_single_cpu()) {
    return opr->cpu_regnr();
  } else if (opr->is_double_cpu()) {
    return opr->cpu_regnrLo();
#ifdef X86
  } else if (opr->is_single_xmm()) {
    return opr->fpu_regnr() + pd_first_xmm_reg;
  } else if (opr->is_double_xmm()) {
    return opr->fpu_regnrLo() + pd_first_xmm_reg;
#endif
  } else if (opr->is_single_fpu()) {
    return opr->fpu_regnr() + pd_first_fpu_reg;
  } else if (opr->is_double_fpu()) {
    return opr->fpu_regnrLo() + pd_first_fpu_reg;
  } else {
    ShouldNotReachHere();
    return -1;
  }
}

int LinearScan::reg_numHi(LIR_Opr opr) {
  assert(opr->is_register(), "should not call this otherwise");

  if (opr->is_virtual_register()) {
    return -1;
  } else if (opr->is_single_cpu()) {
    return -1;
  } else if (opr->is_double_cpu()) {
    return opr->cpu_regnrHi();
#ifdef X86
  } else if (opr->is_single_xmm()) {
    return -1;
  } else if (opr->is_double_xmm()) {
    return -1;
#endif
  } else if (opr->is_single_fpu()) {
    return -1;
  } else if (opr->is_double_fpu()) {
    return opr->fpu_regnrHi() + pd_first_fpu_reg;
  } else {
    ShouldNotReachHere();
    return -1;
  }
}


// ********** functions for classification of intervals

bool LinearScan::is_precolored_interval(const Interval* i) {
  return i->reg_num() < LinearScan::nof_regs;
}

bool LinearScan::is_virtual_interval(const Interval* i) {
  return i->reg_num() >= LIR_OprDesc::vreg_base;
}

bool LinearScan::is_precolored_cpu_interval(const Interval* i) {
  return i->reg_num() < LinearScan::nof_cpu_regs;
}

bool LinearScan::is_virtual_cpu_interval(const Interval* i) {
  return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() != T_FLOAT && i->type() != T_DOUBLE);
}

bool LinearScan::is_precolored_fpu_interval(const Interval* i) {
  return i->reg_num() >= LinearScan::nof_cpu_regs && i->reg_num() < LinearScan::nof_regs;
}

bool LinearScan::is_virtual_fpu_interval(const Interval* i) {
  return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() == T_FLOAT || i->type() == T_DOUBLE);
}

bool LinearScan::is_in_fpu_register(const Interval* i) {
  // fixed intervals not needed for FPU stack allocation
  return i->reg_num() >= nof_regs && pd_first_fpu_reg <= i->assigned_reg() && i->assigned_reg() <= pd_last_fpu_reg;
}

bool LinearScan::is_oop_interval(const Interval* i) {
  // fixed intervals never contain oops
  return i->reg_num() >= nof_regs && i->type() == T_OBJECT;
}


// ********** General helper functions

// compute the next unused stack index that can be used for spilling
int LinearScan::allocate_spill_slot(bool double_word) {
  int spill_slot;
  if (double_word) {
    if ((_max_spills & 1) == 1) {
      // alignment of double-word values
      // the hole because of the alignment is filled with the next single-word value
      assert(_unused_spill_slot == -1, "wasting a spill slot");
      _unused_spill_slot = _max_spills;
      _max_spills++;
    }
    spill_slot = _max_spills;
    _max_spills += 2;

  } else if (_unused_spill_slot != -1) {
    // re-use the hole that was the result of a previous double-word alignment
    spill_slot = _unused_spill_slot;
    _unused_spill_slot = -1;

  } else {
    spill_slot = _max_spills;
    _max_spills++;
  }

  int result = spill_slot + LinearScan::nof_regs + frame_map()->argcount();

  // the class OopMapValue uses only 11 bits for storing the name of the
  // oop location. So a stack slot bigger than 2^11 leads to an overflow
  // that is not reported in product builds. Prevent this by checking the
  // spill slot here (although this value and the later used location name
  // are slightly different)
  if (result > 2000) {
    bailout("too many stack slots used");
  }

  return result;
}
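
// Worked example for allocate_spill_slot (values assumed for illustration):
// starting with _max_spills == 3, a double-word request records slot 3 in
// _unused_spill_slot, allocates the aligned pair starting at slot 4 and
// leaves _max_spills == 6; a subsequent single-word request then re-uses the
// hole at slot 3 instead of growing the frame.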

void LinearScan::assign_spill_slot(Interval* it) {
  // assign the canonical spill slot of the parent (if a part of the interval
  // is already spilled) or allocate a new spill slot
  if (it->canonical_spill_slot() >= 0) {
    it->assign_reg(it->canonical_spill_slot());
  } else {
    int spill = allocate_spill_slot(type2spill_size[it->type()] == 2);
    it->set_canonical_spill_slot(spill);
    it->assign_reg(spill);
  }
}

void LinearScan::propagate_spill_slots() {
  if (!frame_map()->finalize_frame(max_spills())) {
    bailout("frame too large");
  }
}

// create a new interval with a predefined reg_num
// (only used for parent intervals that are created during the building phase)
Interval* LinearScan::create_interval(int reg_num) {
  assert(_intervals.at(reg_num) == NULL, "overwriting existing interval");

  Interval* interval = new Interval(reg_num);
  _intervals.at_put(reg_num, interval);

  // assign register number for precolored intervals
  if (reg_num < LIR_OprDesc::vreg_base) {
    interval->assign_reg(reg_num);
  }
  return interval;
}

// assign a new reg_num to the interval and append it to the list of intervals
// (only used for child intervals that are created during register allocation)
void LinearScan::append_interval(Interval* it) {
  it->set_reg_num(_intervals.length());
  _intervals.append(it);
  _new_intervals_from_allocation->append(it);
}

// copy the vreg-flags if an interval is split
void LinearScan::copy_register_flags(Interval* from, Interval* to) {
  if (gen()->is_vreg_flag_set(from->reg_num(), LIRGenerator::byte_reg)) {
    gen()->set_vreg_flag(to->reg_num(), LIRGenerator::byte_reg);
  }
  if (gen()->is_vreg_flag_set(from->reg_num(), LIRGenerator::callee_saved)) {
    gen()->set_vreg_flag(to->reg_num(), LIRGenerator::callee_saved);
  }

  // Note: do not copy the must_start_in_memory flag because it is not necessary for child
  //       intervals (only the very beginning of the interval must be in memory)
}


// ********** spill move optimization
// eliminate moves from register to stack if the stack slot is known to be correct

// called during building of intervals
void LinearScan::change_spill_definition_pos(Interval* interval, int def_pos) {
  assert(interval->is_split_parent(), "can only be called for split parents");

  switch (interval->spill_state()) {
    case noDefinitionFound:
      assert(interval->spill_definition_pos() == -1, "must not be set before");
      interval->set_spill_definition_pos(def_pos);
      interval->set_spill_state(oneDefinitionFound);
      break;

    case oneDefinitionFound:
      assert(def_pos <= interval->spill_definition_pos(), "positions are processed in reverse order when intervals are created");
      if (def_pos < interval->spill_definition_pos() - 2) {
        // second definition found, so no spill optimization possible for this interval
        interval->set_spill_state(noOptimization);
      } else {
        // two consecutive definitions (because of two-operand LIR form)
        assert(block_of_op_with_id(def_pos) == block_of_op_with_id(interval->spill_definition_pos()), "blocks must be equal");
      }
      break;

    case noOptimization:
      // nothing to do
      break;

    default:
      assert(false, "other states not allowed at this time");
  }
}

// called during register allocation
void LinearScan::change_spill_state(Interval* interval, int spill_pos) {
  switch (interval->spill_state()) {
    case oneDefinitionFound: {
      int def_loop_depth = block_of_op_with_id(interval->spill_definition_pos())->loop_depth();
      int spill_loop_depth = block_of_op_with_id(spill_pos)->loop_depth();

      if (def_loop_depth < spill_loop_depth) {
        // the loop depth of the spilling position is higher than the loop depth
        // at the definition of the interval -> move the write to memory out of
        // the loop by storing at the definition of the interval
        interval->set_spill_state(storeAtDefinition);
      } else {
        // the interval is currently spilled only once, so for now there is no
        // reason to store the interval at the definition
        interval->set_spill_state(oneMoveInserted);
      }
      break;
    }

    case oneMoveInserted: {
      // the interval is spilled more than once, so it is better to store it to
      // memory at the definition
      interval->set_spill_state(storeAtDefinition);
      break;
    }

    case storeAtDefinition:
    case startInMemory:
    case noOptimization:
    case noDefinitionFound:
      // nothing to do
      break;

    default:
      assert(false, "other states not allowed at this time");
  }
}
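
// Summary of the spill-state machine implemented by the two functions above
// (a sketch; startInMemory is additionally set in add_def for method
// parameters and roundfp results):
//
//   noDefinitionFound  --def----------------------> oneDefinitionFound
//   oneDefinitionFound --2nd def------------------> noOptimization
//   oneDefinitionFound --spill in deeper loop-----> storeAtDefinition
//   oneDefinitionFound --first spill--------------> oneMoveInserted
//   oneMoveInserted    --second spill-------------> storeAtDefinition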


bool LinearScan::must_store_at_definition(const Interval* i) {
  return i->is_split_parent() && i->spill_state() == storeAtDefinition;
}

// called once before assignment of register numbers
void LinearScan::eliminate_spill_moves() {
  TIME_LINEAR_SCAN(timer_eliminate_spill_moves);
  TRACE_LINEAR_SCAN(3, tty->print_cr("***** Eliminating unnecessary spill moves"));

  // collect all intervals that must be stored after their definition.
  // the list is sorted by Interval::spill_definition_pos
  Interval* interval;
  Interval* temp_list;
  create_unhandled_lists(&interval, &temp_list, must_store_at_definition, NULL);

#ifdef ASSERT
  Interval* prev = NULL;
  Interval* temp = interval;
  while (temp != Interval::end()) {
    assert(temp->spill_definition_pos() > 0, "invalid spill definition pos");
    if (prev != NULL) {
      assert(temp->from() >= prev->from(), "intervals not sorted");
      assert(temp->spill_definition_pos() >= prev->spill_definition_pos(), "when intervals are sorted by from, then they must also be sorted by spill_definition_pos");
    }

    assert(temp->canonical_spill_slot() >= LinearScan::nof_regs, "interval has no spill slot assigned");
    assert(temp->spill_definition_pos() >= temp->from(), "invalid order");
    assert(temp->spill_definition_pos() <= temp->from() + 2, "only intervals defined once at their start-pos can be optimized");

    TRACE_LINEAR_SCAN(4, tty->print_cr("interval %d (from %d to %d) must be stored at %d", temp->reg_num(), temp->from(), temp->to(), temp->spill_definition_pos()));

    temp = temp->next();
  }
#endif

  LIR_InsertionBuffer insertion_buffer;
  int num_blocks = block_count();
  for (int i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    LIR_OpList* instructions = block->lir()->instructions_list();
    int         num_inst = instructions->length();
    bool        has_new = false;

    // iterate all instructions of the block. skip the first because it is always a label
    for (int j = 1; j < num_inst; j++) {
      LIR_Op* op = instructions->at(j);
      int op_id = op->id();

      if (op_id == -1) {
        // remove move from register to stack if the stack slot is guaranteed to be correct.
        // only moves that have been inserted by LinearScan can be removed.
        assert(op->code() == lir_move, "only moves can have an op_id of -1");
        assert(op->as_Op1() != NULL, "move must be LIR_Op1");
        assert(op->as_Op1()->result_opr()->is_virtual(), "LinearScan inserts only moves to virtual registers");

        LIR_Op1* op1 = (LIR_Op1*)op;
        Interval* interval = interval_at(op1->result_opr()->vreg_number());

        if (interval->assigned_reg() >= LinearScan::nof_regs && interval->always_in_memory()) {
          // move target is a stack slot that is always correct, so eliminate the instruction
          TRACE_LINEAR_SCAN(4, tty->print_cr("eliminating move from interval %d to %d", op1->in_opr()->vreg_number(), op1->result_opr()->vreg_number()));
          instructions->at_put(j, NULL); // NULL-instructions are deleted by assign_reg_num
        }

      } else {
        // insert move from register to stack just after the beginning of the interval
        assert(interval == Interval::end() || interval->spill_definition_pos() >= op_id, "invalid order");
        assert(interval == Interval::end() || (interval->is_split_parent() && interval->spill_state() == storeAtDefinition), "invalid interval");

        while (interval != Interval::end() && interval->spill_definition_pos() == op_id) {
          if (!has_new) {
            // prepare insertion buffer (appended when all instructions of the block are processed)
            insertion_buffer.init(block->lir());
            has_new = true;
          }

          LIR_Opr from_opr = operand_for_interval(interval);
          LIR_Opr to_opr = canonical_spill_opr(interval);
          assert(from_opr->is_fixed_cpu() || from_opr->is_fixed_fpu(), "from operand must be a register");
          assert(to_opr->is_stack(), "to operand must be a stack slot");

          insertion_buffer.move(j, from_opr, to_opr);
          TRACE_LINEAR_SCAN(4, tty->print_cr("inserting move after definition of interval %d to stack slot %d at op_id %d", interval->reg_num(), interval->canonical_spill_slot() - LinearScan::nof_regs, op_id));

          interval = interval->next();
        }
      }
    } // end of instruction iteration

    if (has_new) {
      block->lir()->append(&insertion_buffer);
    }
  } // end of block iteration

  assert(interval == Interval::end(), "missed an interval");
}
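
// Illustration (hypothetical LIR, vreg numbers assumed): if the allocator
// inserted "move R10 -> v42" (op_id == -1) while resolving a split, and
// interval 42 was assigned a stack slot that is always up to date
// (always_in_memory), the move writes a value that is already there and is
// deleted. Conversely, for an interval in state storeAtDefinition a single
// "move reg -> canonical spill slot" is inserted right after its definition,
// which makes all later spill moves of that interval redundant.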


// ********** Phase 1: number all instructions in all blocks
// Compute depth-first and linear scan block orders, and number LIR_Op nodes for linear scan.

void LinearScan::number_instructions() {
  {
    // dummy-timer to measure the cost of the timer itself
    // (this time is then subtracted from all other timers to get the real value)
    TIME_LINEAR_SCAN(timer_do_nothing);
  }
  TIME_LINEAR_SCAN(timer_number_instructions);

  // Assign IDs to LIR nodes and build a mapping, lir_ops, from ID to LIR_Op node.
  int num_blocks = block_count();
  int num_instructions = 0;
  int i;
  for (i = 0; i < num_blocks; i++) {
    num_instructions += block_at(i)->lir()->instructions_list()->length();
  }

  // initialize with correct length
  _lir_ops = LIR_OpArray(num_instructions);
  _block_of_op = BlockBeginArray(num_instructions);

  int op_id = 0;
  int idx = 0;

  for (i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    block->set_first_lir_instruction_id(op_id);
    LIR_OpList* instructions = block->lir()->instructions_list();

    int num_inst = instructions->length();
    for (int j = 0; j < num_inst; j++) {
      LIR_Op* op = instructions->at(j);
      op->set_id(op_id);

      _lir_ops.at_put(idx, op);
      _block_of_op.at_put(idx, block);
      assert(lir_op_with_id(op_id) == op, "must match");

      idx++;
      op_id += 2; // numbering of lir_ops by two
    }
    block->set_last_lir_instruction_id(op_id - 2);
  }
  assert(idx == num_instructions, "must match");
  assert(idx * 2 == op_id, "must match");

  _has_call = BitMap(num_instructions); _has_call.clear();
  _has_info = BitMap(num_instructions); _has_info.clear();
}
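
// Note on the numbering (ids assumed for illustration): ops receive the even
// ids 0, 2, 4, ..., so a block with three ops spans first_lir_instruction_id 0
// to last_lir_instruction_id 4. The odd positions in between let interval
// building mark positions "just after" an op (e.g. temp ranges cover op_id to
// op_id + 1), and op_id >> 1 serves as a dense index into _has_call and
// _has_info.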


// ********** Phase 2: compute local live sets separately for each block
// (sets live_gen and live_kill for each block)
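//
// Gen/kill sketch (vreg numbers assumed for illustration): for a block
// containing "v5 = v3 + v4; v3 = ...", the uses of v3 and v4 are seen before
// any definition in this block, so live_gen = { v3, v4 }; both definitions
// are recorded, so live_kill = { v3, v5 }. A value defined before its first
// use in the block (v5 here) ends up only in live_kill.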

void LinearScan::set_live_gen_kill(Value value, LIR_Op* op, BitMap& live_gen, BitMap& live_kill) {
  LIR_Opr opr = value->operand();
  Constant* con = value->as_Constant();

  // check some assumptions about debug information
  assert(!value->type()->is_illegal(), "if this local is used by the interpreter it shouldn't be of indeterminate type");
  assert(con == NULL || opr->is_virtual() || opr->is_constant() || opr->is_illegal(), "assumption: Constant instructions have only constant operands");
  assert(con != NULL || opr->is_virtual(), "assumption: non-Constant instructions have only virtual operands");

  if ((con == NULL || con->is_pinned()) && opr->is_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    int reg = opr->vreg_number();
    if (!live_kill.at(reg)) {
      live_gen.set_bit(reg);
      TRACE_LINEAR_SCAN(4, tty->print_cr("  Setting live_gen for value %c%d, LIR op_id %d, register number %d", value->type()->tchar(), value->id(), op->id(), reg));
    }
  }
}


void LinearScan::compute_local_live_sets() {
  TIME_LINEAR_SCAN(timer_compute_local_live_sets);

  int  num_blocks = block_count();
  int  live_size = live_set_size();
  bool local_has_fpu_registers = false;
  int  local_num_calls = 0;
  LIR_OpVisitState visitor;

  BitMap2D local_interval_in_loop = BitMap2D(_num_virtual_regs, num_loops());
  local_interval_in_loop.clear();

  // iterate all blocks
  for (int i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);

    BitMap live_gen(live_size);  live_gen.clear();
    BitMap live_kill(live_size); live_kill.clear();

    if (block->is_set(BlockBegin::exception_entry_flag)) {
      // Phi functions at the beginning of an exception handler are
      // implicitly defined (= killed) at the beginning of the block.
      for_each_phi_fun(block, phi,
        live_kill.set_bit(phi->operand()->vreg_number())
      );
    }

    LIR_OpList* instructions = block->lir()->instructions_list();
    int num_inst = instructions->length();

    // iterate all instructions of the block. skip the first because it is always a label
    assert(visitor.no_operands(instructions->at(0)), "first operation must always be a label");
    for (int j = 1; j < num_inst; j++) {
      LIR_Op* op = instructions->at(j);

      // visit operation to collect all operands
      visitor.visit(op);

      if (visitor.has_call()) {
        _has_call.set_bit(op->id() >> 1);
        local_num_calls++;
      }
      if (visitor.info_count() > 0) {
        _has_info.set_bit(op->id() >> 1);
      }

      // iterate input operands of instruction
      int k, n, reg;
      n = visitor.opr_count(LIR_OpVisitState::inputMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, k);
        assert(opr->is_register(), "visitor should only return register operands");

        if (opr->is_virtual_register()) {
          assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
          reg = opr->vreg_number();
          if (!live_kill.at(reg)) {
            live_gen.set_bit(reg);
            TRACE_LINEAR_SCAN(4, tty->print_cr("  Setting live_gen for register %d at instruction %d", reg, op->id()));
          }
          if (block->loop_index() >= 0) {
            local_interval_in_loop.set_bit(reg, block->loop_index());
          }
          local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
        }

#ifdef ASSERT
        // fixed intervals are never live at block boundaries, so
        // they need not be processed in live sets.
        // this is checked by these assertions to be sure about it.
        // the entry block may have incoming values in registers, which is ok.
        if (!opr->is_virtual_register() && block != ir()->start()) {
          reg = reg_num(opr);
          if (is_processed_reg_num(reg)) {
            assert(live_kill.at(reg), "using fixed register that is not defined in this block");
          }
          reg = reg_numHi(opr);
          if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
            assert(live_kill.at(reg), "using fixed register that is not defined in this block");
          }
        }
#endif
      }

      // Add uses of live locals from interpreter's point of view for proper debug information generation
      n = visitor.info_count();
      for (k = 0; k < n; k++) {
        CodeEmitInfo* info = visitor.info_at(k);
        ValueStack* stack = info->stack();
        for_each_state_value(stack, value,
          set_live_gen_kill(value, op, live_gen, live_kill)
        );
      }

      // iterate temp operands of instruction
      n = visitor.opr_count(LIR_OpVisitState::tempMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, k);
        assert(opr->is_register(), "visitor should only return register operands");

        if (opr->is_virtual_register()) {
          assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
          reg = opr->vreg_number();
          live_kill.set_bit(reg);
          if (block->loop_index() >= 0) {
            local_interval_in_loop.set_bit(reg, block->loop_index());
          }
          local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
        }

#ifdef ASSERT
        // fixed intervals are never live at block boundaries, so
        // they need not be processed in live sets
        // process them only in debug mode so that this can be checked
        if (!opr->is_virtual_register()) {
          reg = reg_num(opr);
          if (is_processed_reg_num(reg)) {
            live_kill.set_bit(reg_num(opr));
          }
          reg = reg_numHi(opr);
          if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
            live_kill.set_bit(reg);
          }
        }
#endif
      }

      // iterate output operands of instruction
      n = visitor.opr_count(LIR_OpVisitState::outputMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, k);
        assert(opr->is_register(), "visitor should only return register operands");

        if (opr->is_virtual_register()) {
          assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
          reg = opr->vreg_number();
          live_kill.set_bit(reg);
          if (block->loop_index() >= 0) {
            local_interval_in_loop.set_bit(reg, block->loop_index());
          }
          local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
        }

#ifdef ASSERT
        // fixed intervals are never live at block boundaries, so
        // they need not be processed in live sets
        // process them only in debug mode so that this can be checked
        if (!opr->is_virtual_register()) {
          reg = reg_num(opr);
          if (is_processed_reg_num(reg)) {
            live_kill.set_bit(reg_num(opr));
          }
          reg = reg_numHi(opr);
          if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
            live_kill.set_bit(reg);
          }
        }
#endif
      }
    } // end of instruction iteration

    block->set_live_gen (live_gen);
    block->set_live_kill(live_kill);
    block->set_live_in  (BitMap(live_size)); block->live_in().clear();
    block->set_live_out (BitMap(live_size)); block->live_out().clear();

    TRACE_LINEAR_SCAN(4, tty->print("live_gen  B%d ", block->block_id()); print_bitmap(block->live_gen()));
    TRACE_LINEAR_SCAN(4, tty->print("live_kill B%d ", block->block_id()); print_bitmap(block->live_kill()));
  } // end of block iteration

  // propagate local calculated information into LinearScan object
  _has_fpu_registers = local_has_fpu_registers;
  compilation()->set_has_fpu_code(local_has_fpu_registers);

  _num_calls = local_num_calls;
  _interval_in_loop = local_interval_in_loop;
}


// ********** Phase 3: perform a backward dataflow analysis to compute global live sets
// (sets live_in and live_out for each block)
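//
// The fixpoint iteration below solves the standard backward liveness
// equations, written here for reference:
//
//   live_out(B) = union of live_in(S) over all successors S of B
//                 (including exception handlers)
//   live_in(B)  = live_gen(B) union (live_out(B) - live_kill(B))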

void LinearScan::compute_global_live_sets() {
  TIME_LINEAR_SCAN(timer_compute_global_live_sets);

  int  num_blocks = block_count();
  bool change_occurred;
  bool change_occurred_in_block;
  int  iteration_count = 0;
  BitMap live_out(live_set_size()); live_out.clear(); // scratch set for calculations

  // Perform a backward dataflow analysis to compute live_out and live_in for each block.
  // The loop is executed until a fixpoint is reached (no changes in an iteration)
  // Exception handlers must be processed because not all live values are
  // present in the state array, e.g. because of global value numbering
  do {
    change_occurred = false;

    // iterate all blocks in reverse order
    for (int i = num_blocks - 1; i >= 0; i--) {
      BlockBegin* block = block_at(i);

      change_occurred_in_block = false;

      // live_out(block) is the union of live_in(sux), for successors sux of block
      int n = block->number_of_sux();
      int e = block->number_of_exception_handlers();
      if (n + e > 0) {
        // block has successors
        if (n > 0) {
          live_out.set_from(block->sux_at(0)->live_in());
          for (int j = 1; j < n; j++) {
            live_out.set_union(block->sux_at(j)->live_in());
          }
        } else {
          live_out.clear();
        }
        for (int j = 0; j < e; j++) {
          live_out.set_union(block->exception_handler_at(j)->live_in());
        }

        if (!block->live_out().is_same(live_out)) {
          // A change occurred.  Swap the old and new live out sets to avoid copying.
          BitMap temp = block->live_out();
          block->set_live_out(live_out);
          live_out = temp;

          change_occurred = true;
          change_occurred_in_block = true;
        }
      }

      if (iteration_count == 0 || change_occurred_in_block) {
        // live_in(block) is the union of live_gen(block) with (live_out(block) & !live_kill(block))
        // note: live_in has to be computed only in first iteration or if live_out has changed!
        BitMap live_in = block->live_in();
        live_in.set_from(block->live_out());
        live_in.set_difference(block->live_kill());
        live_in.set_union(block->live_gen());
      }

#ifndef PRODUCT
      if (TraceLinearScanLevel >= 4) {
        char c = ' ';
        if (iteration_count == 0 || change_occurred_in_block) {
          c = '*';
        }
        tty->print("(%d) live_in%c  B%d ", iteration_count, c, block->block_id()); print_bitmap(block->live_in());
        tty->print("(%d) live_out%c B%d ", iteration_count, c, block->block_id()); print_bitmap(block->live_out());
      }
#endif
    }
    iteration_count++;

    if (change_occurred && iteration_count > 50) {
      BAILOUT("too many iterations in compute_global_live_sets");
    }
  } while (change_occurred);


#ifdef ASSERT
  // check that fixed intervals are not live at block boundaries
  // (live set must be empty at fixed intervals)
  for (int i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    for (int j = 0; j < LIR_OprDesc::vreg_base; j++) {
      assert(block->live_in().at(j)  == false, "live_in  set of fixed register must be empty");
      assert(block->live_out().at(j) == false, "live_out set of fixed register must be empty");
      assert(block->live_gen().at(j) == false, "live_gen set of fixed register must be empty");
    }
  }
#endif

  // check that the live_in set of the first block is empty
  BitMap live_in_args(ir()->start()->live_in().size());
  live_in_args.clear();
  if (!ir()->start()->live_in().is_same(live_in_args)) {
#ifdef ASSERT
    tty->print_cr("Error: live_in set of first block must be empty (when this fails, virtual registers are used before they are defined)");
    tty->print_cr("affected registers:");
    print_bitmap(ir()->start()->live_in());

    // print some additional information to simplify debugging
    for (unsigned int i = 0; i < ir()->start()->live_in().size(); i++) {
      if (ir()->start()->live_in().at(i)) {
        Instruction* instr = gen()->instruction_for_vreg(i);
        tty->print_cr("* vreg %d (HIR instruction %c%d)", i, instr == NULL ? ' ' : instr->type()->tchar(), instr == NULL ? 0 : instr->id());

        for (int j = 0; j < num_blocks; j++) {
          BlockBegin* block = block_at(j);
          if (block->live_gen().at(i)) {
            tty->print_cr("  used in block B%d", block->block_id());
          }
          if (block->live_kill().at(i)) {
            tty->print_cr("  defined in block B%d", block->block_id());
          }
        }
      }
    }

#endif
    // when this fails, virtual registers are used before they are defined.
    assert(false, "live_in set of first block must be empty");
    // bail out if this occurs in product mode.
    bailout("live_in set of first block not empty");
  }
}


// ********** Phase 4: build intervals
// (fills the list _intervals)

void LinearScan::add_use(Value value, int from, int to, IntervalUseKind use_kind) {
  assert(!value->type()->is_illegal(), "if this value is used by the interpreter it shouldn't be of indeterminate type");
  LIR_Opr opr = value->operand();
  Constant* con = value->as_Constant();

  if ((con == NULL || con->is_pinned()) && opr->is_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    add_use(opr, from, to, use_kind);
  }
}


void LinearScan::add_def(LIR_Opr opr, int def_pos, IntervalUseKind use_kind) {
  TRACE_LINEAR_SCAN(2, tty->print(" def "); opr->print(tty); tty->print_cr(" def_pos %d (%d)", def_pos, use_kind));
  assert(opr->is_register(), "should not be called otherwise");

  if (opr->is_virtual_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    add_def(opr->vreg_number(), def_pos, use_kind, opr->type_register());

  } else {
    int reg = reg_num(opr);
    if (is_processed_reg_num(reg)) {
      add_def(reg, def_pos, use_kind, opr->type_register());
    }
    reg = reg_numHi(opr);
    if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
      add_def(reg, def_pos, use_kind, opr->type_register());
    }
  }
}

void LinearScan::add_use(LIR_Opr opr, int from, int to, IntervalUseKind use_kind) {
  TRACE_LINEAR_SCAN(2, tty->print(" use "); opr->print(tty); tty->print_cr(" from %d to %d (%d)", from, to, use_kind));
  assert(opr->is_register(), "should not be called otherwise");

  if (opr->is_virtual_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    add_use(opr->vreg_number(), from, to, use_kind, opr->type_register());

  } else {
    int reg = reg_num(opr);
    if (is_processed_reg_num(reg)) {
      add_use(reg, from, to, use_kind, opr->type_register());
    }
    reg = reg_numHi(opr);
    if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
      add_use(reg, from, to, use_kind, opr->type_register());
    }
  }
}

void LinearScan::add_temp(LIR_Opr opr, int temp_pos, IntervalUseKind use_kind) {
  TRACE_LINEAR_SCAN(2, tty->print(" temp "); opr->print(tty); tty->print_cr(" temp_pos %d (%d)", temp_pos, use_kind));
  assert(opr->is_register(), "should not be called otherwise");

  if (opr->is_virtual_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    add_temp(opr->vreg_number(), temp_pos, use_kind, opr->type_register());

  } else {
    int reg = reg_num(opr);
    if (is_processed_reg_num(reg)) {
      add_temp(reg, temp_pos, use_kind, opr->type_register());
    }
    reg = reg_numHi(opr);
    if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
      add_temp(reg, temp_pos, use_kind, opr->type_register());
    }
  }
}


void LinearScan::add_def(int reg_num, int def_pos, IntervalUseKind use_kind, BasicType type) {
  Interval* interval = interval_at(reg_num);
  if (interval != NULL) {
    assert(interval->reg_num() == reg_num, "wrong interval");

    if (type != T_ILLEGAL) {
      interval->set_type(type);
    }

    Range* r = interval->first();
    if (r->from() <= def_pos) {
      // Update the starting point (when a range is first created for a use, its
      // start is the beginning of the current block until a def is encountered.)
      r->set_from(def_pos);
      interval->add_use_pos(def_pos, use_kind);

    } else {
      // Dead value - make vacuous interval
      // also add use_kind for dead intervals
      interval->add_range(def_pos, def_pos + 1);
      interval->add_use_pos(def_pos, use_kind);
      TRACE_LINEAR_SCAN(2, tty->print_cr("Warning: def of reg %d at %d occurs without use", reg_num, def_pos));
    }

  } else {
    // Dead value - make vacuous interval
    // also add use_kind for dead intervals
    interval = create_interval(reg_num);
    if (type != T_ILLEGAL) {
      interval->set_type(type);
    }

    interval->add_range(def_pos, def_pos + 1);
    interval->add_use_pos(def_pos, use_kind);
    TRACE_LINEAR_SCAN(2, tty->print_cr("Warning: dead value %d at %d in live intervals", reg_num, def_pos));
  }

  change_spill_definition_pos(interval, def_pos);
  if (use_kind == noUse && interval->spill_state() <= startInMemory) {
    // detection of method-parameters and roundfp-results
    // TODO: move this directly to position where use-kind is computed
    interval->set_spill_state(startInMemory);
  }
}

void LinearScan::add_use(int reg_num, int from, int to, IntervalUseKind use_kind, BasicType type) {
  Interval* interval = interval_at(reg_num);
  if (interval == NULL) {
    interval = create_interval(reg_num);
  }
  assert(interval->reg_num() == reg_num, "wrong interval");

  if (type != T_ILLEGAL) {
    interval->set_type(type);
  }

  interval->add_range(from, to);
  interval->add_use_pos(to, use_kind);
}

void LinearScan::add_temp(int reg_num, int temp_pos, IntervalUseKind use_kind, BasicType type) {
  Interval* interval = interval_at(reg_num);
  if (interval == NULL) {
    interval = create_interval(reg_num);
  }
  assert(interval->reg_num() == reg_num, "wrong interval");

  if (type != T_ILLEGAL) {
    interval->set_type(type);
  }

  interval->add_range(temp_pos, temp_pos + 1);
  interval->add_use_pos(temp_pos, use_kind);
}


// The results of these functions are used for optimizing spilling and reloading:
// if a function returns shouldHaveRegister and the interval is spilled,
// it is not reloaded to a register.
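// (For reference, the use kinds form an increasing order of register
// pressure; assumed from the IntervalUseKind declaration: noUse <
// loopEndMarker < shouldHaveRegister < mustHaveRegister.)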
IntervalUseKind LinearScan::use_kind_of_output_operand(LIR_Op* op, LIR_Opr opr) {
  if (op->code() == lir_move) {
    assert(op->as_Op1() != NULL, "lir_move must be LIR_Op1");
    LIR_Op1* move = (LIR_Op1*)op;
    LIR_Opr res = move->result_opr();
    bool result_in_memory = res->is_virtual() && gen()->is_vreg_flag_set(res->vreg_number(), LIRGenerator::must_start_in_memory);

    if (result_in_memory) {
      // Begin of an interval with must_start_in_memory set.
      // This interval will always get a stack slot first, so return noUse.
      return noUse;

    } else if (move->in_opr()->is_stack()) {
      // method argument (the condition must match the one in handle_method_arguments)
      return noUse;

    } else if (move->in_opr()->is_register() && move->result_opr()->is_register()) {
      // Move from register to register
      if (block_of_op_with_id(op->id())->is_set(BlockBegin::osr_entry_flag)) {
        // special handling of phi-function moves inside osr-entry blocks
        // the input operand, not the output operand, must get a register (leads to better register allocation)
        return shouldHaveRegister;
      }
    }
  }

  if (opr->is_virtual() &&
      gen()->is_vreg_flag_set(opr->vreg_number(), LIRGenerator::must_start_in_memory)) {
    // result is a stack-slot, so prevent immediate reloading
    return noUse;
  }

  // all other operands require a register
  return mustHaveRegister;
}

IntervalUseKind LinearScan::use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr) {
  if (op->code() == lir_move) {
    assert(op->as_Op1() != NULL, "lir_move must be LIR_Op1");
    LIR_Op1* move = (LIR_Op1*)op;
    LIR_Opr res = move->result_opr();
    bool result_in_memory = res->is_virtual() && gen()->is_vreg_flag_set(res->vreg_number(), LIRGenerator::must_start_in_memory);

    if (result_in_memory) {
      // Move to an interval with must_start_in_memory set.
      // To avoid moves from stack to stack (not allowed), force the input operand to a register
      return mustHaveRegister;

    } else if (move->in_opr()->is_register() && move->result_opr()->is_register()) {
      // Move from register to register
      if (block_of_op_with_id(op->id())->is_set(BlockBegin::osr_entry_flag)) {
        // special handling of phi-function moves inside osr-entry blocks
        // the input operand, not the output operand, must get a register (leads to better register allocation)
        return mustHaveRegister;
      }

      // The input operand is not forced to a register (moves from stack to register are allowed),
      // but it is faster if the input operand is in a register
      return shouldHaveRegister;
    }
  }


#ifdef X86
  if (op->code() == lir_cmove) {
    // conditional moves can handle stack operands
    assert(op->result_opr()->is_register(), "result must always be in a register");
    return shouldHaveRegister;
  }

  // optimizations for the second input operand of arithmetic operations on Intel
  // this operand is allowed to be on the stack in some cases
  BasicType opr_type = opr->type_register();
  if (opr_type == T_FLOAT || opr_type == T_DOUBLE) {
    if ((UseSSE == 1 && opr_type == T_FLOAT) || UseSSE >= 2) {
      // SSE float instruction (T_DOUBLE only supported with SSE2)
      switch (op->code()) {
        case lir_cmp:
        case lir_add:
        case lir_sub:
        case lir_mul:
        case lir_div:
        {
          assert(op->as_Op2() != NULL, "must be LIR_Op2");
          LIR_Op2* op2 = (LIR_Op2*)op;
          if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
            assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
            return shouldHaveRegister;
          }
        }
      }
    } else {
      // FPU stack float instruction
      switch (op->code()) {
        case lir_add:
        case lir_sub:
        case lir_mul:
        case lir_div:
        {
          assert(op->as_Op2() != NULL, "must be LIR_Op2");
          LIR_Op2* op2 = (LIR_Op2*)op;
          if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
            assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
            return shouldHaveRegister;
          }
        }
      }
    }

  } else if (opr_type != T_LONG) {
    // integer instruction (note: long operands must always be in register)
    switch (op->code()) {
      case lir_cmp:
      case lir_add:
      case lir_sub:
      case lir_logic_and:
      case lir_logic_or:
      case lir_logic_xor:
      {
        assert(op->as_Op2() != NULL, "must be LIR_Op2");
        LIR_Op2* op2 = (LIR_Op2*)op;
        if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
          assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
          return shouldHaveRegister;
        }
      }
    }
  }
#endif // X86

  // all other operands require a register
  return mustHaveRegister;
}
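
// The x86 special cases above exist because two-operand instructions such as
// "add reg, [mem]" accept a memory source, so the second input need not be
// reloaded into a register (shouldHaveRegister rather than mustHaveRegister).
// Sketch of the effect (vreg numbers assumed): for "v10 = v10 + v20" with v20
// spilled, the allocator may emit "addl reg_of_v10, [stack_slot_of_v20]"
// instead of a load followed by an add.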


void LinearScan::handle_method_arguments(LIR_Op* op) {
  // special handling for method arguments (moves from stack to virtual register):
  // the interval is not assigned a register, but the incoming stack slot instead.
  // it is split before the first use by the register allocator.

  if (op->code() == lir_move) {
    assert(op->as_Op1() != NULL, "must be LIR_Op1");
    LIR_Op1* move = (LIR_Op1*)op;

    if (move->in_opr()->is_stack()) {
#ifdef ASSERT
      int arg_size = compilation()->method()->arg_size();
      LIR_Opr o = move->in_opr();
      if (o->is_single_stack()) {
        assert(o->single_stack_ix() >= 0 && o->single_stack_ix() < arg_size, "out of range");
      } else if (o->is_double_stack()) {
        assert(o->double_stack_ix() >= 0 && o->double_stack_ix() < arg_size, "out of range");
      } else {
        ShouldNotReachHere();
      }

      assert(move->id() > 0, "invalid id");
      assert(block_of_op_with_id(move->id())->number_of_preds() == 0, "move from stack must be in first block");
      assert(move->result_opr()->is_virtual(), "result of move must be a virtual register");

      TRACE_LINEAR_SCAN(4, tty->print_cr("found move from stack slot %d to vreg %d", o->is_single_stack() ? o->single_stack_ix() : o->double_stack_ix(), reg_num(move->result_opr())));
#endif

      Interval* interval = interval_at(reg_num(move->result_opr()));

      int stack_slot = LinearScan::nof_regs + (move->in_opr()->is_single_stack() ? move->in_opr()->single_stack_ix() : move->in_opr()->double_stack_ix());
      interval->set_canonical_spill_slot(stack_slot);
      interval->assign_reg(stack_slot);
    }
  }
}
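
// Illustration (frame layout assumed): an incoming argument arriving in stack
// slot 0 gets canonical spill slot nof_regs + 0, so its interval starts out
// in memory. If the interval is split and spilled later, the value is already
// in its canonical location and no extra store is required.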

void LinearScan::handle_doubleword_moves(LIR_Op* op) {
  // special handling for doubleword move from memory to register:
  // in this case the registers of the input address and the result
  // registers must not overlap -> add a temp range for the input registers
  if (op->code() == lir_move) {
    assert(op->as_Op1() != NULL, "must be LIR_Op1");
    LIR_Op1* move = (LIR_Op1*)op;

    if (move->result_opr()->is_double_cpu() && move->in_opr()->is_pointer()) {
      LIR_Address* address = move->in_opr()->as_address_ptr();
      if (address != NULL) {
        if (address->base()->is_valid()) {
          add_temp(address->base(), op->id(), noUse);
        }
        if (address->index()->is_valid()) {
          add_temp(address->index(), op->id(), noUse);
        }
      }
    }
  }
}

void LinearScan::add_register_hints(LIR_Op* op) {
  switch (op->code()) {
    case lir_move:      // fall through
    case lir_convert: {
      assert(op->as_Op1() != NULL, "lir_move, lir_convert must be LIR_Op1");
      LIR_Op1* move = (LIR_Op1*)op;

      LIR_Opr move_from = move->in_opr();
      LIR_Opr move_to = move->result_opr();

      if (move_to->is_register() && move_from->is_register()) {
        Interval* from = interval_at(reg_num(move_from));
        Interval* to = interval_at(reg_num(move_to));
        if (from != NULL && to != NULL) {
          to->set_register_hint(from);
          TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", move->id(), from->reg_num(), to->reg_num()));
        }
      }
      break;
    }
    case lir_cmove: {
      assert(op->as_Op2() != NULL, "lir_cmove must be LIR_Op2");
      LIR_Op2* cmove = (LIR_Op2*)op;

      LIR_Opr move_from = cmove->in_opr1();
      LIR_Opr move_to = cmove->result_opr();

      if (move_to->is_register() && move_from->is_register()) {
        Interval* from = interval_at(reg_num(move_from));
        Interval* to = interval_at(reg_num(move_to));
        if (from != NULL && to != NULL) {
          to->set_register_hint(from);
          TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", cmove->id(), from->reg_num(), to->reg_num()));
        }
      }
      break;
    }
  }
}
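
// Why hints help (vreg numbers assumed): for "move v10 -> v12" the interval
// of v12 receives v10's interval as its register hint. If the allocator later
// assigns both intervals the same physical register, the move becomes
// redundant and can be removed.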
1238 
1239 
1240 void LinearScan::build_intervals() {
1241   TIME_LINEAR_SCAN(timer_build_intervals);
1242 
1243   // initialize interval list with expected number of intervals
1244   // (32 is added to have some space for split children without having to resize the list)
1245   _intervals = IntervalList(num_virtual_regs() + 32);
1246   // initialize all slots that are used by build_intervals
1247   _intervals.at_put_grow(num_virtual_regs() - 1, NULL, NULL);
1248 
1249   // create a list with all caller-save registers (cpu, fpu, xmm)
1250   // when an instruction is a call, a temp range is created for all these registers
1251   int num_caller_save_registers = 0;
1252   int caller_save_registers[LinearScan::nof_regs];
1253 
1254   int i;
1255   for (i = 0; i < FrameMap::nof_caller_save_cpu_regs; i++) {
1256     LIR_Opr opr = FrameMap::caller_save_cpu_reg_at(i);
1257     assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
1258     assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
1259     caller_save_registers[num_caller_save_registers++] = reg_num(opr);
1260   }
1261 
1262   // temp ranges for fpu registers are only created when the method has
1263   // virtual fpu operands. Otherwise no allocation for fpu registers is
1264   // perfomed and so the temp ranges would be useless
1265   if (has_fpu_registers()) {
1266 #ifdef X86
1267     if (UseSSE < 2) {
1268 #endif
1269       for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
1270         LIR_Opr opr = FrameMap::caller_save_fpu_reg_at(i);
1271         assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
1272         assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
1273         caller_save_registers[num_caller_save_registers++] = reg_num(opr);
1274       }
1275 #ifdef X86
1276     }
1277     if (UseSSE > 0) {
1278       for (i = 0; i < FrameMap::nof_caller_save_xmm_regs; i++) {
1279         LIR_Opr opr = FrameMap::caller_save_xmm_reg_at(i);
1280         assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
1281         assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
1282         caller_save_registers[num_caller_save_registers++] = reg_num(opr);
1283       }
1284     }
1285 #endif
1286   }
1287   assert(num_caller_save_registers <= LinearScan::nof_regs, "out of bounds");
1288 
1289 
1290   LIR_OpVisitState visitor;
1291 
1292   // iterate all blocks in reverse order
1293   for (i = block_count() - 1; i >= 0; i--) {
1294     BlockBegin* block = block_at(i);
1295     LIR_OpList* instructions = block->lir()->instructions_list();
1296     int         block_from =   block->first_lir_instruction_id();
1297     int         block_to =     block->last_lir_instruction_id();
1298 
1299     assert(block_from == instructions->at(0)->id(), "must be");
1300     assert(block_to   == instructions->at(instructions->length() - 1)->id(), "must be");
1301 
1302     // Update intervals for registers live at the end of this block;
1303     BitMap live = block->live_out();
1304     int size = (int)live.size();
1305     for (int number = (int)live.get_next_one_offset(0, size); number < size; number = (int)live.get_next_one_offset(number + 1, size)) {
1306       assert(live.at(number), "should not stop here otherwise");
1307       assert(number >= LIR_OprDesc::vreg_base, "fixed intervals must not be live on block bounds");
1308       TRACE_LINEAR_SCAN(2, tty->print_cr("live in %d to %d", number, block_to + 2));
1309 
1310       add_use(number, block_from, block_to + 2, noUse, T_ILLEGAL);
1311 
1312       // add special use positions for loop-end blocks when the
1313       // interval is used anywhere inside this loop.  It's possible
1314       // that the block was part of a non-natural loop, so it might
1315       // have an invalid loop index.
1316       if (block->is_set(BlockBegin::linear_scan_loop_end_flag) &&
1317           block->loop_index() != -1 &&
1318           is_interval_in_loop(number, block->loop_index())) {
1319         interval_at(number)->add_use_pos(block_to + 1, loopEndMarker);
1320       }
1321     }
1322 
1323     // iterate all instructions of the block in reverse order.
1324     // skip the first instruction because it is always a label
1325     // definitions of intervals are processed before uses
1326     assert(visitor.no_operands(instructions->at(0)), "first operation must always be a label");
1327     for (int j = instructions->length() - 1; j >= 1; j--) {
1328       LIR_Op* op = instructions->at(j);
1329       int op_id = op->id();
1330 
1331       // visit operation to collect all operands
1332       visitor.visit(op);
1333 
1334       // add a temp range for each register if operation destroys caller-save registers
1335       if (visitor.has_call()) {
1336         for (int k = 0; k < num_caller_save_registers; k++) {
1337           add_temp(caller_save_registers[k], op_id, noUse, T_ILLEGAL);
1338         }
1339         TRACE_LINEAR_SCAN(4, tty->print_cr("operation destroys all caller-save registers"));
1340       }
1341 
1342       // Add any platform dependent temps
1343       pd_add_temps(op);
1344 
1345       // visit definitions (output and temp operands)
1346       int k, n;
1347       n = visitor.opr_count(LIR_OpVisitState::outputMode);
1348       for (k = 0; k < n; k++) {
1349         LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, k);
1350         assert(opr->is_register(), "visitor should only return register operands");
1351         add_def(opr, op_id, use_kind_of_output_operand(op, opr));
1352       }
1353 
1354       n = visitor.opr_count(LIR_OpVisitState::tempMode);
1355       for (k = 0; k < n; k++) {
1356         LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, k);
1357         assert(opr->is_register(), "visitor should only return register operands");
1358         add_temp(opr, op_id, mustHaveRegister);
1359       }
1360 
1361       // visit uses (input operands)
1362       n = visitor.opr_count(LIR_OpVisitState::inputMode);
1363       for (k = 0; k < n; k++) {
1364         LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, k);
1365         assert(opr->is_register(), "visitor should only return register operands");
1366         add_use(opr, block_from, op_id, use_kind_of_input_operand(op, opr));
1367       }
1368 
1369       // Add uses of live locals from interpreter's point of view for proper
1370       // debug information generation
1371       // Treat these operands as temp values (if the live range is extended
1372       // to a call site, the value would be in a register at the call otherwise)
1373       n = visitor.info_count();
1374       for (k = 0; k < n; k++) {
1375         CodeEmitInfo* info = visitor.info_at(k);
1376         ValueStack* stack = info->stack();
1377         for_each_state_value(stack, value,
1378           add_use(value, block_from, op_id + 1, noUse);
1379         );
1380       }
1381 
1382       // special steps for some instructions (especially moves)
1383       handle_method_arguments(op);
1384       handle_doubleword_moves(op);
1385       add_register_hints(op);
1386 
1387     } // end of instruction iteration
1388   } // end of block iteration
1389 
1390 
1391   // add the range [0, 1[ to all fixed intervals
1392   // -> the register allocator need not handle unhandled fixed intervals
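       // a fixed interval with range [0, 1[ is active right at position 0, so the
       // interval walker moves it to the active set immediately instead of keeping
       // it in the unhandled set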
1393   for (int n = 0; n < LinearScan::nof_regs; n++) {
1394     Interval* interval = interval_at(n);
1395     if (interval != NULL) {
1396       interval->add_range(0, 1);
1397     }
1398   }
1399 }
1400 
1401 
1402 // ********** Phase 5: actual register allocation
1403 
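     // comparator used for sorting interval lists by ascending from() position;
     // NULL entries compare greater than any interval and therefore sort to the end
     // (e.g. entries with from() positions {8, NULL, 2} sort to {2, 8, NULL})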
1404 int LinearScan::interval_cmp(Interval** a, Interval** b) {
1405   if (*a != NULL) {
1406     if (*b != NULL) {
1407       return (*a)->from() - (*b)->from();
1408     } else {
1409       return -1;
1410     }
1411   } else {
1412     if (*b != NULL) {
1413       return 1;
1414     } else {
1415       return 0;
1416     }
1417   }
1418 }
1419 
1420 #ifndef PRODUCT
1421 bool LinearScan::is_sorted(IntervalArray* intervals) {
1422   int from = -1;
1423   int i, j;
1424   for (i = 0; i < intervals->length(); i ++) {
1425     Interval* it = intervals->at(i);
1426     if (it != NULL) {
1427       if (from > it->from()) {
1428         assert(false, "intervals are not sorted by from() position");
1429         return false;
1430       }
1431       from = it->from();
1432     }
1433   }
1434 
1435   // check in both directions if the sorted list and the unsorted list contain the same intervals
1436   for (i = 0; i < interval_count(); i++) {
1437     if (interval_at(i) != NULL) {
1438       int num_found = 0;
1439       for (j = 0; j < intervals->length(); j++) {
1440         if (interval_at(i) == intervals->at(j)) {
1441           num_found++;
1442         }
1443       }
1444       assert(num_found == 1, "lists do not contain same intervals");
1445     }
1446   }
1447   for (j = 0; j < intervals->length(); j++) {
1448     int num_found = 0;
1449     for (i = 0; i < interval_count(); i++) {
1450       if (interval_at(i) == intervals->at(j)) {
1451         num_found++;
1452       }
1453     }
1454     assert(num_found == 1, "lists do not contain same intervals");
1455   }
1456 
1457   return true;
1458 }
1459 #endif
1460 
1461 void LinearScan::add_to_list(Interval** first, Interval** prev, Interval* interval) {
1462   if (*prev != NULL) {
1463     (*prev)->set_next(interval);
1464   } else {
1465     *first = interval;
1466   }
1467   *prev = interval;
1468 }
1469 
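     // partition the sorted interval array into two sorted linked lists: list1
     // receives all intervals accepted by is_list1, list2 the remaining intervals
     // accepted by is_list2 (or all remaining intervals when is_list2 is NULL)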
1470 void LinearScan::create_unhandled_lists(Interval** list1, Interval** list2, bool (is_list1)(const Interval* i), bool (is_list2)(const Interval* i)) {
1471   assert(is_sorted(_sorted_intervals), "interval list is not sorted");
1472 
1473   *list1 = *list2 = Interval::end();
1474 
1475   Interval* list1_prev = NULL;
1476   Interval* list2_prev = NULL;
1477   Interval* v;
1478 
1479   const int n = _sorted_intervals->length();
1480   for (int i = 0; i < n; i++) {
1481     v = _sorted_intervals->at(i);
1482     if (v == NULL) continue;
1483 
1484     if (is_list1(v)) {
1485       add_to_list(list1, &list1_prev, v);
1486     } else if (is_list2 == NULL || is_list2(v)) {
1487       add_to_list(list2, &list2_prev, v);
1488     }
1489   }
1490 
1491   if (list1_prev != NULL) list1_prev->set_next(Interval::end());
1492   if (list2_prev != NULL) list2_prev->set_next(Interval::end());
1493 
1494   assert(list1_prev == NULL || list1_prev->next() == Interval::end(), "linear list does not end with sentinel");
1495   assert(list2_prev == NULL || list2_prev->next() == Interval::end(), "linear list does not end with sentinel");
1496 }
1497 
1498 
1499 void LinearScan::sort_intervals_before_allocation() {
1500   TIME_LINEAR_SCAN(timer_sort_intervals_before);
1501 
1502   IntervalList* unsorted_list = &_intervals;
1503   int unsorted_len = unsorted_list->length();
1504   int sorted_len = 0;
1505   int unsorted_idx;
1506   int sorted_idx = 0;
1507   int sorted_from_max = -1;
1508 
1509   // calc number of items for sorted list (sorted list must not contain NULL values)
1510   for (unsorted_idx = 0; unsorted_idx < unsorted_len; unsorted_idx++) {
1511     if (unsorted_list->at(unsorted_idx) != NULL) {
1512       sorted_len++;
1513     }
1514   }
1515   IntervalArray* sorted_list = new IntervalArray(sorted_len);
1516 
1517   // special sorting algorithm: the original interval list is almost sorted,
1518   // only a few intervals are out of place. So this insertion sort is much faster than a complete QuickSort
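       // for illustration: given from() positions 8, 10, 9, 12, the values 8 and 10
       // are appended directly, 9 is shifted in before 10 by the else-branch below,
       // and 12 is appended directly again, yielding 8, 9, 10, 12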
1519   for (unsorted_idx = 0; unsorted_idx < unsorted_len; unsorted_idx++) {
1520     Interval* cur_interval = unsorted_list->at(unsorted_idx);
1521 
1522     if (cur_interval != NULL) {
1523       int cur_from = cur_interval->from();
1524 
1525       if (sorted_from_max <= cur_from) {
1526         sorted_list->at_put(sorted_idx++, cur_interval);
1527         sorted_from_max = cur_interval->from();
1528       } else {
1529         // the assumption that the intervals are already sorted failed,
1530         // so this interval must be inserted manually at the correct position
1531         int j;
1532         for (j = sorted_idx - 1; j >= 0 && cur_from < sorted_list->at(j)->from(); j--) {
1533           sorted_list->at_put(j + 1, sorted_list->at(j));
1534         }
1535         sorted_list->at_put(j + 1, cur_interval);
1536         sorted_idx++;
1537       }
1538     }
1539   }
1540   _sorted_intervals = sorted_list;
1541 }
1542 
1543 void LinearScan::sort_intervals_after_allocation() {
1544   TIME_LINEAR_SCAN(timer_sort_intervals_after);
1545 
1546   IntervalArray* old_list      = _sorted_intervals;
1547   IntervalList*  new_list      = _new_intervals_from_allocation;
1548   int old_len = old_list->length();
1549   int new_len = new_list->length();
1550 
1551   if (new_len == 0) {
1552     // no intervals have been added during allocation, so sorted list is already up to date
1553     return;
1554   }
1555 
1556   // conventional sort-algorithm for new intervals
1557   new_list->sort(interval_cmp);
1558 
1559   // merge old and new list (both already sorted) into one combined list
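       // classic two-way merge; on equal from() positions the interval from the old
       // list is taken first (<= in the condition below), so the relative order of
       // the previously sorted intervals is preserved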
1560   IntervalArray* combined_list = new IntervalArray(old_len + new_len);
1561   int old_idx = 0;
1562   int new_idx = 0;
1563 
1564   while (old_idx + new_idx < old_len + new_len) {
1565     if (new_idx >= new_len || (old_idx < old_len && old_list->at(old_idx)->from() <= new_list->at(new_idx)->from())) {
1566       combined_list->at_put(old_idx + new_idx, old_list->at(old_idx));
1567       old_idx++;
1568     } else {
1569       combined_list->at_put(old_idx + new_idx, new_list->at(new_idx));
1570       new_idx++;
1571     }
1572   }
1573 
1574   _sorted_intervals = combined_list;
1575 }
1576 
1577 
1578 void LinearScan::allocate_registers() {
1579   TIME_LINEAR_SCAN(timer_allocate_registers);
1580 
1581   Interval* precolored_cpu_intervals, *not_precolored_cpu_intervals;
1582   Interval* precolored_fpu_intervals, *not_precolored_fpu_intervals;
1583 
1584   create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals, is_precolored_cpu_interval, is_virtual_cpu_interval);
1585   if (has_fpu_registers()) {
1586     create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
1587 #ifdef ASSERT
1588   } else {
1589     // fpu register allocation is omitted because no virtual fpu registers are present
1590     // just check this again...
1591     create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
1592     assert(not_precolored_fpu_intervals == Interval::end(), "missed an uncolored fpu interval");
1593 #endif
1594   }
1595 
1596   // allocate cpu registers
1597   LinearScanWalker cpu_lsw(this, precolored_cpu_intervals, not_precolored_cpu_intervals);
1598   cpu_lsw.walk();
1599   cpu_lsw.finish_allocation();
1600 
1601   if (has_fpu_registers()) {
1602     // allocate fpu registers
1603     LinearScanWalker fpu_lsw(this, precolored_fpu_intervals, not_precolored_fpu_intervals);
1604     fpu_lsw.walk();
1605     fpu_lsw.finish_allocation();
1606   }
1607 }
1608 
1609 
1610 // ********** Phase 6: resolve data flow
1611 // (insert moves at edges between blocks if intervals have been split)
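     // example: if an interval was split so that its value is in a register at the
     // end of a predecessor block but on the stack at the beginning of the successor
     // block, a move between register and stack slot is required on that edge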
1612 
1613 // wrapper for Interval::split_child_at_op_id that performs a bailout in product mode
1614 // instead of returning NULL
1615 Interval* LinearScan::split_child_at_op_id(Interval* interval, int op_id, LIR_OpVisitState::OprMode mode) {
1616   Interval* result = interval->split_child_at_op_id(op_id, mode);
1617   if (result != NULL) {
1618     return result;
1619   }
1620 
1621   assert(false, "must find an interval, but do a clean bailout in product mode");
1622   result = new Interval(LIR_OprDesc::vreg_base);
1623   result->assign_reg(0);
1624   result->set_type(T_INT);
1625   BAILOUT_("LinearScan: interval is NULL", result);
1626 }
1627 
1628 
1629 Interval* LinearScan::interval_at_block_begin(BlockBegin* block, int reg_num) {
1630   assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
1631   assert(interval_at(reg_num) != NULL, "no interval found");
1632 
1633   return split_child_at_op_id(interval_at(reg_num), block->first_lir_instruction_id(), LIR_OpVisitState::outputMode);
1634 }
1635 
1636 Interval* LinearScan::interval_at_block_end(BlockBegin* block, int reg_num) {
1637   assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
1638   assert(interval_at(reg_num) != NULL, "no interval found");
1639 
1640   return split_child_at_op_id(interval_at(reg_num), block->last_lir_instruction_id() + 1, LIR_OpVisitState::outputMode);
1641 }
1642 
1643 Interval* LinearScan::interval_at_op_id(int reg_num, int op_id) {
1644   assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
1645   assert(interval_at(reg_num) != NULL, "no interval found");
1646 
1647   return split_child_at_op_id(interval_at(reg_num), op_id, LIR_OpVisitState::inputMode);
1648 }
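     // note for the three lookup functions above: the mode parameter is only used to
     // disambiguate which split child is returned when op_id lies exactly at a split
     // position (see Interval::split_child_at_op_id for the exact boundary handling)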
1649 
1650 
1651 void LinearScan::resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver) {
1652   DEBUG_ONLY(move_resolver.check_empty());
1653 
1654   const int num_regs = num_virtual_regs();
1655   const int size = live_set_size();
1656   const BitMap live_at_edge = to_block->live_in();
1657 
1658   // visit all registers where the live_at_edge bit is set
1659   for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
1660     assert(r < num_regs, "live information set for non-existing interval");
1661     assert(from_block->live_out().at(r) && to_block->live_in().at(r), "interval not live at this edge");
1662 
1663     Interval* from_interval = interval_at_block_end(from_block, r);
1664     Interval* to_interval = interval_at_block_begin(to_block, r);
1665 
1666     if (from_interval != to_interval && (from_interval->assigned_reg() != to_interval->assigned_reg() || from_interval->assigned_regHi() != to_interval->assigned_regHi())) {
1667       // need to insert move instruction
1668       move_resolver.add_mapping(from_interval, to_interval);
1669     }
1670   }
1671 }
1672 
1673 
1674 void LinearScan::resolve_find_insert_pos(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver) {
1675   if (from_block->number_of_sux() <= 1) {
1676     TRACE_LINEAR_SCAN(4, tty->print_cr("inserting moves at end of from_block B%d", from_block->block_id()));
1677 
1678     LIR_OpList* instructions = from_block->lir()->instructions_list();
1679     LIR_OpBranch* branch = instructions->last()->as_OpBranch();
1680     if (branch != NULL) {
1681       // insert moves before branch
1682       assert(branch->cond() == lir_cond_always, "block does not end with an unconditional jump");
1683       move_resolver.set_insert_position(from_block->lir(), instructions->length() - 2);
1684     } else {
1685       move_resolver.set_insert_position(from_block->lir(), instructions->length() - 1);
1686     }
1687 
1688   } else {
1689     TRACE_LINEAR_SCAN(4, tty->print_cr("inserting moves at beginning of to_block B%d", to_block->block_id()));
1690 #ifdef ASSERT
1691     assert(from_block->lir()->instructions_list()->at(0)->as_OpLabel() != NULL, "block does not start with a label");
1692 
1693     // because the number of predecessor edges matches the number of
1694     // successor edges, blocks which are reached by switch statements
1695     // may have more than one predecessor, but it is guaranteed
1696     // that all predecessors are the same block.
1697     for (int i = 0; i < to_block->number_of_preds(); i++) {
1698       assert(from_block == to_block->pred_at(i), "all critical edges must be broken");
1699     }
1700 #endif
1701 
1702     move_resolver.set_insert_position(to_block->lir(), 0);
1703   }
1704 }
1705 
1706 
1707 // insert necessary moves (spilling or reloading) at edges between blocks if interval has been split
1708 void LinearScan::resolve_data_flow() {
1709   TIME_LINEAR_SCAN(timer_resolve_data_flow);
1710 
1711   int num_blocks = block_count();
1712   MoveResolver move_resolver(this);
1713   BitMap block_completed(num_blocks);  block_completed.clear();
1714   BitMap already_resolved(num_blocks); already_resolved.clear();
1715 
1716   int i;
1717   for (i = 0; i < num_blocks; i++) {
1718     BlockBegin* block = block_at(i);
1719 
1720     // check if block has only one predecessor and only one successor
1721     if (block->number_of_preds() == 1 && block->number_of_sux() == 1 && block->number_of_exception_handlers() == 0) {
1722       LIR_OpList* instructions = block->lir()->instructions_list();
1723       assert(instructions->at(0)->code() == lir_label, "block must start with label");
1724       assert(instructions->last()->code() == lir_branch, "block with successors must end with branch");
1725       assert(instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block with successor must end with unconditional branch");
1726 
1727       // check if block is empty (only label and branch)
1728       if (instructions->length() == 2) {
1729         BlockBegin* pred = block->pred_at(0);
1730         BlockBegin* sux = block->sux_at(0);
1731 
1732         // prevent optimization of two consecutive blocks
1733         if (!block_completed.at(pred->linear_scan_number()) && !block_completed.at(sux->linear_scan_number())) {
1734           TRACE_LINEAR_SCAN(3, tty->print_cr("**** optimizing empty block B%d (pred: B%d, sux: B%d)", block->block_id(), pred->block_id(), sux->block_id()));
1735           block_completed.set_bit(block->linear_scan_number());
1736 
1737           // directly resolve between pred and sux (without looking at the empty block between)
1738           resolve_collect_mappings(pred, sux, move_resolver);
1739           if (move_resolver.has_mappings()) {
1740             move_resolver.set_insert_position(block->lir(), 0);
1741             move_resolver.resolve_and_append_moves();
1742           }
1743         }
1744       }
1745     }
1746   }
1747 
1748 
1749   for (i = 0; i < num_blocks; i++) {
1750     if (!block_completed.at(i)) {
1751       BlockBegin* from_block = block_at(i);
1752       already_resolved.set_from(block_completed);
1753 
1754       int num_sux = from_block->number_of_sux();
1755       for (int s = 0; s < num_sux; s++) {
1756         BlockBegin* to_block = from_block->sux_at(s);
1757 
1758         // check for duplicate edges between the same blocks (can happen with switch blocks)
1759         if (!already_resolved.at(to_block->linear_scan_number())) {
1760           TRACE_LINEAR_SCAN(3, tty->print_cr("**** processing edge between B%d and B%d", from_block->block_id(), to_block->block_id()));
1761           already_resolved.set_bit(to_block->linear_scan_number());
1762 
1763           // collect all intervals that have been split between from_block and to_block
1764           resolve_collect_mappings(from_block, to_block, move_resolver);
1765           if (move_resolver.has_mappings()) {
1766             resolve_find_insert_pos(from_block, to_block, move_resolver);
1767             move_resolver.resolve_and_append_moves();
1768           }
1769         }
1770       }
1771     }
1772   }
1773 }
1774 
1775 
1776 void LinearScan::resolve_exception_entry(BlockBegin* block, int reg_num, MoveResolver &move_resolver) {
1777   if (interval_at(reg_num) == NULL) {
1778     // if a phi function is never used, no interval is created -> ignore this
1779     return;
1780   }
1781 
1782   Interval* interval = interval_at_block_begin(block, reg_num);
1783   int reg = interval->assigned_reg();
1784   int regHi = interval->assigned_regHi();
1785 
1786   if ((reg < nof_regs && interval->always_in_memory()) ||
1787       (use_fpu_stack_allocation() && reg >= pd_first_fpu_reg && reg <= pd_last_fpu_reg)) {
1788     // the interval is split to get a short range that is located on the stack
1789     // in the following two cases:
1790     // * the interval started in memory (e.g. method parameter), but is currently in a register
1791     //   this is an optimization for exception handling that reduces the number of moves that
1792     //   are necessary for resolving the states when an exception is dispatched to this handler
1793     // * the interval would be on the fpu stack at the beginning of the exception handler
1794     //   this is not allowed because of the complicated fpu stack handling on Intel
1795 
1796     // range that will be spilled to memory
1797     int from_op_id = block->first_lir_instruction_id();
1798     int to_op_id = from_op_id + 1;  // short live range of length 1
1799     assert(interval->from() <= from_op_id && interval->to() >= to_op_id,
1800            "no split allowed between exception entry and first instruction");
1801 
1802     if (interval->from() != from_op_id) {
1803       // the part before from_op_id is unchanged
1804       interval = interval->split(from_op_id);
1805       interval->assign_reg(reg, regHi);
1806       append_interval(interval);
1807     }
1808     assert(interval->from() == from_op_id, "must be true now");
1809 
1810     Interval* spilled_part = interval;
1811     if (interval->to() != to_op_id) {
1812       // the part after to_op_id is unchanged
1813       spilled_part = interval->split_from_start(to_op_id);
1814       append_interval(spilled_part);
1815       move_resolver.add_mapping(spilled_part, interval);
1816     }
1817     assign_spill_slot(spilled_part);
1818 
1819     assert(spilled_part->from() == from_op_id && spilled_part->to() == to_op_id, "just checking");
1820   }
1821 }
1822 
1823 void LinearScan::resolve_exception_entry(BlockBegin* block, MoveResolver &move_resolver) {
1824   assert(block->is_set(BlockBegin::exception_entry_flag), "should not call otherwise");
1825   DEBUG_ONLY(move_resolver.check_empty());
1826 
1827   // visit all registers where the live_in bit is set
1828   int size = live_set_size();
1829   for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
1830     resolve_exception_entry(block, r, move_resolver);
1831   }
1832 
1833   // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
1834   for_each_phi_fun(block, phi,
1835     resolve_exception_entry(block, phi->operand()->vreg_number(), move_resolver)
1836   );
1837 
1838   if (move_resolver.has_mappings()) {
1839     // insert moves after first instruction
1840     move_resolver.set_insert_position(block->lir(), 1);
1841     move_resolver.resolve_and_append_moves();
1842   }
1843 }
1844 
1845 
1846 void LinearScan::resolve_exception_edge(XHandler* handler, int throwing_op_id, int reg_num, Phi* phi, MoveResolver &move_resolver) {
1847   if (interval_at(reg_num) == NULL) {
1848     // if a phi function is never used, no interval is created -> ignore this
1849     return;
1850   }
1851 
1852   // the computation of to_interval is the same as in resolve_collect_mappings,
1853   // but from_interval is more complicated because of phi functions
1854   BlockBegin* to_block = handler->entry_block();
1855   Interval* to_interval = interval_at_block_begin(to_block, reg_num);
1856 
1857   if (phi != NULL) {
1858     // phi function of the exception entry block
1859     // no moves are created for this phi function in the LIR_Generator, so the
1860     // interval at the throwing instruction must be searched using the operands
1861     // of the phi function
1862     Value from_value = phi->operand_at(handler->phi_operand());
1863 
1864     // with phi functions it can happen that the same from_value is used in
1865     // multiple mappings, so notify move-resolver that this is allowed
1866     move_resolver.set_multiple_reads_allowed();
1867 
1868     Constant* con = from_value->as_Constant();
1869     if (con != NULL && !con->is_pinned()) {
1870       // unpinned constants may have no register, so add mapping from constant to interval
1871       move_resolver.add_mapping(LIR_OprFact::value_type(con->type()), to_interval);
1872     } else {
1873       // search split child at the throwing op_id
1874       Interval* from_interval = interval_at_op_id(from_value->operand()->vreg_number(), throwing_op_id);
1875       move_resolver.add_mapping(from_interval, to_interval);
1876     }
1877 
1878   } else {
1879     // no phi function, so use reg_num also for from_interval
1880     // search split child at the throwing op_id
1881     Interval* from_interval = interval_at_op_id(reg_num, throwing_op_id);
1882     if (from_interval != to_interval) {
1883       // optimization to reduce the number of moves: when to_interval is on the stack and
1884       // the stack slot is known to always be correct, no move is necessary
1885       if (!from_interval->always_in_memory() || from_interval->canonical_spill_slot() != to_interval->assigned_reg()) {
1886         move_resolver.add_mapping(from_interval, to_interval);
1887       }
1888     }
1889   }
1890 }
1891 
1892 void LinearScan::resolve_exception_edge(XHandler* handler, int throwing_op_id, MoveResolver &move_resolver) {
1893   TRACE_LINEAR_SCAN(4, tty->print_cr("resolving exception handler B%d: throwing_op_id=%d", handler->entry_block()->block_id(), throwing_op_id));
1894 
1895   DEBUG_ONLY(move_resolver.check_empty());
1896   assert(handler->lir_op_id() == -1, "already processed this xhandler");
1897   DEBUG_ONLY(handler->set_lir_op_id(throwing_op_id));
1898   assert(handler->entry_code() == NULL, "code already present");
1899 
1900   // visit all registers where the live_in bit is set
1901   BlockBegin* block = handler->entry_block();
1902   int size = live_set_size();
1903   for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
1904     resolve_exception_edge(handler, throwing_op_id, r, NULL, move_resolver);
1905   }
1906 
1907   // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
1908   for_each_phi_fun(block, phi,
1909     resolve_exception_edge(handler, throwing_op_id, phi->operand()->vreg_number(), phi, move_resolver)
1910   );
1911 
1912   if (move_resolver.has_mappings()) {
1913     LIR_List* entry_code = new LIR_List(compilation());
1914     move_resolver.set_insert_position(entry_code, 0);
1915     move_resolver.resolve_and_append_moves();
1916 
1917     entry_code->jump(handler->entry_block());
1918     handler->set_entry_code(entry_code);
1919   }
1920 }
1921 
1922 
1923 void LinearScan::resolve_exception_handlers() {
1924   MoveResolver move_resolver(this);
1925   LIR_OpVisitState visitor;
1926   int num_blocks = block_count();
1927 
1928   int i;
1929   for (i = 0; i < num_blocks; i++) {
1930     BlockBegin* block = block_at(i);
1931     if (block->is_set(BlockBegin::exception_entry_flag)) {
1932       resolve_exception_entry(block, move_resolver);
1933     }
1934   }
1935 
1936   for (i = 0; i < num_blocks; i++) {
1937     BlockBegin* block = block_at(i);
1938     LIR_List* ops = block->lir();
1939     int num_ops = ops->length();
1940 
1941     // iterate all instructions of the block. skip the first because it is always a label
1942     assert(visitor.no_operands(ops->at(0)), "first operation must always be a label");
1943     for (int j = 1; j < num_ops; j++) {
1944       LIR_Op* op = ops->at(j);
1945       int op_id = op->id();
1946 
1947       if (op_id != -1 && has_info(op_id)) {
1948         // visit operation to collect all operands
1949         visitor.visit(op);
1950         assert(visitor.info_count() > 0, "should not visit otherwise");
1951 
1952         XHandlers* xhandlers = visitor.all_xhandler();
1953         int n = xhandlers->length();
1954         for (int k = 0; k < n; k++) {
1955           resolve_exception_edge(xhandlers->handler_at(k), op_id, move_resolver);
1956         }
1957 
1958 #ifdef ASSERT
1959       } else {
1960         visitor.visit(op);
1961         assert(visitor.all_xhandler()->length() == 0, "missed exception handler");
1962 #endif
1963       }
1964     }
1965   }
1966 }
1967 
1968 
1969 // ********** Phase 7: assign register numbers back to LIR
1970 // (includes computation of debug information and oop maps)
1971 
1972 VMReg LinearScan::vm_reg_for_interval(Interval* interval) {
1973   VMReg reg = interval->cached_vm_reg();
1974   if (!reg->is_valid()) {
1975     reg = vm_reg_for_operand(operand_for_interval(interval));
1976     interval->set_cached_vm_reg(reg);
1977   }
1978   assert(reg == vm_reg_for_operand(operand_for_interval(interval)), "wrong cached value");
1979   return reg;
1980 }
1981 
1982 VMReg LinearScan::vm_reg_for_operand(LIR_Opr opr) {
1983   assert(opr->is_oop(), "currently only implemented for oop operands");
1984   return frame_map()->regname(opr);
1985 }
1986 
1987 
1988 LIR_Opr LinearScan::operand_for_interval(Interval* interval) {
1989   LIR_Opr opr = interval->cached_opr();
1990   if (opr->is_illegal()) {
1991     opr = calc_operand_for_interval(interval);
1992     interval->set_cached_opr(opr);
1993   }
1994 
1995   assert(opr == calc_operand_for_interval(interval), "wrong cached value");
1996   return opr;
1997 }
1998 
1999 LIR_Opr LinearScan::calc_operand_for_interval(const Interval* interval) {
2000   int assigned_reg = interval->assigned_reg();
2001   BasicType type = interval->type();
2002 
2003   if (assigned_reg >= nof_regs) {
2004     // stack slot
2005     assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2006     return LIR_OprFact::stack(assigned_reg - nof_regs, type);
2007 
2008   } else {
2009     // register
2010     switch (type) {
2011       case T_OBJECT: {
2012         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2013         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2014         return LIR_OprFact::single_cpu_oop(assigned_reg);
2015       }
2016 
2017       case T_INT: {
2018         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2019         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2020         return LIR_OprFact::single_cpu(assigned_reg);
2021       }
2022 
2023       case T_LONG: {
2024         int assigned_regHi = interval->assigned_regHi();
2025         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2026         assert(num_physical_regs(T_LONG) == 1 ||
2027                (assigned_regHi >= pd_first_cpu_reg && assigned_regHi <= pd_last_cpu_reg), "no cpu register");
2028 
2029         assert(assigned_reg != assigned_regHi, "invalid allocation");
2030         assert(num_physical_regs(T_LONG) == 1 || assigned_reg < assigned_regHi,
2031                "register numbers must be sorted (ensure that e.g. a move from eax,ebx to ebx,eax can not occur)");
2032         assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must match");
2033         if (requires_adjacent_regs(T_LONG)) {
2034           assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
2035         }
2036 
2037 #ifdef _LP64
2038         return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
2039 #else
2040 #ifdef SPARC
2041         return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
2042 #else
2043         return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
2044 #endif // SPARC
2045 #endif // LP64
2046       }
2047 
2048       case T_FLOAT: {
2049 #ifdef X86
2050         if (UseSSE >= 1) {
2051           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
2052           assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2053           return LIR_OprFact::single_xmm(assigned_reg - pd_first_xmm_reg);
2054         }
2055 #endif
2056 
2057         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2058         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2059         return LIR_OprFact::single_fpu(assigned_reg - pd_first_fpu_reg);
2060       }
2061 
2062       case T_DOUBLE: {
2063 #ifdef X86
2064         if (UseSSE >= 2) {
2065           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
2066           assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
2067           return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
2068         }
2069 #endif
2070 
2071 #ifdef SPARC
2072         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2073         assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2074         assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2075         LIR_Opr result = LIR_OprFact::double_fpu(interval->assigned_regHi() - pd_first_fpu_reg, assigned_reg - pd_first_fpu_reg);
2076 #else
2077         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2078         assert(interval->assigned_regHi() == any_reg, "must not have hi register (double fpu values are stored in one register on Intel)");
2079         LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg);
2080 #endif
2081         return result;
2082       }
2083 
2084       default: {
2085         ShouldNotReachHere();
2086         return LIR_OprFact::illegalOpr;
2087       }
2088     }
2089   }
2090 }
2091 
2092 LIR_Opr LinearScan::canonical_spill_opr(Interval* interval) {
2093   assert(interval->canonical_spill_slot() >= nof_regs, "canonical spill slot not set");
2094   return LIR_OprFact::stack(interval->canonical_spill_slot() - nof_regs, interval->type());
2095 }
2096 
2097 LIR_Opr LinearScan::color_lir_opr(LIR_Opr opr, int op_id, LIR_OpVisitState::OprMode mode) {
2098   assert(opr->is_virtual(), "should not call this otherwise");
2099 
2100   Interval* interval = interval_at(opr->vreg_number());
2101   assert(interval != NULL, "interval must exist");
2102 
2103   if (op_id != -1) {
2104 #ifdef ASSERT
2105     BlockBegin* block = block_of_op_with_id(op_id);
2106     if (block->number_of_sux() <= 1 && op_id == block->last_lir_instruction_id()) {
2107       // spill moves could have been appended at the end of this block, but
2108       // before the branch instruction; in that case the split child information
2109       // for this branch would be incorrect.
2110       LIR_OpBranch* branch = block->lir()->instructions_list()->last()->as_OpBranch();
2111       if (branch != NULL) {
2112         if (block->live_out().at(opr->vreg_number())) {
2113           assert(branch->cond() == lir_cond_always, "block does not end with an unconditional jump");
2114           assert(false, "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolve_data_flow)");
2115         }
2116       }
2117     }
2118 #endif
2119 
2120     // operands are not changed when an interval is split during allocation,
2121     // so search the right interval here
2122     interval = split_child_at_op_id(interval, op_id, mode);
2123   }
2124 
2125   LIR_Opr res = operand_for_interval(interval);
2126 
2127 #ifdef X86
2128   // new semantics for is_last_use: it is set not only at the definite end of an
2129   // interval, but also before a hole.
2130   // This may still miss some cases (e.g. for dead values), but it is not necessary
2131   // that the last-use information is completely correct;
2132   // the information is only needed for fpu stack allocation
2133   if (res->is_fpu_register()) {
2134     if (opr->is_last_use() || op_id == interval->to() || (op_id != -1 && interval->has_hole_between(op_id, op_id + 1))) {
2135       assert(op_id == -1 || !is_block_begin(op_id), "holes at begin of block may also result from control flow");
2136       res = res->make_last_use();
2137     }
2138   }
2139 #endif
2140 
2141   assert(!gen()->is_vreg_flag_set(opr->vreg_number(), LIRGenerator::callee_saved) || !FrameMap::is_caller_save_register(res), "bad allocation");
2142 
2143   return res;
2144 }
2145 
2146 
2147 #ifdef ASSERT
2148 // some methods used to check correctness of debug information
2149 
2150 void assert_no_register_values(GrowableArray<ScopeValue*>* values) {
2151   if (values == NULL) {
2152     return;
2153   }
2154 
2155   for (int i = 0; i < values->length(); i++) {
2156     ScopeValue* value = values->at(i);
2157 
2158     if (value->is_location()) {
2159       Location location = ((LocationValue*)value)->location();
2160       assert(location.where() == Location::on_stack, "value is in register");
2161     }
2162   }
2163 }
2164 
2165 void assert_no_register_values(GrowableArray<MonitorValue*>* values) {
2166   if (values == NULL) {
2167     return;
2168   }
2169 
2170   for (int i = 0; i < values->length(); i++) {
2171     MonitorValue* value = values->at(i);
2172 
2173     if (value->owner()->is_location()) {
2174       Location location = ((LocationValue*)value->owner())->location();
2175       assert(location.where() == Location::on_stack, "owner is in register");
2176     }
2177     assert(value->basic_lock().where() == Location::on_stack, "basic_lock is in register");
2178   }
2179 }
2180 
2181 void assert_equal(Location l1, Location l2) {
2182   assert(l1.where() == l2.where() && l1.type() == l2.type() && l1.offset() == l2.offset(), "");
2183 }
2184 
2185 void assert_equal(ScopeValue* v1, ScopeValue* v2) {
2186   if (v1->is_location()) {
2187     assert(v2->is_location(), "");
2188     assert_equal(((LocationValue*)v1)->location(), ((LocationValue*)v2)->location());
2189   } else if (v1->is_constant_int()) {
2190     assert(v2->is_constant_int(), "");
2191     assert(((ConstantIntValue*)v1)->value() == ((ConstantIntValue*)v2)->value(), "");
2192   } else if (v1->is_constant_double()) {
2193     assert(v2->is_constant_double(), "");
2194     assert(((ConstantDoubleValue*)v1)->value() == ((ConstantDoubleValue*)v2)->value(), "");
2195   } else if (v1->is_constant_long()) {
2196     assert(v2->is_constant_long(), "");
2197     assert(((ConstantLongValue*)v1)->value() == ((ConstantLongValue*)v2)->value(), "");
2198   } else if (v1->is_constant_oop()) {
2199     assert(v2->is_constant_oop(), "");
2200     assert(((ConstantOopWriteValue*)v1)->value() == ((ConstantOopWriteValue*)v2)->value(), "");
2201   } else {
2202     ShouldNotReachHere();
2203   }
2204 }
2205 
2206 void assert_equal(MonitorValue* m1, MonitorValue* m2) {
2207   assert_equal(m1->owner(), m2->owner());
2208   assert_equal(m1->basic_lock(), m2->basic_lock());
2209 }
2210 
2211 void assert_equal(IRScopeDebugInfo* d1, IRScopeDebugInfo* d2) {
2212   assert(d1->scope() == d2->scope(), "not equal");
2213   assert(d1->bci() == d2->bci(), "not equal");
2214 
2215   if (d1->locals() != NULL) {
2216     assert(d1->locals() != NULL && d2->locals() != NULL, "not equal");
2217     assert(d1->locals()->length() == d2->locals()->length(), "not equal");
2218     for (int i = 0; i < d1->locals()->length(); i++) {
2219       assert_equal(d1->locals()->at(i), d2->locals()->at(i));
2220     }
2221   } else {
2222     assert(d1->locals() == NULL && d2->locals() == NULL, "not equal");
2223   }
2224 
2225   if (d1->expressions() != NULL) {
2226     assert(d1->expressions() != NULL && d2->expressions() != NULL, "not equal");
2227     assert(d1->expressions()->length() == d2->expressions()->length(), "not equal");
2228     for (int i = 0; i < d1->expressions()->length(); i++) {
2229       assert_equal(d1->expressions()->at(i), d2->expressions()->at(i));
2230     }
2231   } else {
2232     assert(d1->expressions() == NULL && d2->expressions() == NULL, "not equal");
2233   }
2234 
2235   if (d1->monitors() != NULL) {
2236     assert(d1->monitors() != NULL && d2->monitors() != NULL, "not equal");
2237     assert(d1->monitors()->length() == d2->monitors()->length(), "not equal");
2238     for (int i = 0; i < d1->monitors()->length(); i++) {
2239       assert_equal(d1->monitors()->at(i), d2->monitors()->at(i));
2240     }
2241   } else {
2242     assert(d1->monitors() == NULL && d2->monitors() == NULL, "not equal");
2243   }
2244 
2245   if (d1->caller() != NULL) {
2246     assert(d1->caller() != NULL && d2->caller() != NULL, "not equal");
2247     assert_equal(d1->caller(), d2->caller());
2248   } else {
2249     assert(d1->caller() == NULL && d2->caller() == NULL, "not equal");
2250   }
2251 }
2252 
2253 void check_stack_depth(CodeEmitInfo* info, int stack_end) {
2254   if (info->bci() != SynchronizationEntryBCI && !info->scope()->method()->is_native()) {
2255     Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->bci());
2256     switch (code) {
2257       case Bytecodes::_ifnull    : // fall through
2258       case Bytecodes::_ifnonnull : // fall through
2259       case Bytecodes::_ifeq      : // fall through
2260       case Bytecodes::_ifne      : // fall through
2261       case Bytecodes::_iflt      : // fall through
2262       case Bytecodes::_ifge      : // fall through
2263       case Bytecodes::_ifgt      : // fall through
2264       case Bytecodes::_ifle      : // fall through
2265       case Bytecodes::_if_icmpeq : // fall through
2266       case Bytecodes::_if_icmpne : // fall through
2267       case Bytecodes::_if_icmplt : // fall through
2268       case Bytecodes::_if_icmpge : // fall through
2269       case Bytecodes::_if_icmpgt : // fall through
2270       case Bytecodes::_if_icmple : // fall through
2271       case Bytecodes::_if_acmpeq : // fall through
2272       case Bytecodes::_if_acmpne :
2273         assert(stack_end >= -Bytecodes::depth(code), "must have non-empty expression stack at if bytecode");
2274         break;
2275     }
2276   }
2277 }
2278 
2279 #endif // ASSERT
2280 
2281 
2282 IntervalWalker* LinearScan::init_compute_oop_maps() {
2283   // setup lists of potential oops for walking
2284   Interval* oop_intervals;
2285   Interval* non_oop_intervals;
2286 
2287   create_unhandled_lists(&oop_intervals, &non_oop_intervals, is_oop_interval, NULL);
2288 
2289   // intervals that have no oops inside need not be processed.
2290   // to ensure walking until the last instruction id, add a dummy interval
2291   // with a high operation id
2292   non_oop_intervals = new Interval(any_reg);
2293   non_oop_intervals->add_range(max_jint - 2, max_jint - 1);
2294 
2295   return new IntervalWalker(this, oop_intervals, non_oop_intervals);
2296 }
2297 
2298 
2299 OopMap* LinearScan::compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo* info, bool is_call_site) {
2300   TRACE_LINEAR_SCAN(3, tty->print_cr("creating oop map at op_id %d", op->id()));
2301 
2302   // walk before the current operation -> intervals that start at
2303   // the operation (= output operands of the operation) are not
2304   // included in the oop map
2305   iw->walk_before(op->id());
2306 
2307   int frame_size = frame_map()->framesize();
2308   int arg_count = frame_map()->oop_map_arg_count();
2309   OopMap* map = new OopMap(frame_size, arg_count);
2310 
2311   // Check if this is a patch site.
2312   bool is_patch_info = false;
2313   if (op->code() == lir_move) {
2314     assert(!is_call_site, "move must not be a call site");
2315     assert(op->as_Op1() != NULL, "move must be LIR_Op1");
2316     LIR_Op1* move = (LIR_Op1*)op;
2317 
2318     is_patch_info = move->patch_code() != lir_patch_none;
2319   }
2320 
2321   // Iterate through active intervals
2322   for (Interval* interval = iw->active_first(fixedKind); interval != Interval::end(); interval = interval->next()) {
2323     int assigned_reg = interval->assigned_reg();
2324 
2325     assert(interval->current_from() <= op->id() && op->id() <= interval->current_to(), "interval should not be active otherwise");
2326     assert(interval->assigned_regHi() == any_reg, "oop must be single word");
2327     assert(interval->reg_num() >= LIR_OprDesc::vreg_base, "fixed interval found");
2328 
2329     // Check if this range covers the instruction. Intervals that
2330     // start or end at the current operation are not included in the
2331     // oop map, except in the case of patching moves.  For patching
2332     // moves, any intervals which end at this instruction are included
2333     // in the oop map since we may safepoint while doing the patch
2334     // before we've consumed the inputs.
2335     if (is_patch_info || op->id() < interval->current_to()) {
2336 
2337       // caller-save registers must not be included into oop-maps at calls
2338       assert(!is_call_site || assigned_reg >= nof_regs || !is_caller_save(assigned_reg), "interval is in a caller-save register at a call -> register will be overwritten");
2339 
2340       VMReg name = vm_reg_for_interval(interval);
2341       map->set_oop(name);
2342 
2343       // Spill optimization: when the stack value is guaranteed to always be correct,
2344       // it must be added to the oop map even if the interval is currently in a register
2345       if (interval->always_in_memory() &&
2346           op->id() > interval->spill_definition_pos() &&
2347           interval->assigned_reg() != interval->canonical_spill_slot()) {
2348         assert(interval->spill_definition_pos() > 0, "position not set correctly");
2349         assert(interval->canonical_spill_slot() >= LinearScan::nof_regs, "no spill slot assigned");
2350         assert(interval->assigned_reg() < LinearScan::nof_regs, "interval is on stack, so stack slot is registered twice");
2351 
2352         map->set_oop(frame_map()->slot_regname(interval->canonical_spill_slot() - LinearScan::nof_regs));
2353       }
2354     }
2355   }
2356 
2357   // add oops from lock stack
2358   assert(info->stack() != NULL, "CodeEmitInfo must always have a stack");
2359   int locks_count = info->stack()->locks_size();
2360   for (int i = 0; i < locks_count; i++) {
2361     map->set_oop(frame_map()->monitor_object_regname(i));
2362   }
2363 
2364   return map;
2365 }
2366 
2367 
2368 void LinearScan::compute_oop_map(IntervalWalker* iw, const LIR_OpVisitState &visitor, LIR_Op* op) {
2369   assert(visitor.info_count() > 0, "no oop map needed");
2370 
2371   // compute oop_map only for first CodeEmitInfo
2372   // because it is (in most cases) equal for all other infos of the same operation
2373   CodeEmitInfo* first_info = visitor.info_at(0);
2374   OopMap* first_oop_map = compute_oop_map(iw, op, first_info, visitor.has_call());
2375 
2376   for (int i = 0; i < visitor.info_count(); i++) {
2377     CodeEmitInfo* info = visitor.info_at(i);
2378     OopMap* oop_map = first_oop_map;
2379 
2380     if (info->stack()->locks_size() != first_info->stack()->locks_size()) {
2381       // this info has a different number of locks than the precomputed oop map
2382       // (possible for lock and unlock instructions) -> compute oop map with
2383       // correct lock information
2384       oop_map = compute_oop_map(iw, op, info, visitor.has_call());
2385     }
2386 
2387     if (info->_oop_map == NULL) {
2388       info->_oop_map = oop_map;
2389     } else {
2390       // a CodeEmitInfo cannot be shared between different LIR instructions
2391       // because interval splitting can occur anywhere between two instructions
2392       // and so the oop maps must be different
2393       // -> check if the already set oop_map is exactly the one calculated for this operation
2394       assert(info->_oop_map == oop_map, "same CodeEmitInfo used for multiple LIR instructions");
2395     }
2396   }
2397 }
2398 
2399 
2400 // frequently used constants
2401 ConstantOopWriteValue LinearScan::_oop_null_scope_value = ConstantOopWriteValue(NULL);
2402 ConstantIntValue      LinearScan::_int_m1_scope_value = ConstantIntValue(-1);
2403 ConstantIntValue      LinearScan::_int_0_scope_value =  ConstantIntValue(0);
2404 ConstantIntValue      LinearScan::_int_1_scope_value =  ConstantIntValue(1);
2405 ConstantIntValue      LinearScan::_int_2_scope_value =  ConstantIntValue(2);
2406 LocationValue         _illegal_value = LocationValue(Location());
2407 
2408 void LinearScan::init_compute_debug_info() {
2409   // cache for frequently used scope values
2410   // (cpu registers and stack slots)
2411   _scope_value_cache = ScopeValueArray((LinearScan::nof_cpu_regs + frame_map()->argcount() + max_spills()) * 2, NULL);
2412 }
2413 
2414 MonitorValue* LinearScan::location_for_monitor_index(int monitor_index) {
2415   Location loc;
2416   if (!frame_map()->location_for_monitor_object(monitor_index, &loc)) {
2417     bailout("too large frame");
2418   }
2419   ScopeValue* object_scope_value = new LocationValue(loc);
2420 
2421   if (!frame_map()->location_for_monitor_lock(monitor_index, &loc)) {
2422     bailout("too large frame");
2423   }
2424   return new MonitorValue(object_scope_value, loc);
2425 }
2426 
2427 LocationValue* LinearScan::location_for_name(int name, Location::Type loc_type) {
2428   Location loc;
2429   if (!frame_map()->locations_for_slot(name, loc_type, &loc)) {
2430     bailout("too large frame");
2431   }
2432   return new LocationValue(loc);
2433 }
2434 
2435 
2436 int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
2437   assert(opr->is_constant(), "should not be called otherwise");
2438 
2439   LIR_Const* c = opr->as_constant_ptr();
2440   BasicType t = c->type();
2441   switch (t) {
2442     case T_OBJECT: {
2443       jobject value = c->as_jobject();
2444       if (value == NULL) {
2445         scope_values->append(&_oop_null_scope_value);
2446       } else {
2447         scope_values->append(new ConstantOopWriteValue(c->as_jobject()));
2448       }
2449       return 1;
2450     }
2451 
2452     case T_INT: // fall through
2453     case T_FLOAT: {
2454       int value = c->as_jint_bits();
2455       switch (value) {
2456         case -1: scope_values->append(&_int_m1_scope_value); break;
2457         case 0:  scope_values->append(&_int_0_scope_value); break;
2458         case 1:  scope_values->append(&_int_1_scope_value); break;
2459         case 2:  scope_values->append(&_int_2_scope_value); break;
2460         default: scope_values->append(new ConstantIntValue(c->as_jint_bits())); break;
2461       }
2462       return 1;
2463     }
2464 
2465     case T_LONG: // fall through
2466     case T_DOUBLE: {
2467 #ifdef _LP64
2468       scope_values->append(&_int_0_scope_value);
2469       scope_values->append(new ConstantLongValue(c->as_jlong_bits()));
2470 #else
2471       if (hi_word_offset_in_bytes > lo_word_offset_in_bytes) {
2472         scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
2473         scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
2474       } else {
2475         scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
2476         scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
2477       }
2478 #endif
2479       return 2;
2480     }
2481 
2482     case T_ADDRESS: {
2483 #ifdef _LP64
2484       scope_values->append(new ConstantLongValue(c->as_jint()));
2485 #else
2486       scope_values->append(new ConstantIntValue(c->as_jint()));
2487 #endif
2488       return 1;
2489     }
2490 
2491     default:
2492       ShouldNotReachHere();
2493       return -1;
2494   }
2495 }
2496 
2497 int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
2498   if (opr->is_single_stack()) {
2499     int stack_idx = opr->single_stack_ix();
2500     bool is_oop = opr->is_oop_register();
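         // the cache holds two entries per location (see init_compute_debug_info):
         // cpu registers first, then stack slots, with one entry each for the
         // non-oop and the oop variant of the location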
2501     int cache_idx = (stack_idx + LinearScan::nof_cpu_regs) * 2 + (is_oop ? 1 : 0);
2502 
2503     ScopeValue* sv = _scope_value_cache.at(cache_idx);
2504     if (sv == NULL) {
2505       Location::Type loc_type = is_oop ? Location::oop : Location::normal;
2506       sv = location_for_name(stack_idx, loc_type);
2507       _scope_value_cache.at_put(cache_idx, sv);
2508     }
2509 
2510     // check if cached value is correct
2511     DEBUG_ONLY(assert_equal(sv, location_for_name(stack_idx, is_oop ? Location::oop : Location::normal)));
2512 
2513     scope_values->append(sv);
2514     return 1;
2515 
2516   } else if (opr->is_single_cpu()) {
2517     bool is_oop = opr->is_oop_register();
2518     int cache_idx = opr->cpu_regnr() * 2 + (is_oop ? 1 : 0);
2519     Location::Type int_loc_type = NOT_LP64(Location::normal) LP64_ONLY(Location::int_in_long);
2520 
2521     ScopeValue* sv = _scope_value_cache.at(cache_idx);
2522     if (sv == NULL) {
2523       Location::Type loc_type = is_oop ? Location::oop : int_loc_type;
2524       VMReg rname = frame_map()->regname(opr);
2525       sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
2526       _scope_value_cache.at_put(cache_idx, sv);
2527     }
2528 
2529     // check if cached value is correct
2530     DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : int_loc_type, frame_map()->regname(opr)))));
2531 
2532     scope_values->append(sv);
2533     return 1;
2534 
2535 #ifdef X86
2536   } else if (opr->is_single_xmm()) {
2537     VMReg rname = opr->as_xmm_float_reg()->as_VMReg();
2538     LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname));
2539 
2540     scope_values->append(sv);
2541     return 1;
2542 #endif
2543 
2544   } else if (opr->is_single_fpu()) {
2545 #ifdef X86
2546     // the exact location of fpu stack values is only known
2547     // during fpu stack allocation, so the stack allocator object
2548     // must be present
2549     assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
2550     assert(_fpu_stack_allocator != NULL, "must be present");
2551     opr = _fpu_stack_allocator->to_fpu_stack(opr);
2552 #endif
2553 
2554     Location::Type loc_type = float_saved_as_double ? Location::float_in_dbl : Location::normal;
2555     VMReg rname = frame_map()->fpu_regname(opr->fpu_regnr());
2556     LocationValue* sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
2557 
2558     scope_values->append(sv);
2559     return 1;
2560 
2561   } else {
2562     // double-size operands
2563 
2564     ScopeValue* first;
2565     ScopeValue* second;
2566 
2567     if (opr->is_double_stack()) {
2568 #ifdef _LP64
2569       Location loc1;
2570       Location::Type loc_type = opr->type() == T_LONG ? Location::lng : Location::dbl;
2571       if (!frame_map()->locations_for_slot(opr->double_stack_ix(), loc_type, &loc1, NULL)) {
2572         bailout("too large frame");
2573       }
2574       // Does this reverse on x86 vs. sparc?
2575       first =  new LocationValue(loc1);
2576       second = &_int_0_scope_value;
2577 #else
2578       Location loc1, loc2;
2579       if (!frame_map()->locations_for_slot(opr->double_stack_ix(), Location::normal, &loc1, &loc2)) {
2580         bailout("too large frame");
2581       }
2582       first =  new LocationValue(loc1);
2583       second = new LocationValue(loc2);
2584 #endif // _LP64
2585 
2586     } else if (opr->is_double_cpu()) {
2587 #ifdef _LP64
2588       VMReg rname_first = opr->as_register_lo()->as_VMReg();
2589       first = new LocationValue(Location::new_reg_loc(Location::lng, rname_first));
2590       second = &_int_0_scope_value;
2591 #else
2592       VMReg rname_first = opr->as_register_lo()->as_VMReg();
2593       VMReg rname_second = opr->as_register_hi()->as_VMReg();
2594 
2595       if (hi_word_offset_in_bytes < lo_word_offset_in_bytes) {
2596         // lo/hi and swapped relative to first and second, so swap them
2597         VMReg tmp = rname_first;
2598         rname_first = rname_second;
2599         rname_second = tmp;
2600       }
2601 
2602       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
2603       second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
2604 #endif //_LP64
2605 
2606 
2607 #ifdef X86
2608     } else if (opr->is_double_xmm()) {
2609       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
2610       VMReg rname_first  = opr->as_xmm_double_reg()->as_VMReg();
2611       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
2612       // %%% This is probably a waste but we'll keep things as they were for now
2613       if (true) {
2614         VMReg rname_second = rname_first->next();
2615         second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
2616       }
2617 #endif
2618 
2619     } else if (opr->is_double_fpu()) {
2620       // On SPARC, fpu_regnrLo/fpu_regnrHi represent the two halves of
2621       // the double as float registers in the native ordering. On X86,
2622       // fpu_regnrLo is an FPU stack slot whose VMReg represents
2623       // the low-order word of the double and fpu_regnrLo + 1 is the
2624       // name for the other half.  *first and *second must represent the
2625       // least and most significant words, respectively.
2626 
2627 #ifdef X86
2628       // the exact location of fpu stack values is only known
2629       // during fpu stack allocation, so the stack allocator object
2630       // must be present
2631       assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
2632       assert(_fpu_stack_allocator != NULL, "must be present");
2633       opr = _fpu_stack_allocator->to_fpu_stack(opr);
2634 
2635       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
2636 #endif
2637 #ifdef SPARC
2638       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
2639 #endif
2640 
2641       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
2642 
2643       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
2644       // %%% This is probably a waste but we'll keep things as they were for now
2645       if (true) {
2646         VMReg rname_second = rname_first->next();
2647         second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
2648       }
2649 
2650     } else {
2651       ShouldNotReachHere();
2652       first = NULL;
2653       second = NULL;
2654     }
2655 
2656     assert(first != NULL && second != NULL, "must be set");
2657     // The convention the interpreter uses is that the second local
2658     // holds the first raw word of the native double representation.
2659     // This is actually reasonable, since locals and stack arrays
2660     // grow downwards in all implementations.
2661     // (If, on some machine, the interpreter's Java locals or stack
2662     // were to grow upwards, the embedded doubles would be word-swapped.)
2663     scope_values->append(second);
2664     scope_values->append(first);
2665     return 2;
2666   }
2667 }
2668 
2669 
2670 int LinearScan::append_scope_value(int op_id, Value value, GrowableArray<ScopeValue*>* scope_values) {
2671   if (value != NULL) {
2672     LIR_Opr opr = value->operand();
2673     Constant* con = value->as_Constant();
2674 
2675     assert(con == NULL || opr->is_virtual() || opr->is_constant() || opr->is_illegal(), "assumption: Constant instructions have only constant operands (or illegal if constant is optimized away)");
2676     assert(con != NULL || opr->is_virtual(), "assumption: non-Constant instructions have only virtual operands");
2677 
2678     if (con != NULL && !con->is_pinned() && !opr->is_constant()) {
2679       // Unpinned constants may have a virtual operand for a part of their lifetime
2680       // or may be illegal when the constant was optimized away,
2681       // so always use a constant operand
2682       opr = LIR_OprFact::value_type(con->type());
2683     }
2684     assert(opr->is_virtual() || opr->is_constant(), "other cases not allowed here");
2685 
2686     if (opr->is_virtual()) {
2687       LIR_OpVisitState::OprMode mode = LIR_OpVisitState::inputMode;
2688 
2689       BlockBegin* block = block_of_op_with_id(op_id);
2690       if (block->number_of_sux() == 1 && op_id == block->last_lir_instruction_id()) {
2691         // generating debug information for the last instruction of a block.
2692         // if this instruction is a branch, spill moves are inserted before this branch
2693         // and so the wrong operand would be returned (spill moves at block boundaries are not
2694         // considered in the live ranges of intervals)
2695         // Solution: use the first op_id of the branch target block instead.
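             // (e.g., hypothetically, if the branch has op_id 40 and the target block's
             //  first op_id is 42, the operand's location is queried at op_id 42 in output mode)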
2696         if (block->lir()->instructions_list()->last()->as_OpBranch() != NULL) {
2697           if (block->live_out().at(opr->vreg_number())) {
2698             op_id = block->sux_at(0)->first_lir_instruction_id();
2699             mode = LIR_OpVisitState::outputMode;
2700           }
2701         }
2702       }
2703 
2704       // Get current location of operand
2705       // The operand must be live because debug information is considered when building the intervals.
2706       // If the interval is not live, color_lir_opr will cause an assertion failure.
2707       opr = color_lir_opr(opr, op_id, mode);
2708       assert(!has_call(op_id) || opr->is_stack() || !is_caller_save(reg_num(opr)), "can not have caller-save register operands at calls");
2709 
2710       // Append to ScopeValue array
2711       return append_scope_value_for_operand(opr, scope_values);
2712 
2713     } else {
2714       assert(value->as_Constant() != NULL, "all other instructions have only virtual operands");
2715       assert(opr->is_constant(), "operand must be constant");
2716 
2717       return append_scope_value_for_constant(opr, scope_values);
2718     }
2719   } else {
2720     // append a dummy value because real value not needed
2721     scope_values->append(&_illegal_value);
2722     return 1;
2723   }
2724 }
2725 
2726 
2727 IRScopeDebugInfo* LinearScan::compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state, int cur_bci, int stack_end, int locks_end) {
2728   IRScopeDebugInfo* caller_debug_info = NULL;
2729   int stack_begin, locks_begin;
2730 
2731   ValueStack* caller_state = cur_scope->caller_state();
2732   if (caller_state != NULL) {
2733     // process recursively to compute outermost scope first
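         // (e.g., for a hypothetical inlining chain a() -> b() -> c(), the scope of
         //  a() is described first and the innermost scope c() last)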
2734     stack_begin = caller_state->stack_size();
2735     locks_begin = caller_state->locks_size();
2736     caller_debug_info = compute_debug_info_for_scope(op_id, cur_scope->caller(), caller_state, innermost_state, cur_scope->caller_bci(), stack_begin, locks_begin);
2737   } else {
2738     stack_begin = 0;
2739     locks_begin = 0;
2740   }
2741 
2742   // initialize these to null.
2743   // If we don't need deopt info or there are no locals, expressions or monitors,
2744   // then these get recorded as no information and we avoid allocating 0-length arrays.
2745   GrowableArray<ScopeValue*>*   locals      = NULL;
2746   GrowableArray<ScopeValue*>*   expressions = NULL;
2747   GrowableArray<MonitorValue*>* monitors    = NULL;
2748 
2749   // describe local variable values
2750   int nof_locals = cur_scope->method()->max_locals();
2751   if (nof_locals > 0) {
2752     locals = new GrowableArray<ScopeValue*>(nof_locals);
2753 
2754     int pos = 0;
2755     while (pos < nof_locals) {
2756       assert(pos < cur_state->locals_size(), "why not?");
2757 
2758       Value local = cur_state->local_at(pos);
2759       pos += append_scope_value(op_id, local, locals);
2760 
2761       assert(locals->length() == pos, "must match");
2762     }
2763     assert(locals->length() == cur_scope->method()->max_locals(), "wrong number of locals");
2764     assert(locals->length() == cur_state->locals_size(), "wrong number of locals");
2765   }
2766 
2767 
2768   // describe expression stack
2769   //
2770   // When we inline methods containing exception handlers, the
2771   // "lock_stacks" are changed to preserve expression stack values
2772   // in caller scopes when exception handlers are present. This
2773   // can cause callee stacks to be smaller than caller stacks.
2774   if (stack_end > innermost_state->stack_size()) {
2775     stack_end = innermost_state->stack_size();
2776   }
2777 
2778 
2779 
2780   int nof_stack = stack_end - stack_begin;
2781   if (nof_stack > 0) {
2782     expressions = new GrowableArray<ScopeValue*>(nof_stack);
2783 
2784     int pos = stack_begin;
2785     while (pos < stack_end) {
2786       Value expression = innermost_state->stack_at_inc(pos);
2787       append_scope_value(op_id, expression, expressions);
2788 
2789       assert(expressions->length() + stack_begin == pos, "must match");
2790     }
2791   }
2792 
2793   // describe monitors
2794   assert(locks_begin <= locks_end, "error in scope iteration");
2795   int nof_locks = locks_end - locks_begin;
2796   if (nof_locks > 0) {
2797     monitors = new GrowableArray<MonitorValue*>(nof_locks);
2798     for (int i = locks_begin; i < locks_end; i++) {
2799       monitors->append(location_for_monitor_index(i));
2800     }
2801   }
2802 
2803   return new IRScopeDebugInfo(cur_scope, cur_bci, locals, expressions, monitors, caller_debug_info);
2804 }
2805 
2806 
2807 void LinearScan::compute_debug_info(CodeEmitInfo* info, int op_id) {
2808   if (!compilation()->needs_debug_information()) {
2809     return;
2810   }
2811   TRACE_LINEAR_SCAN(3, tty->print_cr("creating debug information at op_id %d", op_id));
2812 
2813   IRScope* innermost_scope = info->scope();
2814   ValueStack* innermost_state = info->stack();
2815 
2816   assert(innermost_scope != NULL && innermost_state != NULL, "why is it missing?");
2817 
2818   int stack_end = innermost_state->stack_size();
2819   int locks_end = innermost_state->locks_size();
2820 
2821   DEBUG_ONLY(check_stack_depth(info, stack_end));
2822 
2823   if (info->_scope_debug_info == NULL) {
2824     // compute debug information
2825     info->_scope_debug_info = compute_debug_info_for_scope(op_id, innermost_scope, innermost_state, innermost_state, info->bci(), stack_end, locks_end);
2826   } else {
2827     // debug information already set. Check that it is correct from the current point of view
2828     DEBUG_ONLY(assert_equal(info->_scope_debug_info, compute_debug_info_for_scope(op_id, innermost_scope, innermost_state, innermost_state, info->bci(), stack_end, locks_end)));
2829   }
2830 }
2831 
2832 
2833 void LinearScan::assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw) {
2834   LIR_OpVisitState visitor;
2835   int num_inst = instructions->length();
2836   bool has_dead = false;
2837 
2838   for (int j = 0; j < num_inst; j++) {
2839     LIR_Op* op = instructions->at(j);
2840     if (op == NULL) {  // this can happen when spill-moves are removed in eliminate_spill_moves
2841       has_dead = true;
2842       continue;
2843     }
2844     int op_id = op->id();
2845 
2846     // visit instruction to get list of operands
2847     visitor.visit(op);
2848 
2849     // iterate all modes of the visitor and process all virtual operands
2850     for_each_visitor_mode(mode) {
2851       int n = visitor.opr_count(mode);
2852       for (int k = 0; k < n; k++) {
2853         LIR_Opr opr = visitor.opr_at(mode, k);
2854         if (opr->is_virtual_register()) {
2855           visitor.set_opr_at(mode, k, color_lir_opr(opr, op_id, mode));
2856         }
2857       }
2858     }
2859 
2860     if (visitor.info_count() > 0) {
2861       // exception handling
2862       if (compilation()->has_exception_handlers()) {
2863         XHandlers* xhandlers = visitor.all_xhandler();
2864         int n = xhandlers->length();
2865         for (int k = 0; k < n; k++) {
2866           XHandler* handler = xhandlers->handler_at(k);
2867           if (handler->entry_code() != NULL) {
2868             assign_reg_num(handler->entry_code()->instructions_list(), NULL);
2869           }
2870         }
2871       } else {
2872         assert(visitor.all_xhandler()->length() == 0, "missed exception handler");
2873       }
2874 
2875       // compute oop map
2876       assert(iw != NULL, "needed for compute_oop_map");
2877       compute_oop_map(iw, visitor, op);
2878 
2879       // compute debug information
2880       if (!use_fpu_stack_allocation()) {
2881         // compute debug information if fpu stack allocation is not needed.
2882         // when fpu stack allocation is needed, the debug information can not
2883         // be computed here because the exact location of fpu operands is not known
2884         // -> debug information is created inside the fpu stack allocator
2885         int n = visitor.info_count();
2886         for (int k = 0; k < n; k++) {
2887           compute_debug_info(visitor.info_at(k), op_id);
2888         }
2889       }
2890     }
2891 
2892 #ifdef ASSERT
2893     // make sure we haven't made the op invalid.
2894     op->verify();
2895 #endif
2896 
2897     // remove useless moves
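         // (e.g., hypothetically, a move whose source and destination intervals were
         //  colored to the same physical register has no effect and is dropped)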
2898     if (op->code() == lir_move) {
2899       assert(op->as_Op1() != NULL, "move must be LIR_Op1");
2900       LIR_Op1* move = (LIR_Op1*)op;
2901       LIR_Opr src = move->in_opr();
2902       LIR_Opr dst = move->result_opr();
2903       if (dst == src ||
2904           (!dst->is_pointer() && !src->is_pointer() &&
2905            src->is_same_register(dst))) {
2906         instructions->at_put(j, NULL);
2907         has_dead = true;
2908       }
2909     }
2910   }
2911 
2912   if (has_dead) {
2913     // iterate all instructions of the block and remove all null-values.
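         // (in-place compaction: each surviving op is shifted down to the next free
         //  slot, then the list is truncated to the new length)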
2914     int insert_point = 0;
2915     for (int j = 0; j < num_inst; j++) {
2916       LIR_Op* op = instructions->at(j);
2917       if (op != NULL) {
2918         if (insert_point != j) {
2919           instructions->at_put(insert_point, op);
2920         }
2921         insert_point++;
2922       }
2923     }
2924     instructions->truncate(insert_point);
2925   }
2926 }
2927 
2928 void LinearScan::assign_reg_num() {
2929   TIME_LINEAR_SCAN(timer_assign_reg_num);
2930 
2931   init_compute_debug_info();
2932   IntervalWalker* iw = init_compute_oop_maps();
2933 
2934   int num_blocks = block_count();
2935   for (int i = 0; i < num_blocks; i++) {
2936     BlockBegin* block = block_at(i);
2937     assign_reg_num(block->lir()->instructions_list(), iw);
2938   }
2939 }
2940 
2941 
2942 void LinearScan::do_linear_scan() {
2943   NOT_PRODUCT(_total_timer.begin_method());
2944 
2945   number_instructions();
2946 
2947   NOT_PRODUCT(print_lir(1, "Before Register Allocation"));
2948 
2949   compute_local_live_sets();
2950   compute_global_live_sets();
2951   CHECK_BAILOUT();
2952 
2953   build_intervals();
2954   CHECK_BAILOUT();
2955   sort_intervals_before_allocation();
2956 
2957   NOT_PRODUCT(print_intervals("Before Register Allocation"));
2958   NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_before_alloc));
2959 
2960   allocate_registers();
2961   CHECK_BAILOUT();
2962 
2963   resolve_data_flow();
2964   if (compilation()->has_exception_handlers()) {
2965     resolve_exception_handlers();
2966   }
2967   // fill in number of spill slots into frame_map
2968   propagate_spill_slots();
2969   CHECK_BAILOUT();
2970 
2971   NOT_PRODUCT(print_intervals("After Register Allocation"));
2972   NOT_PRODUCT(print_lir(2, "LIR after register allocation:"));
2973 
2974   sort_intervals_after_allocation();
2975 
2976   DEBUG_ONLY(verify());
2977 
2978   eliminate_spill_moves();
2979   assign_reg_num();
2980   CHECK_BAILOUT();
2981 
2982   NOT_PRODUCT(print_lir(2, "LIR after assignment of register numbers:"));
2983   NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_after_asign));
2984 
2985   { TIME_LINEAR_SCAN(timer_allocate_fpu_stack);
2986 
2987     if (use_fpu_stack_allocation()) {
2988       allocate_fpu_stack(); // Only has effect on Intel
2989       NOT_PRODUCT(print_lir(2, "LIR after FPU stack allocation:"));
2990     }
2991   }
2992 
2993   { TIME_LINEAR_SCAN(timer_optimize_lir);
2994 
2995     EdgeMoveOptimizer::optimize(ir()->code());
2996     ControlFlowOptimizer::optimize(ir()->code());
2997     // check that cfg is still correct after optimizations
2998     ir()->verify();
2999   }
3000 
3001   NOT_PRODUCT(print_lir(1, "Before Code Generation", false));
3002   NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_final));
3003   NOT_PRODUCT(_total_timer.end_method(this));
3004 }
3005 
3006 
3007 // ********** Printing functions
3008 
3009 #ifndef PRODUCT
3010 
3011 void LinearScan::print_timers(double total) {
3012   _total_timer.print(total);
3013 }
3014 
3015 void LinearScan::print_statistics() {
3016   _stat_before_alloc.print("before allocation");
3017   _stat_after_asign.print("after assignment of register");
3018   _stat_final.print("after optimization");
3019 }
3020 
3021 void LinearScan::print_bitmap(BitMap& b) {
3022   for (unsigned int i = 0; i < b.size(); i++) {
3023     if (b.at(i)) tty->print("%d ", i);
3024   }
3025   tty->cr();
3026 }
3027 
3028 void LinearScan::print_intervals(const char* label) {
3029   if (TraceLinearScanLevel >= 1) {
3030     int i;
3031     tty->cr();
3032     tty->print_cr("%s", label);
3033 
3034     for (i = 0; i < interval_count(); i++) {
3035       Interval* interval = interval_at(i);
3036       if (interval != NULL) {
3037         interval->print();
3038       }
3039     }
3040 
3041     tty->cr();
3042     tty->print_cr("--- Basic Blocks ---");
3043     for (i = 0; i < block_count(); i++) {
3044       BlockBegin* block = block_at(i);
3045       tty->print("B%d [%d, %d, %d, %d] ", block->block_id(), block->first_lir_instruction_id(), block->last_lir_instruction_id(), block->loop_index(), block->loop_depth());
3046     }
3047     tty->cr();
3048     tty->cr();
3049   }
3050 
3051   if (PrintCFGToFile) {
3052     CFGPrinter::print_intervals(&_intervals, label);
3053   }
3054 }
3055 
3056 void LinearScan::print_lir(int level, const char* label, bool hir_valid) {
3057   if (TraceLinearScanLevel >= level) {
3058     tty->cr();
3059     tty->print_cr("%s", label);
3060     print_LIR(ir()->linear_scan_order());
3061     tty->cr();
3062   }
3063 
3064   if (level == 1 && PrintCFGToFile) {
3065     CFGPrinter::print_cfg(ir()->linear_scan_order(), label, hir_valid, true);
3066   }
3067 }
3068 
3069 #endif //PRODUCT
3070 
3071 
3072 // ********** verification functions for allocation
3073 // (check that all intervals have a correct register and that no registers are overwritten)
3074 #ifdef ASSERT
3075 
3076 void LinearScan::verify() {
3077   TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying intervals ******************************************"));
3078   verify_intervals();
3079 
3080   TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying that no oops are in fixed intervals ****************"));
3081   verify_no_oops_in_fixed_intervals();
3082 
3083   TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying that unpinned constants are not alive across block boundaries"));
3084   verify_constants();
3085 
3086   TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying register allocation ********************************"));
3087   verify_registers();
3088 
3089   TRACE_LINEAR_SCAN(2, tty->print_cr("********* no errors found **********************************************"));
3090 }
3091 
3092 void LinearScan::verify_intervals() {
3093   int len = interval_count();
3094   bool has_error = false;
3095 
3096   for (int i = 0; i < len; i++) {
3097     Interval* i1 = interval_at(i);
3098     if (i1 == NULL) continue;
3099 
3100     i1->check_split_children();
3101 
3102     if (i1->reg_num() != i) {
3103       tty->print_cr("Interval %d is on position %d in list", i1->reg_num(), i); i1->print(); tty->cr();
3104       has_error = true;
3105     }
3106 
3107     if (i1->reg_num() >= LIR_OprDesc::vreg_base && i1->type() == T_ILLEGAL) {
3108       tty->print_cr("Interval %d has no type assigned", i1->reg_num()); i1->print(); tty->cr();
3109       has_error = true;
3110     }
3111 
3112     if (i1->assigned_reg() == any_reg) {
3113       tty->print_cr("Interval %d has no register assigned", i1->reg_num()); i1->print(); tty->cr();
3114       has_error = true;
3115     }
3116 
3117     if (i1->assigned_reg() == i1->assigned_regHi()) {
3118       tty->print_cr("Interval %d: low and high register equal", i1->reg_num()); i1->print(); tty->cr();
3119       has_error = true;
3120     }
3121 
3122     if (!is_processed_reg_num(i1->assigned_reg())) {
3123       tty->print_cr("Can not have an Interval for an ignored register"); i1->print(); tty->cr();
3124       has_error = true;
3125     }
3126 
3127     if (i1->first() == Range::end()) {
3128       tty->print_cr("Interval %d has no Range", i1->reg_num()); i1->print(); tty->cr();
3129       has_error = true;
3130     }
3131 
3132     for (Range* r = i1->first(); r != Range::end(); r = r->next()) {
3133       if (r->from() >= r->to()) {
3134         tty->print_cr("Interval %d has zero length range", i1->reg_num()); i1->print(); tty->cr();
3135         has_error = true;
3136       }
3137     }
3138 
3139     for (int j = i + 1; j < len; j++) {
3140       Interval* i2 = interval_at(j);
3141       if (i2 == NULL) continue;
3142 
3143       // special intervals that are created in MoveResolver
3144       // -> ignore them because the range information has no meaning there
3145       if (i1->from() == 1 && i1->to() == 2) continue;
3146       if (i2->from() == 1 && i2->to() == 2) continue;
3147 
3148       int r1 = i1->assigned_reg();
3149       int r1Hi = i1->assigned_regHi();
3150       int r2 = i2->assigned_reg();
3151       int r2Hi = i2->assigned_regHi();
3152       if (i1->intersects(i2) && (r1 == r2 || r1 == r2Hi || (r1Hi != any_reg && (r1Hi == r2 || r1Hi == r2Hi)))) {
3153         tty->print_cr("Intervals %d and %d overlap and have the same register assigned", i1->reg_num(), i2->reg_num());
3154         i1->print(); tty->cr();
3155         i2->print(); tty->cr();
3156         has_error = true;
3157       }
3158     }
3159   }
3160 
3161   assert(has_error == false, "register allocation invalid");
3162 }
3163 
3164 
3165 void LinearScan::verify_no_oops_in_fixed_intervals() {
3166   Interval* fixed_intervals;
3167   Interval* other_intervals;
3168   create_unhandled_lists(&fixed_intervals, &other_intervals, is_precolored_cpu_interval, NULL);
3169 
3170   // to ensure that the walker visits positions up to the last instruction id,
3171   // add a dummy interval with a high operation id
3172   other_intervals = new Interval(any_reg);
3173   other_intervals->add_range(max_jint - 2, max_jint - 1);
3174   IntervalWalker* iw = new IntervalWalker(this, fixed_intervals, other_intervals);
3175 
3176   LIR_OpVisitState visitor;
3177   for (int i = 0; i < block_count(); i++) {
3178     BlockBegin* block = block_at(i);
3179 
3180     LIR_OpList* instructions = block->lir()->instructions_list();
3181 
3182     for (int j = 0; j < instructions->length(); j++) {
3183       LIR_Op* op = instructions->at(j);
3184       int op_id = op->id();
3185 
3186       visitor.visit(op);
3187 
3188       if (visitor.info_count() > 0) {
3189         iw->walk_before(op->id());
3190         bool check_live = true;
3191         if (op->code() == lir_move) {
3192           LIR_Op1* move = (LIR_Op1*)op;
3193           check_live = (move->patch_code() == lir_patch_none);
3194         }
3195         LIR_OpBranch* branch = op->as_OpBranch();
3196         if (branch != NULL && branch->stub() != NULL && branch->stub()->is_exception_throw_stub()) {
3197           // Don't bother checking the stub in this case since the
3198           // exception stub will never return to normal control flow.
3199           check_live = false;
3200         }
3201 
3202         // Make sure none of the fixed registers is live across an
3203         // oopmap since we can't handle that correctly.
3204         if (check_live) {
3205           for (Interval* interval = iw->active_first(fixedKind);
3206                interval != Interval::end();
3207                interval = interval->next()) {
3208             if (interval->current_to() > op->id() + 1) {
3209               // This interval is live out of this op so make sure
3210               // that this interval represents some value that's
3211               // referenced by this op either as an input or output.
3212               bool ok = false;
3213               for_each_visitor_mode(mode) {
3214                 int n = visitor.opr_count(mode);
3215                 for (int k = 0; k < n; k++) {
3216                   LIR_Opr opr = visitor.opr_at(mode, k);
3217                   if (opr->is_fixed_cpu()) {
3218                     if (interval_at(reg_num(opr)) == interval) {
3219                       ok = true;
3220                       break;
3221                     }
3222                     int hi = reg_numHi(opr);
3223                     if (hi != -1 && interval_at(hi) == interval) {
3224                       ok = true;
3225                       break;
3226                     }
3227                   }
3228                 }
3229               }
3230               assert(ok, "fixed intervals should never be live across an oopmap point");
3231             }
3232           }
3233         }
3234       }
3235 
3236       // oop-maps at calls do not contain registers, so the check is not needed
3237       if (!visitor.has_call()) {
3238 
3239         for_each_visitor_mode(mode) {
3240           int n = visitor.opr_count(mode);
3241           for (int k = 0; k < n; k++) {
3242             LIR_Opr opr = visitor.opr_at(mode, k);
3243 
3244             if (opr->is_fixed_cpu() && opr->is_oop()) {
3245               // operand is a non-virtual cpu register and contains an oop
3246               TRACE_LINEAR_SCAN(4, op->print_on(tty); tty->print("checking operand "); opr->print(); tty->cr());
3247 
3248               Interval* interval = interval_at(reg_num(opr));
3249               assert(interval != NULL, "no interval");
3250 
3251               if (mode == LIR_OpVisitState::inputMode) {
3252                 if (interval->to() >= op_id + 1) {
3253                   assert(interval->to() < op_id + 2 ||
3254                          interval->has_hole_between(op_id, op_id + 2),
3255                          "oop input operand live after instruction");
3256                 }
3257               } else if (mode == LIR_OpVisitState::outputMode) {
3258                 if (interval->from() <= op_id - 1) {
3259                   assert(interval->has_hole_between(op_id - 1, op_id),
3260                          "oop output operand live before instruction");
3261                 }
3262               }
3263             }
3264           }
3265         }
3266       }
3267     }
3268   }
3269 }
3270 
3271 
3272 void LinearScan::verify_constants() {
3273   int num_regs = num_virtual_regs();
3274   int size = live_set_size();
3275   int num_blocks = block_count();
3276 
3277   for (int i = 0; i < num_blocks; i++) {
3278     BlockBegin* block = block_at(i);
3279     BitMap live_at_edge = block->live_in();
3280 
3281     // visit all registers where the live_at_edge bit is set
3282     for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
3283       TRACE_LINEAR_SCAN(4, tty->print("checking interval %d of block B%d", r, block->block_id()));
3284 
3285       Value value = gen()->instruction_for_vreg(r);
3286 
3287       assert(value != NULL, "all intervals live across block boundaries must have Value");
3288       assert(value->operand()->is_register() && value->operand()->is_virtual(), "value must have virtual operand");
3289       assert(value->operand()->vreg_number() == r, "register number must match");
3290       // TKR assert(value->as_Constant() == NULL || value->is_pinned(), "only pinned constants can be alive across block boundaries");
3291     }
3292   }
3293 }
3294 
3295 
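// RegisterVerifier replays the allocation as a forward data-flow problem over
// the CFG: for every block it keeps a saved state mapping each physical
// register to the interval expected to occupy it. Blocks are taken from a work
// list; when a successor is reached with a state that contradicts its saved
// state, the conflicting slots are invalidated and the block is re-queued,
// until a fixed point is reached.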
3296 class RegisterVerifier: public StackObj {
3297  private:
3298   LinearScan*   _allocator;
3299   BlockList     _work_list;      // all blocks that must be processed
3300   IntervalsList _saved_states;   // saved information of previous check
3301 
3302   // simplified access to methods of LinearScan
3303   Compilation*  compilation() const              { return _allocator->compilation(); }
3304   Interval*     interval_at(int reg_num) const   { return _allocator->interval_at(reg_num); }
3305   int           reg_num(LIR_Opr opr) const       { return _allocator->reg_num(opr); }
3306 
3307   // currently, only registers are processed
3308   int           state_size()                     { return LinearScan::nof_regs; }
3309 
3310   // accessors
3311   IntervalList* state_for_block(BlockBegin* block) { return _saved_states.at(block->block_id()); }
3312   void          set_state_for_block(BlockBegin* block, IntervalList* saved_state) { _saved_states.at_put(block->block_id(), saved_state); }
3313   void          add_to_work_list(BlockBegin* block) { if (!_work_list.contains(block)) _work_list.append(block); }
3314 
3315   // helper functions
3316   IntervalList* copy(IntervalList* input_state);
3317   void          state_put(IntervalList* input_state, int reg, Interval* interval);
3318   bool          check_state(IntervalList* input_state, int reg, Interval* interval);
3319 
3320   void process_block(BlockBegin* block);
3321   void process_xhandler(XHandler* xhandler, IntervalList* input_state);
3322   void process_successor(BlockBegin* block, IntervalList* input_state);
3323   void process_operations(LIR_List* ops, IntervalList* input_state);
3324 
3325  public:
3326   RegisterVerifier(LinearScan* allocator)
3327     : _allocator(allocator)
3328     , _work_list(16)
3329     , _saved_states(BlockBegin::number_of_blocks(), NULL)
3330   { }
3331 
3332   void verify(BlockBegin* start);
3333 };
3334 
3335 
3336 // entry function from LinearScan that starts the verification
3337 void LinearScan::verify_registers() {
3338   RegisterVerifier verifier(this);
3339   verifier.verify(block_at(0));
3340 }
3341 
3342 
3343 void RegisterVerifier::verify(BlockBegin* start) {
3344   // setup input registers (method arguments) for first block
3345   IntervalList* input_state = new IntervalList(state_size(), NULL);
3346   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
3347   for (int n = 0; n < args->length(); n++) {
3348     LIR_Opr opr = args->at(n);
3349     if (opr->is_register()) {
3350       Interval* interval = interval_at(reg_num(opr));
3351 
3352       if (interval->assigned_reg() < state_size()) {
3353         input_state->at_put(interval->assigned_reg(), interval);
3354       }
3355       if (interval->assigned_regHi() != LinearScan::any_reg && interval->assigned_regHi() < state_size()) {
3356         input_state->at_put(interval->assigned_regHi(), interval);
3357       }
3358     }
3359   }
3360 
3361   set_state_for_block(start, input_state);
3362   add_to_work_list(start);
3363 
3364   // main loop for verification
3365   do {
3366     BlockBegin* block = _work_list.at(0);
3367     _work_list.remove_at(0);
3368 
3369     process_block(block);
3370   } while (!_work_list.is_empty());
3371 }
3372 
3373 void RegisterVerifier::process_block(BlockBegin* block) {
3374   TRACE_LINEAR_SCAN(2, tty->cr(); tty->print_cr("process_block B%d", block->block_id()));
3375 
3376   // must copy state because it is modified
3377   IntervalList* input_state = copy(state_for_block(block));
3378 
3379   if (TraceLinearScanLevel >= 4) {
3380     tty->print_cr("Input-State of intervals:");
3381     tty->print("    ");
3382     for (int i = 0; i < state_size(); i++) {
3383       if (input_state->at(i) != NULL) {
3384         tty->print(" %4d", input_state->at(i)->reg_num());
3385       } else {
3386         tty->print("   __");
3387       }
3388     }
3389     tty->cr();
3390     tty->cr();
3391   }
3392 
3393   // process all operations of the block
3394   process_operations(block->lir(), input_state);
3395 
3396   // iterate all successors
3397   for (int i = 0; i < block->number_of_sux(); i++) {
3398     process_successor(block->sux_at(i), input_state);
3399   }
3400 }
3401 
3402 void RegisterVerifier::process_xhandler(XHandler* xhandler, IntervalList* input_state) {
3403   TRACE_LINEAR_SCAN(2, tty->print_cr("process_xhandler B%d", xhandler->entry_block()->block_id()));
3404 
3405   // must copy state because it is modified
3406   input_state = copy(input_state);
3407 
3408   if (xhandler->entry_code() != NULL) {
3409     process_operations(xhandler->entry_code(), input_state);
3410   }
3411   process_successor(xhandler->entry_block(), input_state);
3412 }
3413 
3414 void RegisterVerifier::process_successor(BlockBegin* block, IntervalList* input_state) {
3415   IntervalList* saved_state = state_for_block(block);
3416 
3417   if (saved_state != NULL) {
3418     // this block was already processed before.
3419     // check if new input_state is consistent with saved_state
3420 
3421     bool saved_state_correct = true;
3422     for (int i = 0; i < state_size(); i++) {
3423       if (input_state->at(i) != saved_state->at(i)) {
3424         // current input_state and previous saved_state assume a different
3425         // interval in this register -> assume that this register is invalid
3426         if (saved_state->at(i) != NULL) {
3427           // invalidate old calculation only if it assumed that
3428           // register was valid. when the register was already invalid,
3429           // then the old calculation was correct.
3430           saved_state_correct = false;
3431           saved_state->at_put(i, NULL);
3432 
3433           TRACE_LINEAR_SCAN(4, tty->print_cr("process_successor B%d: invalidating slot %d", block->block_id(), i));
3434         }
3435       }
3436     }
3437 
3438     if (saved_state_correct) {
3439       // already processed block with correct input_state
3440       TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: previous visit already correct", block->block_id()));
3441     } else {
3442       // must re-visit this block
3443       TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: must re-visit because input state changed", block->block_id()));
3444       add_to_work_list(block);
3445     }
3446 
3447   } else {
3448     // block was not processed before, so set initial input_state
3449     TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: initial visit", block->block_id()));
3450 
3451     set_state_for_block(block, copy(input_state));
3452     add_to_work_list(block);
3453   }
3454 }
3455 
3456 
3457 IntervalList* RegisterVerifier::copy(IntervalList* input_state) {
3458   IntervalList* copy_state = new IntervalList(input_state->length());
3459   copy_state->push_all(input_state);
3460   return copy_state;
3461 }
3462 
3463 void RegisterVerifier::state_put(IntervalList* input_state, int reg, Interval* interval) {
3464   if (reg != LinearScan::any_reg && reg < state_size()) {
3465     if (interval != NULL) {
3466       TRACE_LINEAR_SCAN(4, tty->print_cr("        reg[%d] = %d", reg, interval->reg_num()));
3467     } else if (input_state->at(reg) != NULL) {
3468       TRACE_LINEAR_SCAN(4, tty->print_cr("        reg[%d] = NULL", reg));
3469     }
3470 
3471     input_state->at_put(reg, interval);
3472   }
3473 }
3474 
3475 bool RegisterVerifier::check_state(IntervalList* input_state, int reg, Interval* interval) {
3476   if (reg != LinearScan::any_reg && reg < state_size()) {
3477     if (input_state->at(reg) != interval) {
3478       tty->print_cr("!! Error in register allocation: register %d does not contain interval %d", reg, interval->reg_num());
3479       return true;
3480     }
3481   }
3482   return false;
3483 }
3484 
3485 void RegisterVerifier::process_operations(LIR_List* ops, IntervalList* input_state) {
3486   // visit all instructions of the block
3487   LIR_OpVisitState visitor;
3488   bool has_error = false;
3489 
3490   for (int i = 0; i < ops->length(); i++) {
3491     LIR_Op* op = ops->at(i);
3492     visitor.visit(op);
3493 
3494     TRACE_LINEAR_SCAN(4, op->print_on(tty));
3495 
3496     // check if input operands are correct
3497     int j;
3498     int n = visitor.opr_count(LIR_OpVisitState::inputMode);
3499     for (j = 0; j < n; j++) {
3500       LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, j);
3501       if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
3502         Interval* interval = interval_at(reg_num(opr));
3503         if (op->id() != -1) {
3504           interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::inputMode);
3505         }
3506 
3507         has_error |= check_state(input_state, interval->assigned_reg(),   interval->split_parent());
3508         has_error |= check_state(input_state, interval->assigned_regHi(), interval->split_parent());
3509 
3510         // When an operand is marked with is_last_use, then the fpu stack allocator
3511         // removes the register from the fpu stack -> the register contains no value
3512         if (opr->is_last_use()) {
3513           state_put(input_state, interval->assigned_reg(),   NULL);
3514           state_put(input_state, interval->assigned_regHi(), NULL);
3515         }
3516       }
3517     }
3518 
3519     // invalidate all caller save registers at calls
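         // (the callee may clobber any caller-saved register, so no interval can be
         //  assumed to survive in one across the call)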
3520     if (visitor.has_call()) {
3521       for (j = 0; j < FrameMap::nof_caller_save_cpu_regs; j++) {
3522         state_put(input_state, reg_num(FrameMap::caller_save_cpu_reg_at(j)), NULL);
3523       }
3524       for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
3525         state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL);
3526       }
3527 
3528 #ifdef X86
3529       for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
3530         state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
3531       }
3532 #endif
3533     }
3534 
3535     // process xhandler before output and temp operands
3536     XHandlers* xhandlers = visitor.all_xhandler();
3537     n = xhandlers->length();
3538     for (int k = 0; k < n; k++) {
3539       process_xhandler(xhandlers->handler_at(k), input_state);
3540     }
3541 
3542     // set temp operands (some operations also use temp operands as output operands, so we can't set them to NULL)
3543     n = visitor.opr_count(LIR_OpVisitState::tempMode);
3544     for (j = 0; j < n; j++) {
3545       LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, j);
3546       if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
3547         Interval* interval = interval_at(reg_num(opr));
3548         if (op->id() != -1) {
3549           interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::tempMode);
3550         }
3551 
3552         state_put(input_state, interval->assigned_reg(),   interval->split_parent());
3553         state_put(input_state, interval->assigned_regHi(), interval->split_parent());
3554       }
3555     }
3556 
3557     // set output operands
3558     n = visitor.opr_count(LIR_OpVisitState::outputMode);
3559     for (j = 0; j < n; j++) {
3560       LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, j);
3561       if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
3562         Interval* interval = interval_at(reg_num(opr));
3563         if (op->id() != -1) {
3564           interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::outputMode);
3565         }
3566 
3567         state_put(input_state, interval->assigned_reg(),   interval->split_parent());
3568         state_put(input_state, interval->assigned_regHi(), interval->split_parent());
3569       }
3570     }
3571   }
3572   assert(has_error == false, "Error in register allocation");
3573 }
3574 
3575 #endif // ASSERT
3576 
3577 
3578 
3579 // **** Implementation of MoveResolver ******************************
3580 
3581 MoveResolver::MoveResolver(LinearScan* allocator) :
3582   _allocator(allocator),
3583   _multiple_reads_allowed(false),
3584   _mapping_from(8),
3585   _mapping_from_opr(8),
3586   _mapping_to(8),
3587   _insert_list(NULL),
3588   _insert_idx(-1),
3589   _insertion_buffer()
3590 {
3591   for (int i = 0; i < LinearScan::nof_regs; i++) {
3592     _register_blocked[i] = 0;
3593   }
3594   DEBUG_ONLY(check_empty());
3595 }
3596 
3597 
3598 #ifdef ASSERT
3599 
3600 void MoveResolver::check_empty() {
3601   assert(_mapping_from.length() == 0 && _mapping_from_opr.length() == 0 && _mapping_to.length() == 0, "list must be empty before and after processing");
3602   for (int i = 0; i < LinearScan::nof_regs; i++) {
3603     assert(register_blocked(i) == 0, "register map must be empty before and after processing");
3604   }
3605   assert(_multiple_reads_allowed == false, "must have default value");
3606 }
3607 
3608 void MoveResolver::verify_before_resolve() {
3609   assert(_mapping_from.length() == _mapping_from_opr.length(), "length must be equal");
3610   assert(_mapping_from.length() == _mapping_to.length(), "length must be equal");
3611   assert(_insert_list != NULL && _insert_idx != -1, "insert position not set");
3612 
3613   int i, j;
3614   if (!_multiple_reads_allowed) {
3615     for (i = 0; i < _mapping_from.length(); i++) {
3616       for (j = i + 1; j < _mapping_from.length(); j++) {
3617         assert(_mapping_from.at(i) == NULL || _mapping_from.at(i) != _mapping_from.at(j), "cannot read from same interval twice");
3618       }
3619     }
3620   }
3621 
3622   for (i = 0; i < _mapping_to.length(); i++) {
3623     for (j = i + 1; j < _mapping_to.length(); j++) {
3624       assert(_mapping_to.at(i) != _mapping_to.at(j), "cannot write to same interval twice");
3625     }
3626   }
3627 
3628 
3629   BitMap used_regs(LinearScan::nof_regs + allocator()->frame_map()->argcount() + allocator()->max_spills());
3630   used_regs.clear();
3631   if (!_multiple_reads_allowed) {
3632     for (i = 0; i < _mapping_from.length(); i++) {
3633       Interval* it = _mapping_from.at(i);
3634       if (it != NULL) {
3635         assert(!used_regs.at(it->assigned_reg()), "cannot read from same register twice");
3636         used_regs.set_bit(it->assigned_reg());
3637 
3638         if (it->assigned_regHi() != LinearScan::any_reg) {
3639           assert(!used_regs.at(it->assigned_regHi()), "cannot read from same register twice");
3640           used_regs.set_bit(it->assigned_regHi());
3641         }
3642       }
3643     }
3644   }
3645 
3646   used_regs.clear();
3647   for (i = 0; i < _mapping_to.length(); i++) {
3648     Interval* it = _mapping_to.at(i);
3649     assert(!used_regs.at(it->assigned_reg()), "cannot write to same register twice");
3650     used_regs.set_bit(it->assigned_reg());
3651 
3652     if (it->assigned_regHi() != LinearScan::any_reg) {
3653       assert(!used_regs.at(it->assigned_regHi()), "cannot write to same register twice");
3654       used_regs.set_bit(it->assigned_regHi());
3655     }
3656   }
3657 
3658   used_regs.clear();
3659   for (i = 0; i < _mapping_from.length(); i++) {
3660     Interval* it = _mapping_from.at(i);
3661     if (it != NULL && it->assigned_reg() >= LinearScan::nof_regs) {
3662       used_regs.set_bit(it->assigned_reg());
3663     }
3664   }
3665   for (i = 0; i < _mapping_to.length(); i++) {
3666     Interval* it = _mapping_to.at(i);
3667     assert(!used_regs.at(it->assigned_reg()) || it->assigned_reg() == _mapping_from.at(i)->assigned_reg(), "stack slots used in _mapping_from must be disjoint from _mapping_to");
3668   }
3669 }
3670 
3671 #endif // ASSERT
3672 
3673 
3674 // mark assigned_reg and assigned_regHi of the interval as blocked
3675 void MoveResolver::block_registers(Interval* it) {
3676   int reg = it->assigned_reg();
3677   if (reg < LinearScan::nof_regs) {
3678     assert(_multiple_reads_allowed || register_blocked(reg) == 0, "register already marked as used");
3679     set_register_blocked(reg, 1);
3680   }
3681   reg = it->assigned_regHi();
3682   if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) {
3683     assert(_multiple_reads_allowed || register_blocked(reg) == 0, "register already marked as used");
3684     set_register_blocked(reg, 1);
3685   }
3686 }
3687 
3688 // mark assigned_reg and assigned_regHi of the interval as unblocked
3689 void MoveResolver::unblock_registers(Interval* it) {
3690   int reg = it->assigned_reg();
3691   if (reg < LinearScan::nof_regs) {
3692     assert(register_blocked(reg) > 0, "register already marked as unused");
3693     set_register_blocked(reg, -1);
3694   }
3695   reg = it->assigned_regHi();
3696   if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) {
3697     assert(register_blocked(reg) > 0, "register already marked as unused");
3698     set_register_blocked(reg, -1);
3699   }
3700 }
3701 
3702 // check if assigned_reg and assigned_regHi of the to-interval are not blocked (or only blocked by from)
3703 bool MoveResolver::save_to_process_move(Interval* from, Interval* to) {
3704   int from_reg = -1;
3705   int from_regHi = -1;
3706   if (from != NULL) {
3707     from_reg = from->assigned_reg();
3708     from_regHi = from->assigned_regHi();
3709   }
3710 
3711   int reg = to->assigned_reg();
3712   if (reg < LinearScan::nof_regs) {
3713     if (register_blocked(reg) > 1 || (register_blocked(reg) == 1 && reg != from_reg && reg != from_regHi)) {
3714       return false;
3715     }
3716   }
3717   reg = to->assigned_regHi();
3718   if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) {
3719     if (register_blocked(reg) > 1 || (register_blocked(reg) == 1 && reg != from_reg && reg != from_regHi)) {
3720       return false;
3721     }
3722   }
3723 
3724   return true;
3725 }
3726 
3727 
3728 void MoveResolver::create_insertion_buffer(LIR_List* list) {
3729   assert(!_insertion_buffer.initialized(), "overwriting existing buffer");
3730   _insertion_buffer.init(list);
3731 }
3732 
3733 void MoveResolver::append_insertion_buffer() {
3734   if (_insertion_buffer.initialized()) {
3735     _insertion_buffer.lir_list()->append(&_insertion_buffer);
3736   }
3737   assert(!_insertion_buffer.initialized(), "must be uninitialized now");
3738 
3739   _insert_list = NULL;
3740   _insert_idx = -1;
3741 }
3742 
3743 void MoveResolver::insert_move(Interval* from_interval, Interval* to_interval) {
3744   assert(from_interval->reg_num() != to_interval->reg_num(), "from and to interval equal");
3745   assert(from_interval->type() == to_interval->type(), "move between different types");
3746   assert(_insert_list != NULL && _insert_idx != -1, "must setup insert position first");
3747   assert(_insertion_buffer.lir_list() == _insert_list, "wrong insertion buffer");
3748 
3749   LIR_Opr from_opr = LIR_OprFact::virtual_register(from_interval->reg_num(), from_interval->type());
3750   LIR_Opr to_opr = LIR_OprFact::virtual_register(to_interval->reg_num(), to_interval->type());
3751 
3752   if (!_multiple_reads_allowed) {
3753     // the last_use flag is an optimization for FPU stack allocation. When the same
3754     // input interval is used in more than one move, then it is too difficult to determine
3755     // if this move is really the last use.
3756     from_opr = from_opr->make_last_use();
3757   }
3758   _insertion_buffer.move(_insert_idx, from_opr, to_opr);
3759 
3760   TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: inserted move from register %d (%d, %d) to %d (%d, %d)", from_interval->reg_num(), from_interval->assigned_reg(), from_interval->assigned_regHi(), to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
3761 }
3762 
3763 void MoveResolver::insert_move(LIR_Opr from_opr, Interval* to_interval) {
3764   assert(from_opr->type() == to_interval->type(), "move between different types");
3765   assert(_insert_list != NULL && _insert_idx != -1, "must setup insert position first");
3766   assert(_insertion_buffer.lir_list() == _insert_list, "wrong insertion buffer");
3767 
3768   LIR_Opr to_opr = LIR_OprFact::virtual_register(to_interval->reg_num(), to_interval->type());
3769   _insertion_buffer.move(_insert_idx, from_opr, to_opr);
3770 
3771   TRACE_LINEAR_SCAN(4, tty->print("MoveResolver: inserted move from constant "); from_opr->print(); tty->print_cr("  to %d (%d, %d)", to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
3772 }
3773 
3774 
3775 void MoveResolver::resolve_mappings() {
3776   TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: resolving mappings for Block B%d, index %d", _insert_list->block() != NULL ? _insert_list->block()->block_id() : -1, _insert_idx));
3777   DEBUG_ONLY(verify_before_resolve());
3778 
3779   // Block all registers that are used as input operands of a move.
3780   // When a register is blocked, no move to this register is emitted.
3781   // This is necessary for detecting cycles in moves.
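       // Example (hypothetical registers): for the cyclic mapping r1 -> r2, r2 -> r1
       // both target registers are blocked by a pending read, save_to_process_move()
       // rejects both moves, and the spill path below breaks the cycle by first
       // moving one source to a stack slot.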
3782   int i;
3783   for (i = _mapping_from.length() - 1; i >= 0; i--) {
3784     Interval* from_interval = _mapping_from.at(i);
3785     if (from_interval != NULL) {
3786       block_registers(from_interval);
3787     }
3788   }
3789 
3790   int spill_candidate = -1;
3791   while (_mapping_from.length() > 0) {
3792     bool processed_interval = false;
3793 
3794     for (i = _mapping_from.length() - 1; i >= 0; i--) {
3795       Interval* from_interval = _mapping_from.at(i);
3796       Interval* to_interval = _mapping_to.at(i);
3797 
3798       if (save_to_process_move(from_interval, to_interval)) {
3799         // this interval can be processed because target is free
3800         if (from_interval != NULL) {
3801           insert_move(from_interval, to_interval);
3802           unblock_registers(from_interval);
3803         } else {
3804           insert_move(_mapping_from_opr.at(i), to_interval);
3805         }
3806         _mapping_from.remove_at(i);
3807         _mapping_from_opr.remove_at(i);
3808         _mapping_to.remove_at(i);
3809 
3810         processed_interval = true;
3811       } else if (from_interval != NULL && from_interval->assigned_reg() < LinearScan::nof_regs) {
3812         // this interval cannot be processed now because target is not free
3813         // it starts in a register, so it is a possible candidate for spilling
3814         spill_candidate = i;
3815       }
3816     }
3817 
3818     if (!processed_interval) {
3819       // no move could be processed because there is a cycle in the move list
3820       // (e.g. r1 -> r2, r2 -> r1), so one interval must be spilled to memory
3821       assert(spill_candidate != -1, "no interval in register for spilling found");
3822 
3823       // create a new spill interval and assign a stack slot to it
3824       Interval* from_interval = _mapping_from.at(spill_candidate);
3825       Interval* spill_interval = new Interval(-1);
3826       spill_interval->set_type(from_interval->type());
3827 
3828       // add a dummy range because real position is difficult to calculate
3829       // Note: this range is a special case when the integrity of the allocation is checked
3830       spill_interval->add_range(1, 2);
3831 
3832       //       do not allocate a new spill slot for the temporary interval, but
3833       //       use the spill slot assigned to from_interval. Otherwise moves from
3834       //       one stack slot to another can happen (not allowed by the LIR_Assembler).
3835       int spill_slot = from_interval->canonical_spill_slot();
3836       if (spill_slot < 0) {
3837         spill_slot = allocator()->allocate_spill_slot(type2spill_size[spill_interval->type()] == 2);
3838         from_interval->set_canonical_spill_slot(spill_slot);
3839       }
3840       spill_interval->assign_reg(spill_slot);
3841       allocator()->append_interval(spill_interval);
3842 
3843       TRACE_LINEAR_SCAN(4, tty->print_cr("created new Interval %d for spilling", spill_interval->reg_num()));
3844 
3845       // insert a move from register to stack and update the mapping
3846       insert_move(from_interval, spill_interval);
3847       _mapping_from.at_put(spill_candidate, spill_interval);
3848       unblock_registers(from_interval);
3849     }
3850   }
3851 
3852   // reset to default value
3853   _multiple_reads_allowed = false;
3854 
3855   // check that all intervals have been processed
3856   DEBUG_ONLY(check_empty());
3857 }
3858 
3859 
3860 void MoveResolver::set_insert_position(LIR_List* insert_list, int insert_idx) {
3861   TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: setting insert position to Block B%d, index %d", insert_list->block() != NULL ? insert_list->block()->block_id() : -1, insert_idx));
3862   assert(_insert_list == NULL && _insert_idx == -1, "use move_insert_position instead of set_insert_position when data already set");
3863 
3864   create_insertion_buffer(insert_list);
3865   _insert_list = insert_list;
3866   _insert_idx = insert_idx;
3867 }
3868 
3869 void MoveResolver::move_insert_position(LIR_List* insert_list, int insert_idx) {
3870   TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: moving insert position to Block B%d, index %d", insert_list->block() != NULL ? insert_list->block()->block_id() : -1, insert_idx));
3871 
3872   if (_insert_list != NULL && (insert_list != _insert_list || insert_idx != _insert_idx)) {
3873     // insert position changed -> resolve current mappings
3874     resolve_mappings();
3875   }
3876 
3877   if (insert_list != _insert_list) {
3878     // block changed -> append the insertion_buffer, because it is
3879     // bound to a specific block, and create a new insertion_buffer
3880     append_insertion_buffer();
3881     create_insertion_buffer(insert_list);
3882   }
3883 
3884   _insert_list = insert_list;
3885   _insert_idx = insert_idx;
3886 }
3887 
3888 void MoveResolver::add_mapping(Interval* from_interval, Interval* to_interval) {
3889   TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: adding mapping from %d (%d, %d) to %d (%d, %d)", from_interval->reg_num(), from_interval->assigned_reg(), from_interval->assigned_regHi(), to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
3890 
3891   _mapping_from.append(from_interval);
3892   _mapping_from_opr.append(LIR_OprFact::illegalOpr);
3893   _mapping_to.append(to_interval);
3894 }
3895 
3896 
3897 void MoveResolver::add_mapping(LIR_Opr from_opr, Interval* to_interval) {
3898   TRACE_LINEAR_SCAN(4, tty->print("MoveResolver: adding mapping from "); from_opr->print(); tty->print_cr(" to %d (%d, %d)", to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
3899   assert(from_opr->is_constant(), "only for constants");
3900 
3901   _mapping_from.append(NULL);
3902   _mapping_from_opr.append(from_opr);
3903   _mapping_to.append(to_interval);
3904 }
3905 
3906 void MoveResolver::resolve_and_append_moves() {
3907   if (has_mappings()) {
3908     resolve_mappings();
3909   }
3910   append_insertion_buffer();
3911 }
3912 
3913 
3914 
3915 // **** Implementation of Range *************************************
3916 
3917 Range::Range(int from, int to, Range* next) :
3918   _from(from),
3919   _to(to),
3920   _next(next)
3921 {
3922 }
3923 
3924 // initialize sentinel
3925 Range* Range::_end = NULL;
3926 void Range::initialize() {
3927   _end = new Range(max_jint, max_jint, NULL);
3928 }
3929 
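// returns the first position at which the two range chains overlap, or -1 if
// they are disjoint. Ranges are half-open, so e.g. (hypothetical values)
// [4, 8[ and [6, 10[ intersect at 6, while [4, 6[ and [6, 10[ do not intersect.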
3930 int Range::intersects_at(Range* r2) const {
3931   const Range* r1 = this;
3932 
3933   assert(r1 != NULL && r2 != NULL, "null ranges not allowed");
3934   assert(r1 != _end && r2 != _end, "empty ranges not allowed");
3935 
3936   do {
3937     if (r1->from() < r2->from()) {
3938       if (r1->to() <= r2->from()) {
3939         r1 = r1->next(); if (r1 == _end) return -1;
3940       } else {
3941         return r2->from();
3942       }
3943     } else if (r2->from() < r1->from()) {
3944       if (r2->to() <= r1->from()) {
3945         r2 = r2->next(); if (r2 == _end) return -1;
3946       } else {
3947         return r1->from();
3948       }
3949     } else { // r1->from() == r2->from()
3950       if (r1->from() == r1->to()) {
3951         r1 = r1->next(); if (r1 == _end) return -1;
3952       } else if (r2->from() == r2->to()) {
3953         r2 = r2->next(); if (r2 == _end) return -1;
3954       } else {
3955         return r1->from();
3956       }
3957     }
3958   } while (true);
3959 }
3960 
3961 #ifndef PRODUCT
3962 void Range::print(outputStream* out) const {
3963   out->print("[%d, %d[ ", _from, _to);
3964 }
3965 #endif
3966 
3967 
3968 
3969 // **** Implementation of Interval **********************************
3970 
3971 // initialize sentinel
3972 Interval* Interval::_end = NULL;
3973 void Interval::initialize() {
3974   Range::initialize();
3975   _end = new Interval(-1);
3976 }
3977 
3978 Interval::Interval(int reg_num) :
3979   _reg_num(reg_num),
3980   _type(T_ILLEGAL),
3981   _first(Range::end()),
3982   _use_pos_and_kinds(12),
3983   _current(Range::end()),
3984   _next(_end),
3985   _state(invalidState),
3986   _assigned_reg(LinearScan::any_reg),
3987   _assigned_regHi(LinearScan::any_reg),
3988   _cached_to(-1),
3989   _cached_opr(LIR_OprFact::illegalOpr),
3990   _cached_vm_reg(VMRegImpl::Bad()),
3991   _split_children(0),
3992   _canonical_spill_slot(-1),
3993   _insert_move_when_activated(false),
3994   _register_hint(NULL),
3995   _spill_state(noDefinitionFound),
3996   _spill_definition_pos(-1)
3997 {
3998   _split_parent = this;
3999   _current_split_child = this;
4000 }
4001 
4002 int Interval::calc_to() {
4003   assert(_first != Range::end(), "interval has no range");
4004 
4005   Range* r = _first;
4006   while (r->next() != Range::end()) {
4007     r = r->next();
4008   }
4009   return r->to();
4010 }
4011 
4012 
4013 #ifdef ASSERT
4014 // consistency check of split-children
4015 void Interval::check_split_children() {
4016   if (_split_children.length() > 0) {
4017     assert(is_split_parent(), "only split parents can have children");
4018 
4019     for (int i = 0; i < _split_children.length(); i++) {
4020       Interval* i1 = _split_children.at(i);
4021 
4022       assert(i1->split_parent() == this, "not a split child of this interval");
4023       assert(i1->type() == type(), "must be equal for all split children");
4024       assert(i1->canonical_spill_slot() == canonical_spill_slot(), "must be equal for all split children");
4025 
4026       for (int j = i + 1; j < _split_children.length(); j++) {
4027         Interval* i2 = _split_children.at(j);
4028 
4029         assert(i1->reg_num() != i2->reg_num(), "same register number");
4030 
4031         if (i1->from() < i2->from()) {
4032           assert(i1->to() <= i2->from() && i1->to() < i2->to(), "intervals overlapping");
4033         } else {
4034           assert(i2->from() < i1->from(), "intervals start at same op_id");
4035           assert(i2->to() <= i1->from() && i2->to() < i1->to(), "intervals overlapping");
4036         }
4037       }
4038     }
4039   }
4040 }
4041 #endif // ASSERT
4042 
4043 Interval* Interval::register_hint(bool search_split_child) const {
4044   if (!search_split_child) {
4045     return _register_hint;
4046   }
4047 
4048   if (_register_hint != NULL) {
    assert(_register_hint->is_split_parent(), "only split parents are valid hint registers");
4050 
4051     if (_register_hint->assigned_reg() >= 0 && _register_hint->assigned_reg() < LinearScan::nof_regs) {
4052       return _register_hint;
4053 
4054     } else if (_register_hint->_split_children.length() > 0) {
4055       // search the first split child that has a register assigned
4056       int len = _register_hint->_split_children.length();
4057       for (int i = 0; i < len; i++) {
4058         Interval* cur = _register_hint->_split_children.at(i);
4059 
4060         if (cur->assigned_reg() >= 0 && cur->assigned_reg() < LinearScan::nof_regs) {
4061           return cur;
4062         }
4063       }
4064     }
4065   }
4066 
4067   // no hint interval found that has a register assigned
4068   return NULL;
4069 }
4070 
4071 
4072 Interval* Interval::split_child_at_op_id(int op_id, LIR_OpVisitState::OprMode mode) {
4073   assert(is_split_parent(), "can only be called for split parents");
4074   assert(op_id >= 0, "invalid op_id (method can not be called for spill moves)");
4075 
4076   Interval* result;
4077   if (_split_children.length() == 0) {
4078     result = this;
4079   } else {
4080     result = NULL;
4081     int len = _split_children.length();
4082 
4083     // in outputMode, the end of the interval (op_id == cur->to()) is not valid
4084     int to_offset = (mode == LIR_OpVisitState::outputMode ? 0 : 1);
4085 
4086     int i;
4087     for (i = 0; i < len; i++) {
4088       Interval* cur = _split_children.at(i);
4089       if (cur->from() <= op_id && op_id < cur->to() + to_offset) {
4090         if (i > 0) {
          // swap the current split child to the start of the list (faster access on subsequent calls)
4092           _split_children.at_put(i, _split_children.at(0));
4093           _split_children.at_put(0, cur);
4094         }
4095 
4096         // interval found
4097         result = cur;
4098         break;
4099       }
4100     }
4101 
4102 #ifdef ASSERT
4103     for (i = 0; i < len; i++) {
4104       Interval* tmp = _split_children.at(i);
4105       if (tmp != result && tmp->from() <= op_id && op_id < tmp->to() + to_offset) {
4106         tty->print_cr("two valid result intervals found for op_id %d: %d and %d", op_id, result->reg_num(), tmp->reg_num());
4107         result->print();
4108         tmp->print();
4109         assert(false, "two valid result intervals found");
4110       }
4111     }
4112 #endif
4113   }
4114 
4115   assert(result != NULL, "no matching interval found");
4116   assert(result->covers(op_id, mode), "op_id not covered by interval");
4117 
4118   return result;
4119 }
4120 
4121 
4122 // returns the last split child that ends before the given op_id
4123 Interval* Interval::split_child_before_op_id(int op_id) {
4124   assert(op_id >= 0, "invalid op_id");
4125 
4126   Interval* parent = split_parent();
4127   Interval* result = NULL;
4128 
4129   int len = parent->_split_children.length();
4130   assert(len > 0, "no split children available");
4131 
4132   for (int i = len - 1; i >= 0; i--) {
4133     Interval* cur = parent->_split_children.at(i);
4134     if (cur->to() <= op_id && (result == NULL || result->to() < cur->to())) {
4135       result = cur;
4136     }
4137   }
4138 
4139   assert(result != NULL, "no split child found");
4140   return result;
4141 }
4142 
4143 
4144 // checks if op_id is covered by any split child
4145 bool Interval::split_child_covers(int op_id, LIR_OpVisitState::OprMode mode) {
4146   assert(is_split_parent(), "can only be called for split parents");
4147   assert(op_id >= 0, "invalid op_id (method can not be called for spill moves)");
4148 
4149   if (_split_children.length() == 0) {
4150     // simple case if interval was not split
4151     return covers(op_id, mode);
4152 
4153   } else {
4154     // extended case: check all split children
4155     int len = _split_children.length();
4156     for (int i = 0; i < len; i++) {
4157       Interval* cur = _split_children.at(i);
4158       if (cur->covers(op_id, mode)) {
4159         return true;
4160       }
4161     }
4162     return false;
4163   }
4164 }
4165 
4166 
4167 // Note: use positions are sorted descending -> first use has highest index
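// Example: uses at op_ids 20, 12 and 4 are stored as the flat pair list
//   _use_pos_and_kinds = [20, kind, 12, kind, 4, kind]
// so the loops below start at the highest index, i.e. at the lowest position:
// first_usage/next_usage return the lowest qualifying position, previous_usage
// the highest qualifying position that is not greater than from.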
4168 int Interval::first_usage(IntervalUseKind min_use_kind) const {
4169   assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");
4170 
4171   for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
4172     if (_use_pos_and_kinds.at(i + 1) >= min_use_kind) {
4173       return _use_pos_and_kinds.at(i);
4174     }
4175   }
4176   return max_jint;
4177 }
4178 
4179 int Interval::next_usage(IntervalUseKind min_use_kind, int from) const {
4180   assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");
4181 
4182   for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
4183     if (_use_pos_and_kinds.at(i) >= from && _use_pos_and_kinds.at(i + 1) >= min_use_kind) {
4184       return _use_pos_and_kinds.at(i);
4185     }
4186   }
4187   return max_jint;
4188 }
4189 
4190 int Interval::next_usage_exact(IntervalUseKind exact_use_kind, int from) const {
4191   assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");
4192 
4193   for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
4194     if (_use_pos_and_kinds.at(i) >= from && _use_pos_and_kinds.at(i + 1) == exact_use_kind) {
4195       return _use_pos_and_kinds.at(i);
4196     }
4197   }
4198   return max_jint;
4199 }
4200 
4201 int Interval::previous_usage(IntervalUseKind min_use_kind, int from) const {
4202   assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");
4203 
4204   int prev = 0;
4205   for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
4206     if (_use_pos_and_kinds.at(i) > from) {
4207       return prev;
4208     }
4209     if (_use_pos_and_kinds.at(i + 1) >= min_use_kind) {
4210       prev = _use_pos_and_kinds.at(i);
4211     }
4212   }
4213   return prev;
4214 }
4215 
4216 void Interval::add_use_pos(int pos, IntervalUseKind use_kind) {
4217   assert(covers(pos, LIR_OpVisitState::inputMode), "use position not covered by live range");
4218 
4219   // do not add use positions for precolored intervals because
4220   // they are never used
4221   if (use_kind != noUse && reg_num() >= LIR_OprDesc::vreg_base) {
4222 #ifdef ASSERT
4223     assert(_use_pos_and_kinds.length() % 2 == 0, "must be");
4224     for (int i = 0; i < _use_pos_and_kinds.length(); i += 2) {
4225       assert(pos <= _use_pos_and_kinds.at(i), "already added a use-position with lower position");
4226       assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4227       if (i > 0) {
4228         assert(_use_pos_and_kinds.at(i) < _use_pos_and_kinds.at(i - 2), "not sorted descending");
4229       }
4230     }
4231 #endif
4232 
4233     // Note: add_use is called in descending order, so list gets sorted
4234     //       automatically by just appending new use positions
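    //       If the same position is added twice, no duplicate entry is
    //       appended; only the strongest use kind is kept (see the at_put below).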
4235     int len = _use_pos_and_kinds.length();
4236     if (len == 0 || _use_pos_and_kinds.at(len - 2) > pos) {
4237       _use_pos_and_kinds.append(pos);
4238       _use_pos_and_kinds.append(use_kind);
4239     } else if (_use_pos_and_kinds.at(len - 1) < use_kind) {
4240       assert(_use_pos_and_kinds.at(len - 2) == pos, "list not sorted correctly");
4241       _use_pos_and_kinds.at_put(len - 1, use_kind);
4242     }
4243   }
4244 }
4245 
4246 void Interval::add_range(int from, int to) {
4247   assert(from < to, "invalid range");
4248   assert(first() == Range::end() || to < first()->next()->from(), "not inserting at begin of interval");
4249   assert(from <= first()->to(), "not inserting at begin of interval");
4250 
4251   if (first()->from() <= to) {
4252     // join intersecting ranges
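    // (e.g. first() == [10, 20[ joined with add_range(4, 10) yields [4, 20[)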
4253     first()->set_from(MIN2(from, first()->from()));
4254     first()->set_to  (MAX2(to,   first()->to()));
4255   } else {
4256     // insert new range
4257     _first = new Range(from, to, first());
4258   }
4259 }
4260 
4261 Interval* Interval::new_split_child() {
4262   // allocate new interval
4263   Interval* result = new Interval(-1);
4264   result->set_type(type());
4265 
4266   Interval* parent = split_parent();
4267   result->_split_parent = parent;
4268   result->set_register_hint(parent);
4269 
4270   // insert new interval in children-list of parent
4271   if (parent->_split_children.length() == 0) {
4272     assert(is_split_parent(), "list must be initialized at first split");
4273 
4274     parent->_split_children = IntervalList(4);
4275     parent->_split_children.append(this);
4276   }
4277   parent->_split_children.append(result);
4278 
4279   return result;
4280 }
4281 
4282 // split this interval at the specified position and return
4283 // the remainder as a new interval.
4284 //
4285 // when an interval is split, a bi-directional link is established between the original interval
// (the split parent) and the intervals that are split off this interval (the split children).
// When a split child is split again, the newly created interval is also a direct child
// of the original parent (split children are stored as a flat list, not as a tree).
// All split children are spilled to the same stack slot (stored in _canonical_spill_slot).
4290 //
4291 // Note: The new interval has no valid reg_num
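//
// Example: splitting an interval with the single range [10, 20[ at position 14
// leaves [10, 14[ in this interval and moves [14, 20[, together with all use
// positions >= 14, into the returned split child.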
4292 Interval* Interval::split(int split_pos) {
4293   assert(LinearScan::is_virtual_interval(this), "cannot split fixed intervals");
4294 
4295   // allocate new interval
4296   Interval* result = new_split_child();
4297 
4298   // split the ranges
4299   Range* prev = NULL;
4300   Range* cur = _first;
4301   while (cur != Range::end() && cur->to() <= split_pos) {
4302     prev = cur;
4303     cur = cur->next();
4304   }
4305   assert(cur != Range::end(), "split interval after end of last range");
4306 
4307   if (cur->from() < split_pos) {
4308     result->_first = new Range(split_pos, cur->to(), cur->next());
4309     cur->set_to(split_pos);
4310     cur->set_next(Range::end());
4311 
4312   } else {
4313     assert(prev != NULL, "split before start of first range");
4314     result->_first = cur;
4315     prev->set_next(Range::end());
4316   }
4317   result->_current = result->_first;
4318   _cached_to = -1; // clear cached value
4319 
4320   // split list of use positions
4321   int total_len = _use_pos_and_kinds.length();
4322   int start_idx = total_len - 2;
4323   while (start_idx >= 0 && _use_pos_and_kinds.at(start_idx) < split_pos) {
4324     start_idx -= 2;
4325   }
4326 
4327   intStack new_use_pos_and_kinds(total_len - start_idx);
4328   int i;
4329   for (i = start_idx + 2; i < total_len; i++) {
4330     new_use_pos_and_kinds.append(_use_pos_and_kinds.at(i));
4331   }
4332 
4333   _use_pos_and_kinds.truncate(start_idx + 2);
4334   result->_use_pos_and_kinds = _use_pos_and_kinds;
4335   _use_pos_and_kinds = new_use_pos_and_kinds;
4336 
4337 #ifdef ASSERT
4338   assert(_use_pos_and_kinds.length() % 2 == 0, "must have use kind for each use pos");
4339   assert(result->_use_pos_and_kinds.length() % 2 == 0, "must have use kind for each use pos");
4340   assert(_use_pos_and_kinds.length() + result->_use_pos_and_kinds.length() == total_len, "missed some entries");
4341 
4342   for (i = 0; i < _use_pos_and_kinds.length(); i += 2) {
4343     assert(_use_pos_and_kinds.at(i) < split_pos, "must be");
4344     assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4345   }
4346   for (i = 0; i < result->_use_pos_and_kinds.length(); i += 2) {
4347     assert(result->_use_pos_and_kinds.at(i) >= split_pos, "must be");
4348     assert(result->_use_pos_and_kinds.at(i + 1) >= firstValidKind && result->_use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4349   }
4350 #endif
4351 
4352   return result;
4353 }
4354 
4355 // split this interval at the specified position and return
4356 // the head as a new interval (the original interval is the tail)
4357 //
// Currently, only the first range can be split, and the new interval
// must not contain any use positions
4360 Interval* Interval::split_from_start(int split_pos) {
4361   assert(LinearScan::is_virtual_interval(this), "cannot split fixed intervals");
4362   assert(split_pos > from() && split_pos < to(), "can only split inside interval");
4363   assert(split_pos > _first->from() && split_pos <= _first->to(), "can only split inside first range");
4364   assert(first_usage(noUse) > split_pos, "can not split when use positions are present");
4365 
4366   // allocate new interval
4367   Interval* result = new_split_child();
4368 
  // the newly created interval has only one range (checked by the assertion above),
4370   // so the splitting of the ranges is very simple
4371   result->add_range(_first->from(), split_pos);
4372 
4373   if (split_pos == _first->to()) {
4374     assert(_first->next() != Range::end(), "must not be at end");
4375     _first = _first->next();
4376   } else {
4377     _first->set_from(split_pos);
4378   }
4379 
4380   return result;
4381 }
4382 
4383 
4384 // returns true if the op_id is inside the interval
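// (for a range [10, 20[, op_id 20 is covered in inputMode but not in
// outputMode; op_id 10 is covered in both modes)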
4385 bool Interval::covers(int op_id, LIR_OpVisitState::OprMode mode) const {
4386   Range* cur  = _first;
4387 
4388   while (cur != Range::end() && cur->to() < op_id) {
4389     cur = cur->next();
4390   }
4391   if (cur != Range::end()) {
4392     assert(cur->to() != cur->next()->from(), "ranges not separated");
4393 
4394     if (mode == LIR_OpVisitState::outputMode) {
4395       return cur->from() <= op_id && op_id < cur->to();
4396     } else {
4397       return cur->from() <= op_id && op_id <= cur->to();
4398     }
4399   }
4400   return false;
4401 }
4402 
4403 // returns true if the interval has any hole between hole_from and hole_to
4404 // (even if the hole has only the length 1)
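// Example: for the ranges [10, 14[ and [18, 24[, has_hole_between(14, 18) and
// has_hole_between(12, 16) return true, while has_hole_between(11, 13) returns false.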
4405 bool Interval::has_hole_between(int hole_from, int hole_to) {
4406   assert(hole_from < hole_to, "check");
4407   assert(from() <= hole_from && hole_to <= to(), "index out of interval");
4408 
4409   Range* cur  = _first;
4410   while (cur != Range::end()) {
4411     assert(cur->to() < cur->next()->from(), "no space between ranges");
4412 
4413     // hole-range starts before this range -> hole
4414     if (hole_from < cur->from()) {
4415       return true;
4416 
4417     // hole-range completely inside this range -> no hole
4418     } else if (hole_to <= cur->to()) {
4419       return false;
4420 
4421     // overlapping of hole-range with this range -> hole
4422     } else if (hole_from <= cur->to()) {
4423       return true;
4424     }
4425 
4426     cur = cur->next();
4427   }
4428 
4429   return false;
4430 }
4431 
4432 
4433 #ifndef PRODUCT
4434 void Interval::print(outputStream* out) const {
4435   const char* SpillState2Name[] = { "no definition", "no spill store", "one spill store", "store at definition", "start in memory", "no optimization" };
4436   const char* UseKind2Name[] = { "N", "L", "S", "M" };
4437 
4438   const char* type_name;
4439   LIR_Opr opr = LIR_OprFact::illegal();
4440   if (reg_num() < LIR_OprDesc::vreg_base) {
4441     type_name = "fixed";
4442     // need a temporary operand for fixed intervals because type() cannot be called
4443     if (assigned_reg() >= pd_first_cpu_reg && assigned_reg() <= pd_last_cpu_reg) {
4444       opr = LIR_OprFact::single_cpu(assigned_reg());
4445     } else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
4446       opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
4447 #ifdef X86
4448     } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) {
4449       opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
4450 #endif
4451     } else {
4452       ShouldNotReachHere();
4453     }
4454   } else {
4455     type_name = type2name(type());
4456     if (assigned_reg() != -1) {
4457       opr = LinearScan::calc_operand_for_interval(this);
4458     }
4459   }
4460 
4461   out->print("%d %s ", reg_num(), type_name);
4462   if (opr->is_valid()) {
4463     out->print("\"");
4464     opr->print(out);
4465     out->print("\" ");
4466   }
4467   out->print("%d %d ", split_parent()->reg_num(), (register_hint(false) != NULL ? register_hint(false)->reg_num() : -1));
4468 
4469   // print ranges
4470   Range* cur = _first;
4471   while (cur != Range::end()) {
4472     cur->print(out);
4473     cur = cur->next();
4474     assert(cur != NULL, "range list not closed with range sentinel");
4475   }
4476 
4477   // print use positions
4478   int prev = 0;
4479   assert(_use_pos_and_kinds.length() % 2 == 0, "must be");
4480   for (int i =_use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
4481     assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4482     assert(prev < _use_pos_and_kinds.at(i), "use positions not sorted");
4483 
4484     out->print("%d %s ", _use_pos_and_kinds.at(i), UseKind2Name[_use_pos_and_kinds.at(i + 1)]);
4485     prev = _use_pos_and_kinds.at(i);
4486   }
4487 
4488   out->print(" \"%s\"", SpillState2Name[spill_state()]);
4489   out->cr();
4490 }
4491 #endif
4492 
4493 
4494 
4495 // **** Implementation of IntervalWalker ****************************
4496 
4497 IntervalWalker::IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first)
4498  : _compilation(allocator->compilation())
4499  , _allocator(allocator)
4500 {
4501   _unhandled_first[fixedKind] = unhandled_fixed_first;
4502   _unhandled_first[anyKind]   = unhandled_any_first;
4503   _active_first[fixedKind]    = Interval::end();
4504   _inactive_first[fixedKind]  = Interval::end();
4505   _active_first[anyKind]      = Interval::end();
4506   _inactive_first[anyKind]    = Interval::end();
4507   _current_position = -1;
4508   _current = NULL;
4509   next_interval();
4510 }
4511 
4512 
4513 // append interval at top of list
4514 void IntervalWalker::append_unsorted(Interval** list, Interval* interval) {
4515   interval->set_next(*list); *list = interval;
4516 }
4517 
4518 
4519 // append interval in order of current range from()
4520 void IntervalWalker::append_sorted(Interval** list, Interval* interval) {
4521   Interval* prev = NULL;
4522   Interval* cur  = *list;
4523   while (cur->current_from() < interval->current_from()) {
4524     prev = cur; cur = cur->next();
4525   }
4526   if (prev == NULL) {
4527     *list = interval;
4528   } else {
4529     prev->set_next(interval);
4530   }
4531   interval->set_next(cur);
4532 }
4533 
4534 void IntervalWalker::append_to_unhandled(Interval** list, Interval* interval) {
4535   assert(interval->from() >= current()->current_from(), "cannot append new interval before current walk position");
4536 
4537   Interval* prev = NULL;
4538   Interval* cur  = *list;
4539   while (cur->from() < interval->from() || (cur->from() == interval->from() && cur->first_usage(noUse) < interval->first_usage(noUse))) {
4540     prev = cur; cur = cur->next();
4541   }
4542   if (prev == NULL) {
4543     *list = interval;
4544   } else {
4545     prev->set_next(interval);
4546   }
4547   interval->set_next(cur);
4548 }
4549 
4550 
4551 inline bool IntervalWalker::remove_from_list(Interval** list, Interval* i) {
4552   while (*list != Interval::end() && *list != i) {
4553     list = (*list)->next_addr();
4554   }
4555   if (*list != Interval::end()) {
4556     assert(*list == i, "check");
4557     *list = (*list)->next();
4558     return true;
4559   } else {
4560     return false;
4561   }
4562 }
4563 
4564 void IntervalWalker::remove_from_list(Interval* i) {
4565   bool deleted;
4566 
4567   if (i->state() == activeState) {
4568     deleted = remove_from_list(active_first_addr(anyKind), i);
4569   } else {
4570     assert(i->state() == inactiveState, "invalid state");
4571     deleted = remove_from_list(inactive_first_addr(anyKind), i);
4572   }
4573 
4574   assert(deleted, "interval has not been found in list");
4575 }
4576 
4577 
4578 void IntervalWalker::walk_to(IntervalState state, int from) {
4579   assert (state == activeState || state == inactiveState, "wrong state");
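  // Walk all intervals in this list up to the given position: ranges that end
  // at or before the position are skipped via next_range(); an interval is
  // then re-sorted into the active or inactive list according to its new
  // current range, or marked as handled when no range is left.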
4580   for_each_interval_kind(kind) {
4581     Interval** prev = state == activeState ? active_first_addr(kind) : inactive_first_addr(kind);
4582     Interval* next   = *prev;
4583     while (next->current_from() <= from) {
4584       Interval* cur = next;
4585       next = cur->next();
4586 
4587       bool range_has_changed = false;
4588       while (cur->current_to() <= from) {
4589         cur->next_range();
4590         range_has_changed = true;
4591       }
4592 
4593       // also handle move from inactive list to active list
4594       range_has_changed = range_has_changed || (state == inactiveState && cur->current_from() <= from);
4595 
4596       if (range_has_changed) {
4597         // remove cur from list
4598         *prev = next;
4599         if (cur->current_at_end()) {
4600           // move to handled state (not maintained as a list)
4601           cur->set_state(handledState);
4602           interval_moved(cur, kind, state, handledState);
        } else if (cur->current_from() <= from) {
4604           // sort into active list
4605           append_sorted(active_first_addr(kind), cur);
4606           cur->set_state(activeState);
4607           if (*prev == cur) {
4608             assert(state == activeState, "check");
4609             prev = cur->next_addr();
4610           }
4611           interval_moved(cur, kind, state, activeState);
4612         } else {
4613           // sort into inactive list
4614           append_sorted(inactive_first_addr(kind), cur);
4615           cur->set_state(inactiveState);
4616           if (*prev == cur) {
4617             assert(state == inactiveState, "check");
4618             prev = cur->next_addr();
4619           }
4620           interval_moved(cur, kind, state, inactiveState);
4621         }
4622       } else {
4623         prev = cur->next_addr();
4624         continue;
4625       }
4626     }
4627   }
4628 }
4629 
4630 
4631 void IntervalWalker::next_interval() {
4632   IntervalKind kind;
4633   Interval* any   = _unhandled_first[anyKind];
4634   Interval* fixed = _unhandled_first[fixedKind];
4635 
4636   if (any != Interval::end()) {
4637     // intervals may start at same position -> prefer fixed interval
4638     kind = fixed != Interval::end() && fixed->from() <= any->from() ? fixedKind : anyKind;
4639 
    assert((kind == fixedKind && fixed->from() <= any->from()) ||
           (kind == anyKind   && any->from() <= fixed->from()), "wrong interval!!!");
4642     assert(any == Interval::end() || fixed == Interval::end() || any->from() != fixed->from() || kind == fixedKind, "if fixed and any-Interval start at same position, fixed must be processed first");
4643 
4644   } else if (fixed != Interval::end()) {
4645     kind = fixedKind;
4646   } else {
4647     _current = NULL; return;
4648   }
4649   _current_kind = kind;
4650   _current = _unhandled_first[kind];
4651   _unhandled_first[kind] = _current->next();
4652   _current->set_next(Interval::end());
4653   _current->rewind_range();
4654 }
4655 
4656 
4657 void IntervalWalker::walk_to(int lir_op_id) {
4658   assert(_current_position <= lir_op_id, "can not walk backwards");
4659   while (current() != NULL) {
4660     bool is_active = current()->from() <= lir_op_id;
4661     int id = is_active ? current()->from() : lir_op_id;
4662 
4663     TRACE_LINEAR_SCAN(2, if (_current_position < id) { tty->cr(); tty->print_cr("walk_to(%d) **************************************************************", id); })
4664 
4665     // set _current_position prior to call of walk_to
4666     _current_position = id;
4667 
4668     // call walk_to even if _current_position == id
4669     walk_to(activeState, id);
4670     walk_to(inactiveState, id);
4671 
4672     if (is_active) {
4673       current()->set_state(activeState);
4674       if (activate_current()) {
4675         append_sorted(active_first_addr(current_kind()), current());
4676         interval_moved(current(), current_kind(), unhandledState, activeState);
4677       }
4678 
4679       next_interval();
4680     } else {
4681       return;
4682     }
4683   }
4684 }
4685 
4686 void IntervalWalker::interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to) {
4687 #ifndef PRODUCT
4688   if (TraceLinearScanLevel >= 4) {
4689     #define print_state(state) \
4690     switch(state) {\
4691       case unhandledState: tty->print("unhandled"); break;\
4692       case activeState: tty->print("active"); break;\
4693       case inactiveState: tty->print("inactive"); break;\
4694       case handledState: tty->print("handled"); break;\
4695       default: ShouldNotReachHere(); \
4696     }
4697 
4698     print_state(from); tty->print(" to "); print_state(to);
4699     tty->fill_to(23);
4700     interval->print();
4701 
4702     #undef print_state
4703   }
4704 #endif
4705 }
4706 
4707 
4708 
4709 // **** Implementation of LinearScanWalker **************************
4710 
4711 LinearScanWalker::LinearScanWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first)
4712   : IntervalWalker(allocator, unhandled_fixed_first, unhandled_any_first)
4713   , _move_resolver(allocator)
4714 {
4715   for (int i = 0; i < LinearScan::nof_regs; i++) {
4716     _spill_intervals[i] = new IntervalList(2);
4717   }
4718 }
4719 
4720 
4721 inline void LinearScanWalker::init_use_lists(bool only_process_use_pos) {
4722   for (int i = _first_reg; i <= _last_reg; i++) {
4723     _use_pos[i] = max_jint;
4724 
4725     if (!only_process_use_pos) {
4726       _block_pos[i] = max_jint;
4727       _spill_intervals[i]->clear();
4728     }
4729   }
4730 }
4731 
4732 inline void LinearScanWalker::exclude_from_use(int reg) {
4733   assert(reg < LinearScan::nof_regs, "interval must have a register assigned (stack slots not allowed)");
4734   if (reg >= _first_reg && reg <= _last_reg) {
4735     _use_pos[reg] = 0;
4736   }
4737 }
4738 inline void LinearScanWalker::exclude_from_use(Interval* i) {
4739   assert(i->assigned_reg() != any_reg, "interval has no register assigned");
4740 
4741   exclude_from_use(i->assigned_reg());
4742   exclude_from_use(i->assigned_regHi());
4743 }
4744 
4745 inline void LinearScanWalker::set_use_pos(int reg, Interval* i, int use_pos, bool only_process_use_pos) {
4746   assert(use_pos != 0, "must use exclude_from_use to set use_pos to 0");
4747 
4748   if (reg >= _first_reg && reg <= _last_reg) {
4749     if (_use_pos[reg] > use_pos) {
4750       _use_pos[reg] = use_pos;
4751     }
4752     if (!only_process_use_pos) {
4753       _spill_intervals[reg]->append(i);
4754     }
4755   }
4756 }
4757 inline void LinearScanWalker::set_use_pos(Interval* i, int use_pos, bool only_process_use_pos) {
4758   assert(i->assigned_reg() != any_reg, "interval has no register assigned");
4759   if (use_pos != -1) {
4760     set_use_pos(i->assigned_reg(), i, use_pos, only_process_use_pos);
4761     set_use_pos(i->assigned_regHi(), i, use_pos, only_process_use_pos);
4762   }
4763 }
4764 
4765 inline void LinearScanWalker::set_block_pos(int reg, Interval* i, int block_pos) {
4766   if (reg >= _first_reg && reg <= _last_reg) {
4767     if (_block_pos[reg] > block_pos) {
4768       _block_pos[reg] = block_pos;
4769     }
4770     if (_use_pos[reg] > block_pos) {
4771       _use_pos[reg] = block_pos;
4772     }
4773   }
4774 }
4775 inline void LinearScanWalker::set_block_pos(Interval* i, int block_pos) {
4776   assert(i->assigned_reg() != any_reg, "interval has no register assigned");
4777   if (block_pos != -1) {
4778     set_block_pos(i->assigned_reg(), i, block_pos);
4779     set_block_pos(i->assigned_regHi(), i, block_pos);
4780   }
4781 }
4782 
4783 
4784 void LinearScanWalker::free_exclude_active_fixed() {
4785   Interval* list = active_first(fixedKind);
4786   while (list != Interval::end()) {
4787     assert(list->assigned_reg() < LinearScan::nof_regs, "active interval must have a register assigned");
4788     exclude_from_use(list);
4789     list = list->next();
4790   }
4791 }
4792 
4793 void LinearScanWalker::free_exclude_active_any() {
4794   Interval* list = active_first(anyKind);
4795   while (list != Interval::end()) {
4796     exclude_from_use(list);
4797     list = list->next();
4798   }
4799 }
4800 
4801 void LinearScanWalker::free_collect_inactive_fixed(Interval* cur) {
4802   Interval* list = inactive_first(fixedKind);
4803   while (list != Interval::end()) {
4804     if (cur->to() <= list->current_from()) {
4805       assert(list->current_intersects_at(cur) == -1, "must not intersect");
4806       set_use_pos(list, list->current_from(), true);
4807     } else {
4808       set_use_pos(list, list->current_intersects_at(cur), true);
4809     }
4810     list = list->next();
4811   }
4812 }
4813 
4814 void LinearScanWalker::free_collect_inactive_any(Interval* cur) {
4815   Interval* list = inactive_first(anyKind);
4816   while (list != Interval::end()) {
4817     set_use_pos(list, list->current_intersects_at(cur), true);
4818     list = list->next();
4819   }
4820 }
4821 
4822 void LinearScanWalker::free_collect_unhandled(IntervalKind kind, Interval* cur) {
4823   Interval* list = unhandled_first(kind);
4824   while (list != Interval::end()) {
4825     set_use_pos(list, list->intersects_at(cur), true);
4826     if (kind == fixedKind && cur->to() <= list->from()) {
4827       set_use_pos(list, list->from(), true);
4828     }
4829     list = list->next();
4830   }
4831 }
4832 
4833 void LinearScanWalker::spill_exclude_active_fixed() {
4834   Interval* list = active_first(fixedKind);
4835   while (list != Interval::end()) {
4836     exclude_from_use(list);
4837     list = list->next();
4838   }
4839 }
4840 
4841 void LinearScanWalker::spill_block_unhandled_fixed(Interval* cur) {
4842   Interval* list = unhandled_first(fixedKind);
4843   while (list != Interval::end()) {
4844     set_block_pos(list, list->intersects_at(cur));
4845     list = list->next();
4846   }
4847 }
4848 
4849 void LinearScanWalker::spill_block_inactive_fixed(Interval* cur) {
4850   Interval* list = inactive_first(fixedKind);
4851   while (list != Interval::end()) {
4852     if (cur->to() > list->current_from()) {
4853       set_block_pos(list, list->current_intersects_at(cur));
4854     } else {
4855       assert(list->current_intersects_at(cur) == -1, "invalid optimization: intervals intersect");
4856     }
4857 
4858     list = list->next();
4859   }
4860 }
4861 
4862 void LinearScanWalker::spill_collect_active_any() {
4863   Interval* list = active_first(anyKind);
4864   while (list != Interval::end()) {
4865     set_use_pos(list, MIN2(list->next_usage(loopEndMarker, _current_position), list->to()), false);
4866     list = list->next();
4867   }
4868 }
4869 
4870 void LinearScanWalker::spill_collect_inactive_any(Interval* cur) {
4871   Interval* list = inactive_first(anyKind);
4872   while (list != Interval::end()) {
4873     if (list->current_intersects(cur)) {
4874       set_use_pos(list, MIN2(list->next_usage(loopEndMarker, _current_position), list->to()), false);
4875     }
4876     list = list->next();
4877   }
4878 }
4879 
4880 
4881 void LinearScanWalker::insert_move(int op_id, Interval* src_it, Interval* dst_it) {
4882   // output all moves here. When source and target are equal, the move is
4883   // optimized away later in assign_reg_nums
4884 
4885   op_id = (op_id + 1) & ~1;
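  // (LIR instructions have even op_ids spaced two apart - see the index
  //  calculation below - so odd op_ids are rounded up to the id of the
  //  following instruction: 17 -> 18, 18 -> 18)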
4886   BlockBegin* op_block = allocator()->block_of_op_with_id(op_id);
4887   assert(op_id > 0 && allocator()->block_of_op_with_id(op_id - 2) == op_block, "cannot insert move at block boundary");
4888 
4889   // calculate index of instruction inside instruction list of current block
4890   // the minimal index (for a block with no spill moves) can be calculated because the
4891   // numbering of instructions is known.
4892   // When the block already contains spill moves, the index must be increased until the
4893   // correct index is reached.
4894   LIR_OpList* list = op_block->lir()->instructions_list();
4895   int index = (op_id - list->at(0)->id()) / 2;
4896   assert(list->at(index)->id() <= op_id, "error in calculation");
4897 
4898   while (list->at(index)->id() != op_id) {
4899     index++;
4900     assert(0 <= index && index < list->length(), "index out of bounds");
4901   }
4902   assert(1 <= index && index < list->length(), "index out of bounds");
4903   assert(list->at(index)->id() == op_id, "error in calculation");
4904 
4905   // insert new instruction before instruction at position index
4906   _move_resolver.move_insert_position(op_block->lir(), index - 1);
4907   _move_resolver.add_mapping(src_it, dst_it);
4908 }
4909 
4910 
4911 int LinearScanWalker::find_optimal_split_pos(BlockBegin* min_block, BlockBegin* max_block, int max_split_pos) {
4912   int from_block_nr = min_block->linear_scan_number();
4913   int to_block_nr = max_block->linear_scan_number();
4914 
4915   assert(0 <= from_block_nr && from_block_nr < block_count(), "out of range");
4916   assert(0 <= to_block_nr && to_block_nr < block_count(), "out of range");
4917   assert(from_block_nr < to_block_nr, "must cross block boundary");
4918 
4919   // Try to split at end of max_block. If this would be after
4920   // max_split_pos, then use the begin of max_block
4921   int optimal_split_pos = max_block->last_lir_instruction_id() + 2;
4922   if (optimal_split_pos > max_split_pos) {
4923     optimal_split_pos = max_block->first_lir_instruction_id();
4924   }
4925 
4926   int min_loop_depth = max_block->loop_depth();
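  // prefer the end of a block with a smaller loop depth between min_block and
  // max_block, so that the resulting spill moves are placed outside of loops
  // where possible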
4927   for (int i = to_block_nr - 1; i >= from_block_nr; i--) {
4928     BlockBegin* cur = block_at(i);
4929 
4930     if (cur->loop_depth() < min_loop_depth) {
4931       // block with lower loop-depth found -> split at the end of this block
4932       min_loop_depth = cur->loop_depth();
4933       optimal_split_pos = cur->last_lir_instruction_id() + 2;
4934     }
4935   }
4936   assert(optimal_split_pos > allocator()->max_lir_op_id() || allocator()->is_block_begin(optimal_split_pos), "algorithm must move split pos to block boundary");
4937 
4938   return optimal_split_pos;
4939 }
4940 
4941 
4942 int LinearScanWalker::find_optimal_split_pos(Interval* it, int min_split_pos, int max_split_pos, bool do_loop_optimization) {
4943   int optimal_split_pos = -1;
4944   if (min_split_pos == max_split_pos) {
4945     // trivial case, no optimization of split position possible
4946     TRACE_LINEAR_SCAN(4, tty->print_cr("      min-pos and max-pos are equal, no optimization possible"));
4947     optimal_split_pos = min_split_pos;
4948 
4949   } else {
4950     assert(min_split_pos < max_split_pos, "must be true then");
4951     assert(min_split_pos > 0, "cannot access min_split_pos - 1 otherwise");
4952 
4953     // reason for using min_split_pos - 1: when the minimal split pos is exactly at the
4954     // beginning of a block, then min_split_pos is also a possible split position.
4955     // Use the block before as min_block, because then min_block->last_lir_instruction_id() + 2 == min_split_pos
4956     BlockBegin* min_block = allocator()->block_of_op_with_id(min_split_pos - 1);
4957 
4958     // reason for using max_split_pos - 1: otherwise there would be an assertion failure
4959     // when an interval ends at the end of the last block of the method
4960     // (in this case, max_split_pos == allocator()->max_lir_op_id() + 2, and there is no
4961     // block at this op_id)
4962     BlockBegin* max_block = allocator()->block_of_op_with_id(max_split_pos - 1);
4963 
4964     assert(min_block->linear_scan_number() <= max_block->linear_scan_number(), "invalid order");
4965     if (min_block == max_block) {
4966       // split position cannot be moved to block boundary, so split as late as possible
4967       TRACE_LINEAR_SCAN(4, tty->print_cr("      cannot move split pos to block boundary because min_pos and max_pos are in same block"));
4968       optimal_split_pos = max_split_pos;
4969 
4970     } else if (it->has_hole_between(max_split_pos - 1, max_split_pos) && !allocator()->is_block_begin(max_split_pos)) {
4971       // Do not move split position if the interval has a hole before max_split_pos.
4972       // Intervals resulting from Phi-Functions have more than one definition (marked
4973       // as mustHaveRegister) with a hole before each definition. When the register is needed
4974       // for the second definition, an earlier reloading is unnecessary.
4975       TRACE_LINEAR_SCAN(4, tty->print_cr("      interval has hole just before max_split_pos, so splitting at max_split_pos"));
4976       optimal_split_pos = max_split_pos;
4977 
4978     } else {
      // search the optimal block boundary between min_split_pos and max_split_pos
4980       TRACE_LINEAR_SCAN(4, tty->print_cr("      moving split pos to optimal block boundary between block B%d and B%d", min_block->block_id(), max_block->block_id()));
4981 
4982       if (do_loop_optimization) {
4983         // Loop optimization: if a loop-end marker is found between min- and max-position,
4984         // then split before this loop
4985         int loop_end_pos = it->next_usage_exact(loopEndMarker, min_block->last_lir_instruction_id() + 2);
4986         TRACE_LINEAR_SCAN(4, tty->print_cr("      loop optimization: loop end found at pos %d", loop_end_pos));
4987 
4988         assert(loop_end_pos > min_split_pos, "invalid order");
4989         if (loop_end_pos < max_split_pos) {
4990           // loop-end marker found between min- and max-position
4991           // if it is not the end marker for the same loop as the min-position, then move
4992           // the max-position to this loop block.
4993           // Desired result: uses tagged as shouldHaveRegister inside a loop cause a reloading
4994           // of the interval (normally, only mustHaveRegister causes a reloading)
4995           BlockBegin* loop_block = allocator()->block_of_op_with_id(loop_end_pos);
4996 
4997           TRACE_LINEAR_SCAN(4, tty->print_cr("      interval is used in loop that ends in block B%d, so trying to move max_block back from B%d to B%d", loop_block->block_id(), max_block->block_id(), loop_block->block_id()));
4998           assert(loop_block != min_block, "loop_block and min_block must be different because block boundary is needed between");
4999 
5000           optimal_split_pos = find_optimal_split_pos(min_block, loop_block, loop_block->last_lir_instruction_id() + 2);
5001           if (optimal_split_pos == loop_block->last_lir_instruction_id() + 2) {
5002             optimal_split_pos = -1;
5003             TRACE_LINEAR_SCAN(4, tty->print_cr("      loop optimization not necessary"));
5004           } else {
5005             TRACE_LINEAR_SCAN(4, tty->print_cr("      loop optimization successful"));
5006           }
5007         }
5008       }
5009 
5010       if (optimal_split_pos == -1) {
5011         // not calculated by loop optimization
5012         optimal_split_pos = find_optimal_split_pos(min_block, max_block, max_split_pos);
5013       }
5014     }
5015   }
5016   TRACE_LINEAR_SCAN(4, tty->print_cr("      optimal split position: %d", optimal_split_pos));
5017 
5018   return optimal_split_pos;
5019 }
5020 
5021 
/*
  split an interval at the optimal position between min_split_pos and
  max_split_pos into two parts:
  1) the left part already has a location assigned
  2) the right part is sorted into the unhandled list
*/
5028 void LinearScanWalker::split_before_usage(Interval* it, int min_split_pos, int max_split_pos) {
5029   TRACE_LINEAR_SCAN(2, tty->print   ("----- splitting interval: "); it->print());
5030   TRACE_LINEAR_SCAN(2, tty->print_cr("      between %d and %d", min_split_pos, max_split_pos));
5031 
5032   assert(it->from() < min_split_pos,         "cannot split at start of interval");
5033   assert(current_position() < min_split_pos, "cannot split before current position");
5034   assert(min_split_pos <= max_split_pos,     "invalid order");
5035   assert(max_split_pos <= it->to(),          "cannot split after end of interval");
5036 
5037   int optimal_split_pos = find_optimal_split_pos(it, min_split_pos, max_split_pos, true);
5038 
5039   assert(min_split_pos <= optimal_split_pos && optimal_split_pos <= max_split_pos, "out of range");
5040   assert(optimal_split_pos <= it->to(),  "cannot split after end of interval");
5041   assert(optimal_split_pos > it->from(), "cannot split at start of interval");
5042 
5043   if (optimal_split_pos == it->to() && it->next_usage(mustHaveRegister, min_split_pos) == max_jint) {
5044     // the split position would be just before the end of the interval
5045     // -> no split at all necessary
5046     TRACE_LINEAR_SCAN(4, tty->print_cr("      no split necessary because optimal split position is at end of interval"));
5047     return;
5048   }
5049 
5050   // must calculate this before the actual split is performed and before split position is moved to odd op_id
5051   bool move_necessary = !allocator()->is_block_begin(optimal_split_pos) && !it->has_hole_between(optimal_split_pos - 1, optimal_split_pos);
5052 
5053   if (!allocator()->is_block_begin(optimal_split_pos)) {
5054     // move position before actual instruction (odd op_id)
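    // ((pos - 1) | 1 rounds down to an odd op_id: 16 -> 15, 15 -> 15)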
5055     optimal_split_pos = (optimal_split_pos - 1) | 1;
5056   }
5057 
5058   TRACE_LINEAR_SCAN(4, tty->print_cr("      splitting at position %d", optimal_split_pos));
5059   assert(allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 1), "split pos must be odd when not on block boundary");
5060   assert(!allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 0), "split pos must be even on block boundary");
5061 
5062   Interval* split_part = it->split(optimal_split_pos);
5063 
5064   allocator()->append_interval(split_part);
5065   allocator()->copy_register_flags(it, split_part);
5066   split_part->set_insert_move_when_activated(move_necessary);
5067   append_to_unhandled(unhandled_first_addr(anyKind), split_part);
5068 
5069   TRACE_LINEAR_SCAN(2, tty->print_cr("      split interval in two parts (insert_move_when_activated: %d)", move_necessary));
5070   TRACE_LINEAR_SCAN(2, tty->print   ("      "); it->print());
5071   TRACE_LINEAR_SCAN(2, tty->print   ("      "); split_part->print());
5072 }
5073 
/*
  split an interval at the optimal position between min_split_pos and
  max_split_pos into two parts:
  1) the left part already has a location assigned
  2) the right part is always on the stack and therefore ignored in further processing
*/
5080 void LinearScanWalker::split_for_spilling(Interval* it) {
5081   // calculate allowed range of splitting position
5082   int max_split_pos = current_position();
5083   int min_split_pos = MAX2(it->previous_usage(shouldHaveRegister, max_split_pos) + 1, it->from());
5084 
5085   TRACE_LINEAR_SCAN(2, tty->print   ("----- splitting and spilling interval: "); it->print());
5086   TRACE_LINEAR_SCAN(2, tty->print_cr("      between %d and %d", min_split_pos, max_split_pos));
5087 
5088   assert(it->state() == activeState,     "why spill interval that is not active?");
5089   assert(it->from() <= min_split_pos,    "cannot split before start of interval");
5090   assert(min_split_pos <= max_split_pos, "invalid order");
  assert(max_split_pos < it->to(),       "cannot split at end of interval");
5092   assert(current_position() < it->to(),  "interval must not end before current position");
5093 
5094   if (min_split_pos == it->from()) {
5095     // the whole interval is never used, so spill it entirely to memory
5096     TRACE_LINEAR_SCAN(2, tty->print_cr("      spilling entire interval because split pos is at beginning of interval"));
5097     assert(it->first_usage(shouldHaveRegister) > current_position(), "interval must not have use position before current_position");
5098 
5099     allocator()->assign_spill_slot(it);
5100     allocator()->change_spill_state(it, min_split_pos);
5101 
    // Also kick parent intervals out of their register to memory when they have no use
    // position. This avoids a short interval in a register surrounded by intervals in
    // memory -> avoids useless moves from memory to register and back
5105     Interval* parent = it;
5106     while (parent != NULL && parent->is_split_child()) {
5107       parent = parent->split_child_before_op_id(parent->from());
5108 
5109       if (parent->assigned_reg() < LinearScan::nof_regs) {
5110         if (parent->first_usage(shouldHaveRegister) == max_jint) {
5111           // parent is never used, so kick it out of its assigned register
5112           TRACE_LINEAR_SCAN(4, tty->print_cr("      kicking out interval %d out of its register because it is never used", parent->reg_num()));
5113           allocator()->assign_spill_slot(parent);
5114         } else {
5115           // do not go further back because the register is actually used by the interval
5116           parent = NULL;
5117         }
5118       }
5119     }
5120 
5121   } else {
5122     // search optimal split pos, split interval and spill only the right hand part
5123     int optimal_split_pos = find_optimal_split_pos(it, min_split_pos, max_split_pos, false);
5124 
5125     assert(min_split_pos <= optimal_split_pos && optimal_split_pos <= max_split_pos, "out of range");
5126     assert(optimal_split_pos < it->to(), "cannot split at end of interval");
5127     assert(optimal_split_pos >= it->from(), "cannot split before start of interval");
5128 
5129     if (!allocator()->is_block_begin(optimal_split_pos)) {
5130       // move position before actual instruction (odd op_id)
5131       optimal_split_pos = (optimal_split_pos - 1) | 1;
5132     }
5133 
5134     TRACE_LINEAR_SCAN(4, tty->print_cr("      splitting at position %d", optimal_split_pos));
5135     assert(allocator()->is_block_begin(optimal_split_pos)  || (optimal_split_pos % 2 == 1), "split pos must be odd when not on block boundary");
5136     assert(!allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 0), "split pos must be even on block boundary");
5137 
5138     Interval* spilled_part = it->split(optimal_split_pos);
5139     allocator()->append_interval(spilled_part);
5140     allocator()->assign_spill_slot(spilled_part);
5141     allocator()->change_spill_state(spilled_part, optimal_split_pos);
5142 
5143     if (!allocator()->is_block_begin(optimal_split_pos)) {
5144       TRACE_LINEAR_SCAN(4, tty->print_cr("      inserting move from interval %d to %d", it->reg_num(), spilled_part->reg_num()));
5145       insert_move(optimal_split_pos, it, spilled_part);
5146     }
5147 
5148     // the current_split_child is needed later when moves are inserted for reloading
5149     assert(spilled_part->current_split_child() == it, "overwriting wrong current_split_child");
5150     spilled_part->make_current_split_child();
5151 
5152     TRACE_LINEAR_SCAN(2, tty->print_cr("      split interval in two parts"));
5153     TRACE_LINEAR_SCAN(2, tty->print   ("      "); it->print());
5154     TRACE_LINEAR_SCAN(2, tty->print   ("      "); spilled_part->print());
5155   }
5156 }
5157 
5158 
5159 void LinearScanWalker::split_stack_interval(Interval* it) {
5160   int min_split_pos = current_position() + 1;
5161   int max_split_pos = MIN2(it->first_usage(shouldHaveRegister), it->to());
5162 
5163   split_before_usage(it, min_split_pos, max_split_pos);
5164 }
5165 
5166 void LinearScanWalker::split_when_partial_register_available(Interval* it, int register_available_until) {
5167   int min_split_pos = MAX2(it->previous_usage(shouldHaveRegister, register_available_until), it->from() + 1);
5168   int max_split_pos = register_available_until;
5169 
5170   split_before_usage(it, min_split_pos, max_split_pos);
5171 }
5172 
5173 void LinearScanWalker::split_and_spill_interval(Interval* it) {
5174   assert(it->state() == activeState || it->state() == inactiveState, "other states not allowed");
5175 
5176   int current_pos = current_position();
5177   if (it->state() == inactiveState) {
5178     // the interval is currently inactive, so no spill slot is needed for now.
5179     // when the split part is activated, the interval has a new chance to get a register,
5180     // so in the best case no stack slot is necessary
5181     assert(it->has_hole_between(current_pos - 1, current_pos + 1), "interval can not be inactive otherwise");
5182     split_before_usage(it, current_pos + 1, current_pos + 1);
5183 
5184   } else {
5185     // search the position where the interval must have a register and split
5186     // at the optimal position before.
    // The newly created part is added to the unhandled list and will get a register
5188     // when it is activated
5189     int min_split_pos = current_pos + 1;
5190     int max_split_pos = MIN2(it->next_usage(mustHaveRegister, min_split_pos), it->to());
5191 
5192     split_before_usage(it, min_split_pos, max_split_pos);
5193 
5194     assert(it->next_usage(mustHaveRegister, current_pos) == max_jint, "the remaining part is spilled to stack and therefore has no register");
5195     split_for_spilling(it);
5196   }
5197 }
5198 
5199 
5200 int LinearScanWalker::find_free_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split) {
5201   int min_full_reg = any_reg;
5202   int max_partial_reg = any_reg;
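
  // min_full_reg:    a register that is free for the whole interval
  //                  (prefer the hint register, otherwise the one with the
  //                  smallest next use position)
  // max_partial_reg: a register that is free at least until reg_needed_until
  //                  (prefer the hint register, otherwise the one with the
  //                  largest next use position, i.e. free the longest)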
5203 
5204   for (int i = _first_reg; i <= _last_reg; i++) {
5205     if (i == ignore_reg) {
5206       // this register must be ignored
5207 
5208     } else if (_use_pos[i] >= interval_to) {
5209       // this register is free for the full interval
5210       if (min_full_reg == any_reg || i == hint_reg || (_use_pos[i] < _use_pos[min_full_reg] && min_full_reg != hint_reg)) {
5211         min_full_reg = i;
5212       }
5213     } else if (_use_pos[i] > reg_needed_until) {
5214       // this register is at least free until reg_needed_until
5215       if (max_partial_reg == any_reg || i == hint_reg || (_use_pos[i] > _use_pos[max_partial_reg] && max_partial_reg != hint_reg)) {
5216         max_partial_reg = i;
5217       }
5218     }
5219   }
5220 
5221   if (min_full_reg != any_reg) {
5222     return min_full_reg;
5223   } else if (max_partial_reg != any_reg) {
5224     *need_split = true;
5225     return max_partial_reg;
5226   } else {
5227     return any_reg;
5228   }
5229 }
5230 
5231 int LinearScanWalker::find_free_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split) {
5232   assert((_last_reg - _first_reg + 1) % 2 == 0, "adjust algorithm");
5233 
5234   int min_full_reg = any_reg;
5235   int max_partial_reg = any_reg;
5236 
5237   for (int i = _first_reg; i < _last_reg; i+=2) {
5238     if (_use_pos[i] >= interval_to && _use_pos[i + 1] >= interval_to) {
5239       // this register is free for the full interval
5240       if (min_full_reg == any_reg || i == hint_reg || (_use_pos[i] < _use_pos[min_full_reg] && min_full_reg != hint_reg)) {
5241         min_full_reg = i;
5242       }
5243     } else if (_use_pos[i] > reg_needed_until && _use_pos[i + 1] > reg_needed_until) {
5244       // this register is at least free until reg_needed_until
5245       if (max_partial_reg == any_reg || i == hint_reg || (_use_pos[i] > _use_pos[max_partial_reg] && max_partial_reg != hint_reg)) {
5246         max_partial_reg = i;
5247       }
5248     }
5249   }
5250 
5251   if (min_full_reg != any_reg) {
5252     return min_full_reg;
5253   } else if (max_partial_reg != any_reg) {
5254     *need_split = true;
5255     return max_partial_reg;
5256   } else {
5257     return any_reg;
5258   }
5259 }
5260 
5261 
5262 bool LinearScanWalker::alloc_free_reg(Interval* cur) {
5263   TRACE_LINEAR_SCAN(2, tty->print("trying to find free register for "); cur->print());
5264 
5265   init_use_lists(true);
5266   free_exclude_active_fixed();
5267   free_exclude_active_any();
5268   free_collect_inactive_fixed(cur);
5269   free_collect_inactive_any(cur);
5270 //  free_collect_unhandled(fixedKind, cur);
5271   assert(unhandled_first(fixedKind) == Interval::end(), "must not have unhandled fixed intervals because all fixed intervals have a use at position 0");
5272 
5273   // _use_pos contains the start of the next interval that has this register assigned
5274   // (either as a fixed register or a normal allocated register in the past)
  // only intervals overlapping with cur are processed, non-overlapping intervals can be ignored safely
5276   TRACE_LINEAR_SCAN(4, tty->print_cr("      state of registers:"));
5277   TRACE_LINEAR_SCAN(4, for (int i = _first_reg; i <= _last_reg; i++) tty->print_cr("      reg %d: use_pos: %d", i, _use_pos[i]));
5278 
5279   int hint_reg, hint_regHi;
5280   Interval* register_hint = cur->register_hint();
5281   if (register_hint != NULL) {
5282     hint_reg = register_hint->assigned_reg();
5283     hint_regHi = register_hint->assigned_regHi();
5284 
5285     if (allocator()->is_precolored_cpu_interval(register_hint)) {
5286       assert(hint_reg != any_reg && hint_regHi == any_reg, "must be for fixed intervals");
5287       hint_regHi = hint_reg + 1;  // connect e.g. eax-edx
5288     }
5289     TRACE_LINEAR_SCAN(4, tty->print("      hint registers %d, %d from interval ", hint_reg, hint_regHi); register_hint->print());
5290 
5291   } else {
5292     hint_reg = any_reg;
5293     hint_regHi = any_reg;
5294   }
5295   assert(hint_reg == any_reg || hint_reg != hint_regHi, "hint reg and regHi equal");
5296   assert(cur->assigned_reg() == any_reg && cur->assigned_regHi() == any_reg, "register already assigned to interval");
5297 
5298   // the register must be free at least until this position
5299   int reg_needed_until = cur->from() + 1;
5300   int interval_to = cur->to();
5301 
5302   bool need_split = false;
5303   int split_pos = -1;
5304   int reg = any_reg;
5305   int regHi = any_reg;
5306 
5307   if (_adjacent_regs) {
5308     reg = find_free_double_reg(reg_needed_until, interval_to, hint_reg, &need_split);
5309     regHi = reg + 1;
5310     if (reg == any_reg) {
5311       return false;
5312     }
5313     split_pos = MIN2(_use_pos[reg], _use_pos[regHi]);
5314 
5315   } else {
5316     reg = find_free_reg(reg_needed_until, interval_to, hint_reg, any_reg, &need_split);
5317     if (reg == any_reg) {
5318       return false;
5319     }
5320     split_pos = _use_pos[reg];
5321 
5322     if (_num_phys_regs == 2) {
5323       regHi = find_free_reg(reg_needed_until, interval_to, hint_regHi, reg, &need_split);
5324 
5325       if (_use_pos[reg] < interval_to && regHi == any_reg) {
5326         // do not split interval if only one register can be assigned until the split pos
5327         // (when one register is found for the whole interval, split&spill is only
5328         // performed for the hi register)
5329         return false;
5330 
5331       } else if (regHi != any_reg) {
5332         split_pos = MIN2(split_pos, _use_pos[regHi]);
5333 
5334         // sort register numbers to prevent e.g. a move from eax,ebx to ebx,eax
5335         if (reg > regHi) {
5336           int temp = reg;
5337           reg = regHi;
5338           regHi = temp;
5339         }
5340       }
5341     }
5342   }
5343 
5344   cur->assign_reg(reg, regHi);
5345   TRACE_LINEAR_SCAN(2, tty->print_cr("selected register %d, %d", reg, regHi));
5346 
5347   assert(split_pos > 0, "invalid split_pos");
5348   if (need_split) {
5349     // register not available for full interval, so split it
5350     split_when_partial_register_available(cur, split_pos);
5351   }
5352 
5353   // only return true if interval is completely assigned
5354   return _num_phys_regs == 1 || regHi != any_reg;
5355 }
5356 
5357 
5358 int LinearScanWalker::find_locked_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split) {
5359   int max_reg = any_reg;
5360 
5361   for (int i = _first_reg; i <= _last_reg; i++) {
5362     if (i == ignore_reg) {
5363       // this register must be ignored
5364 
5365     } else if (_use_pos[i] > reg_needed_until) {
5366       if (max_reg == any_reg || i == hint_reg || (_use_pos[i] > _use_pos[max_reg] && max_reg != hint_reg)) {
5367         max_reg = i;
5368       }
5369     }
5370   }
5371 
5372   if (max_reg != any_reg && _block_pos[max_reg] <= interval_to) {
5373     *need_split = true;
5374   }
5375 
5376   return max_reg;
5377 }
5378 
5379 int LinearScanWalker::find_locked_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split) {
5380   assert((_last_reg - _first_reg + 1) % 2 == 0, "adjust algorithm");
5381 
5382   int max_reg = any_reg;
5383 
5384   for (int i = _first_reg; i < _last_reg; i+=2) {
5385     if (_use_pos[i] > reg_needed_until && _use_pos[i + 1] > reg_needed_until) {
5386       if (max_reg == any_reg || _use_pos[i] > _use_pos[max_reg]) {
5387         max_reg = i;
5388       }
5389     }
5390   }
5391 
5392   if (max_reg != any_reg && (_block_pos[max_reg] <= interval_to || _block_pos[max_reg + 1] <= interval_to)) {
5393     *need_split = true;
5394   }
5395 
5396   return max_reg;
5397 }
5398 
5399 void LinearScanWalker::split_and_spill_intersecting_intervals(int reg, int regHi) {
5400   assert(reg != any_reg, "no register assigned");
5401 
5402   for (int i = 0; i < _spill_intervals[reg]->length(); i++) {
5403     Interval* it = _spill_intervals[reg]->at(i);
5404     remove_from_list(it);
5405     split_and_spill_interval(it);
5406   }
5407 
5408   if (regHi != any_reg) {
5409     IntervalList* processed = _spill_intervals[reg];
5410     for (int i = 0; i < _spill_intervals[regHi]->length(); i++) {
5411       Interval* it = _spill_intervals[regHi]->at(i);
5412       if (processed->index_of(it) == -1) {
5413         remove_from_list(it);
5414         split_and_spill_interval(it);
5415       }
5416     }
5417   }
5418 }
5419 
5420 
5421 // Split an Interval and spill it to memory so that cur can be placed in a register
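     // Illustrative example (hypothetical positions): assume all registers are in
     // use and cur covers [10, 50[ with its first mustHaveRegister use at 12. If
     // reg 3 is the register whose next real use position is furthest away, e.g.
     // at 40, and 40 > 12, then the intervals currently occupying reg 3 are split
     // and spilled, cur is assigned reg 3, and cur itself is split before
     // _block_pos[3] if reg 3 is blocked before cur->to().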
5422 void LinearScanWalker::alloc_locked_reg(Interval* cur) {
5423   TRACE_LINEAR_SCAN(2, tty->print("need to split and spill to get register for "); cur->print());
5424 
5425   // collect current usage of registers
5426   init_use_lists(false);
5427   spill_exclude_active_fixed();
5428 //  spill_block_unhandled_fixed(cur);
5429   assert(unhandled_first(fixedKind) == Interval::end(), "must not have unhandled fixed intervals because all fixed intervals have a use at position 0");
5430   spill_block_inactive_fixed(cur);
5431   spill_collect_active_any();
5432   spill_collect_inactive_any(cur);
5433 
5434 #ifndef PRODUCT
5435   if (TraceLinearScanLevel >= 4) {
5436     tty->print_cr("      state of registers:");
5437     for (int i = _first_reg; i <= _last_reg; i++) {
5438       tty->print("      reg %d: use_pos: %d, block_pos: %d, intervals: ", i, _use_pos[i], _block_pos[i]);
5439       for (int j = 0; j < _spill_intervals[i]->length(); j++) {
5440         tty->print("%d ", _spill_intervals[i]->at(j)->reg_num());
5441       }
5442       tty->cr();
5443     }
5444   }
5445 #endif
5446 
5447   // the register must be free at least until this position
5448   int reg_needed_until = MIN2(cur->first_usage(mustHaveRegister), cur->from() + 1);
5449   int interval_to = cur->to();
5450   assert (reg_needed_until > 0 && reg_needed_until < max_jint, "interval has no use");
5451 
5452   int split_pos = 0;
5453   int use_pos = 0;
5454   bool need_split = false;
5455   int reg, regHi;
5456 
5457   if (_adjacent_regs) {
5458     reg = find_locked_double_reg(reg_needed_until, interval_to, any_reg, &need_split);
5459     regHi = reg + 1;
5460 
5461     if (reg != any_reg) {
5462       use_pos = MIN2(_use_pos[reg], _use_pos[regHi]);
5463       split_pos = MIN2(_block_pos[reg], _block_pos[regHi]);
5464     }
5465   } else {
5466     reg = find_locked_reg(reg_needed_until, interval_to, any_reg, cur->assigned_reg(), &need_split);
5467     regHi = any_reg;
5468 
5469     if (reg != any_reg) {
5470       use_pos = _use_pos[reg];
5471       split_pos = _block_pos[reg];
5472 
5473       if (_num_phys_regs == 2) {
5474         if (cur->assigned_reg() != any_reg) {
5475           regHi = reg;
5476           reg = cur->assigned_reg();
5477         } else {
5478           regHi = find_locked_reg(reg_needed_until, interval_to, any_reg, reg, &need_split);
5479           if (regHi != any_reg) {
5480             use_pos = MIN2(use_pos, _use_pos[regHi]);
5481             split_pos = MIN2(split_pos, _block_pos[regHi]);
5482           }
5483         }
5484 
5485         if (regHi != any_reg && reg > regHi) {
5486           // sort register numbers to prevent e.g. a move from eax,ebx to ebx,eax
5487           int temp = reg;
5488           reg = regHi;
5489           regHi = temp;
5490         }
5491       }
5492     }
5493   }
5494 
5495   if (reg == any_reg || (_num_phys_regs == 2 && regHi == any_reg) || use_pos <= cur->first_usage(mustHaveRegister)) {
5496     // the first use of cur is later than the spilling position -> spill cur
5497     TRACE_LINEAR_SCAN(4, tty->print_cr("able to spill current interval. first_usage(register): %d, use_pos: %d", cur->first_usage(mustHaveRegister), use_pos));
5498 
5499     if (cur->first_usage(mustHaveRegister) <= cur->from() + 1) {
5500       assert(false, "cannot spill interval that is used in first instruction (possible reason: no register found)");
5501       // assign a reasonable register and do a bailout in product mode to avoid errors
5502       allocator()->assign_spill_slot(cur);
5503       BAILOUT("LinearScan: no register found");
5504     }
5505 
5506     split_and_spill_interval(cur);
5507   } else {
5508     TRACE_LINEAR_SCAN(4, tty->print_cr("decided to use register %d, %d", reg, regHi));
5509     assert(reg != any_reg && (_num_phys_regs == 1 || regHi != any_reg), "no register found");
5510     assert(split_pos > 0, "invalid split_pos");
5511     assert(need_split == false || split_pos > cur->from(), "splitting interval at from");
5512 
5513     cur->assign_reg(reg, regHi);
5514     if (need_split) {
5515       // register not available for full interval, so split it
5516       split_when_partial_register_available(cur, split_pos);
5517     }
5518 
5519     // perform splitting and spilling for all affected intervals
5520     split_and_spill_intersecting_intervals(reg, regHi);
5521   }
5522 }
5523 
5524 bool LinearScanWalker::no_allocation_possible(Interval* cur) {
5525 #ifdef X86
5526   // fast calculation of intervals that can never get a register because
5527   // the next instruction is a call that blocks all registers
5528   // Note: this does not work if callee-saved registers are available (e.g. on Sparc)
5529 
5530   // check if this interval is the result of a split operation
5531   // (an interval got a register until this position)
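       // Illustrative example (hypothetical op ids): lir op ids are always even,
       // so a from() position with the low bit set, e.g. 17, can only belong to a
       // split child created between the ops with ids 16 and 18; if the op with
       // id 18 is a call that blocks all registers, searching for a free register
       // is pointless.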
5532   int pos = cur->from();
5533   if ((pos & 1) == 1) {
5534     // the interval begins directly before a call that blocks all registers
5535     if (pos < allocator()->max_lir_op_id() && allocator()->has_call(pos + 1)) {
5536       TRACE_LINEAR_SCAN(4, tty->print_cr("      free register cannot be available because all registers blocked by following call"));
5537 
5538       // safety check that there is really no register available
5539       assert(alloc_free_reg(cur) == false, "found a register for this interval");
5540       return true;
5541     }
5542 
5543   }
5544 #endif
5545   return false;
5546 }
5547 
5548 void LinearScanWalker::init_vars_for_alloc(Interval* cur) {
5549   BasicType type = cur->type();
5550   _num_phys_regs = LinearScan::num_physical_regs(type);
5551   _adjacent_regs = LinearScan::requires_adjacent_regs(type);
5552 
5553   if (pd_init_regs_for_alloc(cur)) {
5554     // the appropriate register range was selected.
5555   } else if (type == T_FLOAT || type == T_DOUBLE) {
5556     _first_reg = pd_first_fpu_reg;
5557     _last_reg = pd_last_fpu_reg;
5558   } else {
5559     _first_reg = pd_first_cpu_reg;
5560     _last_reg = pd_last_cpu_reg;
5561   }
5562 
5563   assert(0 <= _first_reg && _first_reg < LinearScan::nof_regs, "out of range");
5564   assert(0 <= _last_reg && _last_reg < LinearScan::nof_regs, "out of range");
5565 }
5566 
5567 
5568 bool LinearScanWalker::is_move(LIR_Op* op, Interval* from, Interval* to) {
5569   if (op->code() != lir_move) {
5570     return false;
5571   }
5572   assert(op->as_Op1() != NULL, "move must be LIR_Op1");
5573 
5574   LIR_Opr in = ((LIR_Op1*)op)->in_opr();
5575   LIR_Opr res = ((LIR_Op1*)op)->result_opr();
5576   return in->is_virtual() && res->is_virtual() && in->vreg_number() == from->reg_num() && res->vreg_number() == to->reg_num();
5577 }
5578 
5579 // optimization (especially for phi functions of nested loops):
5580 // assign same spill slot to non-intersecting intervals
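     // Illustrative sketch (hypothetical virtual registers and op ids): a loop
     // phi typically produces the pattern
     //   begin_pos:  move v17 -> v23      (register_hint -> cur)
     //   ...
     //   end_pos:    move v23 -> v17      (cur -> register_hint)
     // If the part of v17 covering this range is spilled anyway, giving v23 the
     // same canonical spill slot turns both moves into copies from and to the
     // same stack slot, so cur can stay in memory without additional cost.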
5581 void LinearScanWalker::combine_spilled_intervals(Interval* cur) {
5582   if (cur->is_split_child()) {
5583     // optimization is only suitable for split parents
5584     return;
5585   }
5586 
5587   Interval* register_hint = cur->register_hint(false);
5588   if (register_hint == NULL) {
5589     // cur is not the target of a move, otherwise register_hint would be set
5590     return;
5591   }
5592   assert(register_hint->is_split_parent(), "register hint must be split parent");
5593 
5594   if (cur->spill_state() != noOptimization || register_hint->spill_state() != noOptimization) {
5595     // combining the stack slots for intervals where spill move optimization is applied
5596     // is not beneficial and would cause problems
5597     return;
5598   }
5599 
5600   int begin_pos = cur->from();
5601   int end_pos = cur->to();
5602   if (end_pos > allocator()->max_lir_op_id() || (begin_pos & 1) != 0 || (end_pos & 1) != 0) {
5603     // safety check that lir_op_with_id is allowed
5604     return;
5605   }
5606 
5607   if (!is_move(allocator()->lir_op_with_id(begin_pos), register_hint, cur) || !is_move(allocator()->lir_op_with_id(end_pos), cur, register_hint)) {
5608     // cur and register_hint are not connected with two moves
5609     return;
5610   }
5611 
5612   Interval* begin_hint = register_hint->split_child_at_op_id(begin_pos, LIR_OpVisitState::inputMode);
5613   Interval* end_hint = register_hint->split_child_at_op_id(end_pos, LIR_OpVisitState::outputMode);
5614   if (begin_hint == end_hint || begin_hint->to() != begin_pos || end_hint->from() != end_pos) {
5615     // register_hint must be split, otherwise the re-writing of use positions does not work
5616     return;
5617   }
5618 
5619   assert(begin_hint->assigned_reg() != any_reg, "must have register assigned");
5620   assert(end_hint->assigned_reg() == any_reg, "must not have register assigned");
5621   assert(cur->first_usage(mustHaveRegister) == begin_pos, "must have use position at begin of interval because of move");
5622   assert(end_hint->first_usage(mustHaveRegister) == end_pos, "must have use position at begin of interval because of move");
5623 
5624   if (begin_hint->assigned_reg() < LinearScan::nof_regs) {
5625     // register_hint is not spilled at begin_pos, so it would not be beneficial to immediately spill cur
5626     return;
5627   }
5628   assert(register_hint->canonical_spill_slot() != -1, "must be set when part of interval was spilled");
5629 
5630   // modify intervals such that cur gets the same stack slot as register_hint
5631   // delete use positions to prevent the intervals from getting a register at the beginning
5632   cur->set_canonical_spill_slot(register_hint->canonical_spill_slot());
5633   cur->remove_first_use_pos();
5634   end_hint->remove_first_use_pos();
5635 }
5636 
5637 
5638 // allocate a physical register or memory location to an interval
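     // Outcome overview (derived from the cases below):
     //   - spill slot already assigned (method parameter) -> split before first use, not activated
     //   - must_start_in_memory flag set (lir_roundfp)    -> assign spill slot, split, not activated
     //   - no register assigned yet (normal case)         -> try a free register, else split and
     //                                                       spill another interval
     // the return value is true only if cur ends up with a physical register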
5639 bool LinearScanWalker::activate_current() {
5640   Interval* cur = current();
5641   bool result = true;
5642 
5643   TRACE_LINEAR_SCAN(2, tty->print   ("+++++ activating interval "); cur->print());
5644   TRACE_LINEAR_SCAN(4, tty->print_cr("      split_parent: %d, insert_move_when_activated: %d", cur->split_parent()->reg_num(), cur->insert_move_when_activated()));
5645 
5646   if (cur->assigned_reg() >= LinearScan::nof_regs) {
5647     // activating an interval that has a stack slot assigned -> split it at first use position
5648     // used for method parameters
5649     TRACE_LINEAR_SCAN(4, tty->print_cr("      interval has spill slot assigned (method parameter) -> split it before first use"));
5650 
5651     split_stack_interval(cur);
5652     result = false;
5653 
5654   } else if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::must_start_in_memory)) {
5655     // activating an interval that must start in a stack slot, but may get a register later
5656     // used for lir_roundfp: rounding is done by store to stack and reload later
5657     TRACE_LINEAR_SCAN(4, tty->print_cr("      interval must start in stack slot -> split it before first use"));
5658     assert(cur->assigned_reg() == any_reg && cur->assigned_regHi() == any_reg, "register already assigned");
5659 
5660     allocator()->assign_spill_slot(cur);
5661     split_stack_interval(cur);
5662     result = false;
5663 
5664   } else if (cur->assigned_reg() == any_reg) {
5665     // interval has no register assigned yet -> normal allocation
5666     // (this is the normal case for most intervals)
5667     TRACE_LINEAR_SCAN(4, tty->print_cr("      normal allocation of register"));
5668 
5669     // assign same spill slot to non-intersecting intervals
5670     combine_spilled_intervals(cur);
5671 
5672     init_vars_for_alloc(cur);
5673     if (no_allocation_possible(cur) || !alloc_free_reg(cur)) {
5674       // no empty register available.
5675       // split and spill another interval so that this interval gets a register
5676       alloc_locked_reg(cur);
5677     }
5678 
5679     // spilled intervals need not be moved to the active list
5680     if (cur->assigned_reg() >= LinearScan::nof_regs) {
5681       result = false;
5682     }
5683   }
5684 
5685   // load spilled values that become active from stack slot to register
5686   if (cur->insert_move_when_activated()) {
5687     assert(cur->is_split_child(), "must be");
5688     assert(cur->current_split_child() != NULL, "must be");
5689     assert(cur->current_split_child()->reg_num() != cur->reg_num(), "cannot insert move between same interval");
5690     TRACE_LINEAR_SCAN(4, tty->print_cr("Inserting move from interval %d to %d because insert_move_when_activated is set", cur->current_split_child()->reg_num(), cur->reg_num()));
5691 
5692     insert_move(cur->from(), cur->current_split_child(), cur);
5693   }
5694   cur->make_current_split_child();
5695 
5696   return result; // true = interval is moved to active list
5697 }
5698 
5699 
5700 // Implementation of EdgeMoveOptimizer
5701 
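     // The optimizer moves instruction sequences that are identical at the end
     // of all predecessors (or at the beginning of all successors) of a block
     // into the block itself. Illustrative example (hypothetical operands) for
     // optimize_moves_at_block_end:
     //
     //   before:   B1: ...                    B2: ...
     //                 move R1 -> R3              move R1 -> R3
     //                 branch B3                  branch B3
     //             B3: label
     //
     //   after:    B1: ...                    B2: ...
     //                 branch B3                  branch B3
     //             B3: label
     //                 move R1 -> R3
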
5702 EdgeMoveOptimizer::EdgeMoveOptimizer() :
5703   _edge_instructions(4),
5704   _edge_instructions_idx(4)
5705 {
5706 }
5707 
5708 void EdgeMoveOptimizer::optimize(BlockList* code) {
5709   EdgeMoveOptimizer optimizer;
5710 
5711   // ignore the first block in the list (index 0 is not processed)
5712   for (int i = code->length() - 1; i >= 1; i--) {
5713     BlockBegin* block = code->at(i);
5714 
5715     if (block->number_of_preds() > 1 && !block->is_set(BlockBegin::exception_entry_flag)) {
5716       optimizer.optimize_moves_at_block_end(block);
5717     }
5718     if (block->number_of_sux() == 2) {
5719       optimizer.optimize_moves_at_block_begin(block);
5720     }
5721   }
5722 }
5723 
5724 
5725 // clear all internal data structures
5726 void EdgeMoveOptimizer::init_instructions() {
5727   _edge_instructions.clear();
5728   _edge_instructions_idx.clear();
5729 }
5730 
5731 // append a lir-instruction-list and the index of the current operation in this list
5732 void EdgeMoveOptimizer::append_instructions(LIR_OpList* instructions, int instructions_idx) {
5733   _edge_instructions.append(instructions);
5734   _edge_instructions_idx.append(instructions_idx);
5735 }
5736 
5737 // return the current operation of the given edge (predecessor or successor)
5738 LIR_Op* EdgeMoveOptimizer::instruction_at(int edge) {
5739   LIR_OpList* instructions = _edge_instructions.at(edge);
5740   int idx = _edge_instructions_idx.at(edge);
5741 
5742   if (idx < instructions->length()) {
5743     return instructions->at(idx);
5744   } else {
5745     return NULL;
5746   }
5747 }
5748 
5749 // removes the current operation of the given edge (predecessor or successor)
5750 void EdgeMoveOptimizer::remove_cur_instruction(int edge, bool decrement_index) {
5751   LIR_OpList* instructions = _edge_instructions.at(edge);
5752   int idx = _edge_instructions_idx.at(edge);
5753   instructions->remove_at(idx);
5754 
5755   if (decrement_index) {
5756     _edge_instructions_idx.at_put(edge, idx - 1);
5757   }
5758 }
5759 
5760 
5761 bool EdgeMoveOptimizer::operations_different(LIR_Op* op1, LIR_Op* op2) {
5762   if (op1 == NULL || op2 == NULL) {
5763     // at least one block is already empty -> no optimization possible
5764     return true;
5765   }
5766 
5767   if (op1->code() == lir_move && op2->code() == lir_move) {
5768     assert(op1->as_Op1() != NULL, "move must be LIR_Op1");
5769     assert(op2->as_Op1() != NULL, "move must be LIR_Op1");
5770     LIR_Op1* move1 = (LIR_Op1*)op1;
5771     LIR_Op1* move2 = (LIR_Op1*)op2;
5772     if (move1->info() == move2->info() && move1->in_opr() == move2->in_opr() && move1->result_opr() == move2->result_opr()) {
5773       // these moves are exactly equal and can be optimized
5774       return false;
5775     }
5776 
5777   } else if (op1->code() == lir_fxch && op2->code() == lir_fxch) {
5778     assert(op1->as_Op1() != NULL, "fxch must be LIR_Op1");
5779     assert(op2->as_Op1() != NULL, "fxch must be LIR_Op1");
5780     LIR_Op1* fxch1 = (LIR_Op1*)op1;
5781     LIR_Op1* fxch2 = (LIR_Op1*)op2;
5782     if (fxch1->in_opr()->as_jint() == fxch2->in_opr()->as_jint()) {
5783       // equal FPU stack operations can be optimized
5784       return false;
5785     }
5786 
5787   } else if (op1->code() == lir_fpop_raw && op2->code() == lir_fpop_raw) {
5788     // equal FPU stack operations can be optimized
5789     return false;
5790   }
5791 
5792   // no optimization possible
5793   return true;
5794 }
5795 
5796 void EdgeMoveOptimizer::optimize_moves_at_block_end(BlockBegin* block) {
5797   TRACE_LINEAR_SCAN(4, tty->print_cr("optimizing moves at end of block B%d", block->block_id()));
5798 
5799   if (block->is_predecessor(block)) {
5800     // currently we can't handle this correctly.
5801     return;
5802   }
5803 
5804   init_instructions();
5805   int num_preds = block->number_of_preds();
5806   assert(num_preds > 1, "do not call otherwise");
5807   assert(!block->is_set(BlockBegin::exception_entry_flag), "exception handlers not allowed");
5808 
5809   // setup a list with the lir-instructions of all predecessors
5810   int i;
5811   for (i = 0; i < num_preds; i++) {
5812     BlockBegin* pred = block->pred_at(i);
5813     LIR_OpList* pred_instructions = pred->lir()->instructions_list();
5814 
5815     if (pred->number_of_sux() != 1) {
5816       // this can happen with switch-statements where there are multiple
5817       // edges between the same blocks.
5818       return;
5819     }
5820 
5821     assert(pred->number_of_sux() == 1, "can handle only one successor");
5822     assert(pred->sux_at(0) == block, "invalid control flow");
5823     assert(pred_instructions->last()->code() == lir_branch, "block with successor must end with branch");
5824     assert(pred_instructions->last()->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
5825     assert(pred_instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block must end with unconditional branch");
5826 
5827     if (pred_instructions->last()->info() != NULL) {
5828       // cannot optimize instructions when debug info is needed
5829       return;
5830     }
5831 
5832     // ignore the unconditional branch at the end of the block
5833     append_instructions(pred_instructions, pred_instructions->length() - 2);
5834   }
5835 
5836 
5837   // process lir-instructions while all predecessors end with the same instruction
5838   while (true) {
5839     LIR_Op* op = instruction_at(0);
5840     for (i = 1; i < num_preds; i++) {
5841       if (operations_different(op, instruction_at(i))) {
5842         // these instructions are different and cannot be optimized ->
5843         // no further optimization possible
5844         return;
5845       }
5846     }
5847 
5848     TRACE_LINEAR_SCAN(4, tty->print("found instruction that is equal in all %d predecessors: ", num_preds); op->print());
5849 
5850     // insert the instruction at the beginning of the current block
5851     block->lir()->insert_before(1, op);
5852 
5853     // delete the instruction at the end of all predecessors
5854     for (i = 0; i < num_preds; i++) {
5855       remove_cur_instruction(i, true);
5856     }
5857   }
5858 }
5859 
5860 
5861 void EdgeMoveOptimizer::optimize_moves_at_block_begin(BlockBegin* block) {
5862   TRACE_LINEAR_SCAN(4, tty->print_cr("optimizing moves at beginning of block B%d", block->block_id()));
5863 
5864   init_instructions();
5865   int num_sux = block->number_of_sux();
5866 
5867   LIR_OpList* cur_instructions = block->lir()->instructions_list();
5868 
5869   assert(num_sux == 2, "method should not be called otherwise");
5870   assert(cur_instructions->last()->code() == lir_branch, "block with successor must end with branch");
5871   assert(cur_instructions->last()->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
5872   assert(cur_instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block must end with unconditional branch");
5873 
5874   if (cur_instructions->last()->info() != NULL) {
5875     // cannot optimize instructions when debug info is needed
5876     return;
5877   }
5878 
5879   LIR_Op* branch = cur_instructions->at(cur_instructions->length() - 2);
5880   if (branch->info() != NULL || (branch->code() != lir_branch && branch->code() != lir_cond_float_branch)) {
5881     // not a valid case for optimization
5882     // currently, only blocks that end with two branches (conditional branch followed
5883     // by unconditional branch) are optimized
5884     return;
5885   }
5886 
5887   // now it is guaranteed that the block ends with two branch instructions.
5888   // the instructions are inserted at the end of the block before these two branches
5889   int insert_idx = cur_instructions->length() - 2;
5890 
5891   int i;
5892 #ifdef ASSERT
5893   for (i = insert_idx - 1; i >= 0; i--) {
5894     LIR_Op* op = cur_instructions->at(i);
5895     if ((op->code() == lir_branch || op->code() == lir_cond_float_branch) && ((LIR_OpBranch*)op)->block() != NULL) {
5896       assert(false, "block with two successors can have only two branch instructions");
5897     }
5898   }
5899 #endif
5900 
5901   // setup a list with the lir-instructions of all successors
5902   for (i = 0; i < num_sux; i++) {
5903     BlockBegin* sux = block->sux_at(i);
5904     LIR_OpList* sux_instructions = sux->lir()->instructions_list();
5905 
5906     assert(sux_instructions->at(0)->code() == lir_label, "block must start with label");
5907 
5908     if (sux->number_of_preds() != 1) {
5909       // this can happen with switch-statements where there are multiple
5910       // edges between the same blocks.
5911       return;
5912     }
5913     assert(sux->pred_at(0) == block, "invalid control flow");
5914     assert(!sux->is_set(BlockBegin::exception_entry_flag), "exception handlers not allowed");
5915 
5916     // ignore the label at the beginning of the block
5917     append_instructions(sux_instructions, 1);
5918   }
5919 
5920   // process lir-instructions while all successors begin with the same instruction
5921   while (true) {
5922     LIR_Op* op = instruction_at(0);
5923     for (i = 1; i < num_sux; i++) {
5924       if (operations_different(op, instruction_at(i))) {
5925         // these instructions are different and cannot be optimized ->
5926         // no further optimization possible
5927         return;
5928       }
5929     }
5930 
5931     TRACE_LINEAR_SCAN(4, tty->print("----- found instruction that is equal in all %d successors: ", num_sux); op->print());
5932 
5933     // insert instruction at end of current block
5934     block->lir()->insert_before(insert_idx, op);
5935     insert_idx++;
5936 
5937     // delete the instructions at the beginning of all successors
5938     for (i = 0; i < num_sux; i++) {
5939       remove_cur_instruction(i, false);
5940     }
5941   }
5942 }
5943 
5944 
5945 // Implementation of ControlFlowOptimizer
5946 
5947 ControlFlowOptimizer::ControlFlowOptimizer() :
5948   _original_preds(4)
5949 {
5950 }
5951 
5952 void ControlFlowOptimizer::optimize(BlockList* code) {
5953   ControlFlowOptimizer optimizer;
5954 
5955   // push the OSR entry block to the end so that we're not jumping over it.
5956   BlockBegin* osr_entry = code->at(0)->end()->as_Base()->osr_entry();
5957   if (osr_entry) {
5958     int index = osr_entry->linear_scan_number();
5959     assert(code->at(index) == osr_entry, "wrong index");
5960     code->remove_at(index);
5961     code->append(osr_entry);
5962   }
5963 
5964   optimizer.reorder_short_loops(code);
5965   optimizer.delete_empty_blocks(code);
5966   optimizer.delete_unnecessary_jumps(code);
5967   optimizer.delete_jumps_to_return(code);
5968 }
5969 
5970 void ControlFlowOptimizer::reorder_short_loop(BlockList* code, BlockBegin* header_block, int header_idx) {
5971   int i = header_idx + 1;
5972   int max_end = MIN2(header_idx + ShortLoopSize, code->length());
5973   while (i < max_end && code->at(i)->loop_depth() >= header_block->loop_depth()) {
5974     i++;
5975   }
5976 
5977   if (i == code->length() || code->at(i)->loop_depth() < header_block->loop_depth()) {
5978     int end_idx = i - 1;
5979     BlockBegin* end_block = code->at(end_idx);
5980 
5981     if (end_block->number_of_sux() == 1 && end_block->sux_at(0) == header_block) {
5982       // short loop from header_idx to end_idx found -> reorder blocks such that
5983       // the header_block is the last block instead of the first block of the loop
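           // e.g. (hypothetical block ids): a short loop [B4, B5, B6] with
           // header B4 and back-branch B6 -> B4 becomes [B5, B6, B4]; the
           // branch B6 -> B4 then targets the immediately following block and
           // can be removed later by delete_unnecessary_jumps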
5984       TRACE_LINEAR_SCAN(1, tty->print_cr("Reordering short loop: length %d, header B%d, end B%d",
5985                                          end_idx - header_idx + 1,
5986                                          header_block->block_id(), end_block->block_id()));
5987 
5988       for (int j = header_idx; j < end_idx; j++) {
5989         code->at_put(j, code->at(j + 1));
5990       }
5991       code->at_put(end_idx, header_block);
5992 
5993       // correct the flags so that any loop alignment occurs in the right place.
5994       assert(code->at(end_idx)->is_set(BlockBegin::backward_branch_target_flag), "must be backward branch target");
5995       code->at(end_idx)->clear(BlockBegin::backward_branch_target_flag);
5996       code->at(header_idx)->set(BlockBegin::backward_branch_target_flag);
5997     }
5998   }
5999 }
6000 
6001 void ControlFlowOptimizer::reorder_short_loops(BlockList* code) {
6002   for (int i = code->length() - 1; i >= 0; i--) {
6003     BlockBegin* block = code->at(i);
6004 
6005     if (block->is_set(BlockBegin::linear_scan_loop_header_flag)) {
6006       reorder_short_loop(code, block, i);
6007     }
6008   }
6009 
6010   DEBUG_ONLY(verify(code));
6011 }
6012 
6013 // only blocks with exactly one successor can be deleted. Such blocks
6014 // must always end with an unconditional branch to this successor
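     // e.g. (hypothetical block ids): a block consisting only of
     //   B7: label
     //       branch always -> B9
     // can be deleted by redirecting every edge and branch that targets B7 to B9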
6015 bool ControlFlowOptimizer::can_delete_block(BlockBegin* block) {
6016   if (block->number_of_sux() != 1 || block->number_of_exception_handlers() != 0 || block->is_entry_block()) {
6017     return false;
6018   }
6019 
6020   LIR_OpList* instructions = block->lir()->instructions_list();
6021 
6022   assert(instructions->length() >= 2, "block must have label and branch");
6023   assert(instructions->at(0)->code() == lir_label, "first instruction must always be a label");
6024   assert(instructions->last()->as_OpBranch() != NULL, "last instruction must always be a branch");
6025   assert(instructions->last()->as_OpBranch()->cond() == lir_cond_always, "branch must be unconditional");
6026   assert(instructions->last()->as_OpBranch()->block() == block->sux_at(0), "branch target must be the successor");
6027 
6028   // the block must contain only the label and the unconditional branch, and the branch must not need debug info
6029 
6030   if (instructions->length() == 2 && instructions->last()->info() == NULL) {
6031     return true;
6032   }
6033   return false;
6034 }
6035 
6036 // substitute branch targets in all branch instructions of this block
6037 void ControlFlowOptimizer::substitute_branch_target(BlockBegin* block, BlockBegin* target_from, BlockBegin* target_to) {
6038   TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting empty block: substituting from B%d to B%d inside B%d", target_from->block_id(), target_to->block_id(), block->block_id()));
6039 
6040   LIR_OpList* instructions = block->lir()->instructions_list();
6041 
6042   assert(instructions->at(0)->code() == lir_label, "first instruction must always be a label");
6043   for (int i = instructions->length() - 1; i >= 1; i--) {
6044     LIR_Op* op = instructions->at(i);
6045 
6046     if (op->code() == lir_branch || op->code() == lir_cond_float_branch) {
6047       assert(op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
6048       LIR_OpBranch* branch = (LIR_OpBranch*)op;
6049 
6050       if (branch->block() == target_from) {
6051         branch->change_block(target_to);
6052       }
6053       if (branch->ublock() == target_from) {
6054         branch->change_ublock(target_to);
6055       }
6056     }
6057   }
6058 }
6059 
6060 void ControlFlowOptimizer::delete_empty_blocks(BlockList* code) {
6061   int old_pos = 0;
6062   int new_pos = 0;
6063   int num_blocks = code->length();
6064 
6065   while (old_pos < num_blocks) {
6066     BlockBegin* block = code->at(old_pos);
6067 
6068     if (can_delete_block(block)) {
6069       BlockBegin* new_target = block->sux_at(0);
6070 
6071       // propagate backward branch target flag for correct code alignment
6072       if (block->is_set(BlockBegin::backward_branch_target_flag)) {
6073         new_target->set(BlockBegin::backward_branch_target_flag);
6074       }
6075 
6076       // collect a list with all predecessors that contains each predecessor only once;
6077       // the predecessors of the block are changed during the substitution, so a copy
6078       // of the predecessor list is necessary
6079       int j;
6080       _original_preds.clear();
6081       for (j = block->number_of_preds() - 1; j >= 0; j--) {
6082         BlockBegin* pred = block->pred_at(j);
6083         if (_original_preds.index_of(pred) == -1) {
6084           _original_preds.append(pred);
6085         }
6086       }
6087 
6088       for (j = _original_preds.length() - 1; j >= 0; j--) {
6089         BlockBegin* pred = _original_preds.at(j);
6090         substitute_branch_target(pred, block, new_target);
6091         pred->substitute_sux(block, new_target);
6092       }
6093     } else {
6094       // adjust position of this block in the block list if blocks before
6095       // have been deleted
6096       if (new_pos != old_pos) {
6097         code->at_put(new_pos, code->at(old_pos));
6098       }
6099       new_pos++;
6100     }
6101     old_pos++;
6102   }
6103   code->truncate(new_pos);
6104 
6105   DEBUG_ONLY(verify(code));
6106 }
6107 
6108 void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
6109   // skip the last block because a branch is always necessary there
6110   for (int i = code->length() - 2; i >= 0; i--) {
6111     BlockBegin* block = code->at(i);
6112     LIR_OpList* instructions = block->lir()->instructions_list();
6113 
6114     LIR_Op* last_op = instructions->last();
6115     if (last_op->code() == lir_branch) {
6116       assert(last_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
6117       LIR_OpBranch* last_branch = (LIR_OpBranch*)last_op;
6118 
6119       assert(last_branch->block() != NULL, "last branch must always have a block as target");
6120       assert(last_branch->label() == last_branch->block()->label(), "must be equal");
6121 
6122       if (last_branch->info() == NULL) {
6123         if (last_branch->block() == code->at(i + 1)) {
6124 
6125           TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting unconditional branch at end of block B%d", block->block_id()));
6126 
6127           // delete last branch instruction
6128           instructions->truncate(instructions->length() - 1);
6129 
6130         } else {
6131           LIR_Op* prev_op = instructions->at(instructions->length() - 2);
6132           if (prev_op->code() == lir_branch || prev_op->code() == lir_cond_float_branch) {
6133             assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
6134             LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;
6135 
6136             if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
6137 
6138               TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
6139 
6140               // eliminate a conditional branch to the immediate successor
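                   // e.g. (hypothetical blocks, with B5 == code->at(i + 1)):
                   //   cond_branch LT -> B5        cond_branch GE -> B8
                   //   branch always  -> B8   =>   (fall through to B5)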
6141               prev_branch->change_block(last_branch->block());
6142               prev_branch->negate_cond();
6143               instructions->truncate(instructions->length() - 1);
6144             }
6145           }
6146         }
6147       }
6148     }
6149   }
6150 
6151   DEBUG_ONLY(verify(code));
6152 }
6153 
6154 void ControlFlowOptimizer::delete_jumps_to_return(BlockList* code) {
6155 #ifdef ASSERT
6156   BitMap return_converted(BlockBegin::number_of_blocks());
6157   return_converted.clear();
6158 #endif
6159 
6160   for (int i = code->length() - 1; i >= 0; i--) {
6161     BlockBegin* block = code->at(i);
6162     LIR_OpList* cur_instructions = block->lir()->instructions_list();
6163     LIR_Op*     cur_last_op = cur_instructions->last();
6164 
6165     assert(cur_instructions->at(0)->code() == lir_label, "first instruction must always be a label");
6166     if (cur_instructions->length() == 2 && cur_last_op->code() == lir_return) {
6167       // the block contains only a label and a return
6168       // if a predecessor ends with an unconditional jump to this block, then the jump
6169       // can be replaced with a return instruction
6170       //
6171       // Note: the original block with only a return statement cannot be deleted completely
6172       //       because the predecessors might have other (conditional) jumps to this block
6173       //       -> this may lead to unnecessary return instructions in the final code
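           //
           // e.g. (hypothetical blocks and operand):
           //   B4: ...                        B4: ...
           //       branch always -> B9   =>       return R0
           //   B9: label
           //       return R0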
6174 
6175       assert(cur_last_op->info() == NULL, "return instructions do not have debug information");
6176       assert(block->number_of_sux() == 0 ||
6177              (return_converted.at(block->block_id()) && block->number_of_sux() == 1),
6178              "blocks that end with return must not have successors");
6179 
6180       assert(cur_last_op->as_Op1() != NULL, "return must be LIR_Op1");
6181       LIR_Opr return_opr = ((LIR_Op1*)cur_last_op)->in_opr();
6182 
6183       for (int j = block->number_of_preds() - 1; j >= 0; j--) {
6184         BlockBegin* pred = block->pred_at(j);
6185         LIR_OpList* pred_instructions = pred->lir()->instructions_list();
6186         LIR_Op*     pred_last_op = pred_instructions->last();
6187 
6188         if (pred_last_op->code() == lir_branch) {
6189           assert(pred_last_op->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
6190           LIR_OpBranch* pred_last_branch = (LIR_OpBranch*)pred_last_op;
6191 
6192           if (pred_last_branch->block() == block && pred_last_branch->cond() == lir_cond_always && pred_last_branch->info() == NULL) {
6193             // replace the jump to a return with a direct return
6194             // Note: currently the edge between the blocks is not deleted
6195             pred_instructions->at_put(pred_instructions->length() - 1, new LIR_Op1(lir_return, return_opr));
6196 #ifdef ASSERT
6197             return_converted.set_bit(pred->block_id());
6198 #endif
6199           }
6200         }
6201       }
6202     }
6203   }
6204 }
6205 
6206 
6207 #ifdef ASSERT
6208 void ControlFlowOptimizer::verify(BlockList* code) {
6209   for (int i = 0; i < code->length(); i++) {
6210     BlockBegin* block = code->at(i);
6211     LIR_OpList* instructions = block->lir()->instructions_list();
6212 
6213     int j;
6214     for (j = 0; j < instructions->length(); j++) {
6215       LIR_OpBranch* op_branch = instructions->at(j)->as_OpBranch();
6216 
6217       if (op_branch != NULL) {
6218         assert(op_branch->block() == NULL || code->index_of(op_branch->block()) != -1, "branch target not valid");
6219         assert(op_branch->ublock() == NULL || code->index_of(op_branch->ublock()) != -1, "branch target not valid");
6220       }
6221     }
6222 
6223     for (j = 0; j < block->number_of_sux() - 1; j++) {
6224       BlockBegin* sux = block->sux_at(j);
6225       assert(code->index_of(sux) != -1, "successor not valid");
6226     }
6227 
6228     for (j = 0; j < block->number_of_preds() - 1; j++) {
6229       BlockBegin* pred = block->pred_at(j);
6230       assert(code->index_of(pred) != -1, "predecessor not valid");
6231     }
6232   }
6233 }
6234 #endif
6235 
6236 
6237 #ifndef PRODUCT
6238 
6239 // Implementation of LinearStatistic
6240 
6241 const char* LinearScanStatistic::counter_name(int counter_idx) {
6242   switch (counter_idx) {
6243     case counter_method:          return "compiled methods";
6244     case counter_fpu_method:      return "methods using fpu";
6245     case counter_loop_method:     return "methods with loops";
6246     case counter_exception_method:return "methods with xhandler";
6247 
6248     case counter_loop:            return "loops";
6249     case counter_block:           return "blocks";
6250     case counter_loop_block:      return "blocks inside loop";
6251     case counter_exception_block: return "exception handler entries";
6252     case counter_interval:        return "intervals";
6253     case counter_fixed_interval:  return "fixed intervals";
6254     case counter_range:           return "ranges";
6255     case counter_fixed_range:     return "fixed ranges";
6256     case counter_use_pos:         return "use positions";
6257     case counter_fixed_use_pos:   return "fixed use positions";
6258     case counter_spill_slots:     return "spill slots";
6259 
6260     // counter for classes of lir instructions
6261     case counter_instruction:     return "total instructions";
6262     case counter_label:           return "labels";
6263     case counter_entry:           return "method entries";
6264     case counter_return:          return "method returns";
6265     case counter_call:            return "method calls";
6266     case counter_move:            return "moves";
6267     case counter_cmp:             return "compare";
6268     case counter_cond_branch:     return "conditional branches";
6269     case counter_uncond_branch:   return "unconditional branches";
6270     case counter_stub_branch:     return "branches to stub";
6271     case counter_alu:             return "arithmetic + logic";
6272     case counter_alloc:           return "allocations";
6273     case counter_sync:            return "synchronisation";
6274     case counter_throw:           return "throw";
6275     case counter_unwind:          return "unwind";
6276     case counter_typecheck:       return "type+null-checks";
6277     case counter_fpu_stack:       return "fpu-stack";
6278     case counter_misc_inst:       return "other instructions";
6279     case counter_other_inst:      return "misc. instructions";
6280 
6281     // counter for different types of moves
6282     case counter_move_total:      return "total moves";
6283     case counter_move_reg_reg:    return "register->register";
6284     case counter_move_reg_stack:  return "register->stack";
6285     case counter_move_stack_reg:  return "stack->register";
6286     case counter_move_stack_stack:return "stack->stack";
6287     case counter_move_reg_mem:    return "register->memory";
6288     case counter_move_mem_reg:    return "memory->register";
6289     case counter_move_const_any:  return "constant->any";
6290 
6291     case blank_line_1:            return "";
6292     case blank_line_2:            return "";
6293 
6294     default: ShouldNotReachHere(); return "";
6295   }
6296 }
6297 
6298 LinearScanStatistic::Counter LinearScanStatistic::base_counter(int counter_idx) {
6299   if (counter_idx == counter_fpu_method || counter_idx == counter_loop_method || counter_idx == counter_exception_method) {
6300     return counter_method;
6301   } else if (counter_idx == counter_loop_block || counter_idx == counter_exception_block) {
6302     return counter_block;
6303   } else if (counter_idx >= counter_instruction && counter_idx <= counter_other_inst) {
6304     return counter_instruction;
6305   } else if (counter_idx >= counter_move_total && counter_idx <= counter_move_const_any) {
6306     return counter_move_total;
6307   }
6308   return invalid_counter;
6309 }
6310 
6311 LinearScanStatistic::LinearScanStatistic() {
6312   for (int i = 0; i < number_of_counters; i++) {
6313     _counters_sum[i] = 0;
6314     _counters_max[i] = -1;
6315   }
6316 
6317 }
6318 
6319 // add the method-local numbers to the total sum
6320 void LinearScanStatistic::sum_up(LinearScanStatistic &method_statistic) {
6321   for (int i = 0; i < number_of_counters; i++) {
6322     _counters_sum[i] += method_statistic._counters_sum[i];
6323     _counters_max[i] = MAX2(_counters_max[i], method_statistic._counters_sum[i]);
6324   }
6325 }
6326 
6327 void LinearScanStatistic::print(const char* title) {
6328   if (CountLinearScan || TraceLinearScanLevel > 0) {
6329     tty->cr();
6330     tty->print_cr("***** LinearScan statistic - %s *****", title);
6331 
6332     for (int i = 0; i < number_of_counters; i++) {
6333       if (_counters_sum[i] > 0 || _counters_max[i] >= 0) {
6334         tty->print("%25s: %8d", counter_name(i), _counters_sum[i]);
6335 
6336         if (base_counter(i) != invalid_counter) {
6337           tty->print("  (%5.1f%%) ", _counters_sum[i] * 100.0 / _counters_sum[base_counter(i)]);
6338         } else {
6339           tty->print("           ");
6340         }
6341 
6342         if (_counters_max[i] >= 0) {
6343           tty->print("%8d", _counters_max[i]);
6344         }
6345       }
6346       tty->cr();
6347     }
6348   }
6349 }
6350 
6351 void LinearScanStatistic::collect(LinearScan* allocator) {
6352   inc_counter(counter_method);
6353   if (allocator->has_fpu_registers()) {
6354     inc_counter(counter_fpu_method);
6355   }
6356   if (allocator->num_loops() > 0) {
6357     inc_counter(counter_loop_method);
6358   }
6359   inc_counter(counter_loop, allocator->num_loops());
6360   inc_counter(counter_spill_slots, allocator->max_spills());
6361 
6362   int i;
6363   for (i = 0; i < allocator->interval_count(); i++) {
6364     Interval* cur = allocator->interval_at(i);
6365 
6366     if (cur != NULL) {
6367       inc_counter(counter_interval);
6368       inc_counter(counter_use_pos, cur->num_use_positions());
6369       if (LinearScan::is_precolored_interval(cur)) {
6370         inc_counter(counter_fixed_interval);
6371         inc_counter(counter_fixed_use_pos, cur->num_use_positions());
6372       }
6373 
6374       Range* range = cur->first();
6375       while (range != Range::end()) {
6376         inc_counter(counter_range);
6377         if (LinearScan::is_precolored_interval(cur)) {
6378           inc_counter(counter_fixed_range);
6379         }
6380         range = range->next();
6381       }
6382     }
6383   }
6384 
6385   bool has_xhandlers = false;
6386   // Note: only count blocks that are in code-emit order
6387   for (i = 0; i < allocator->ir()->code()->length(); i++) {
6388     BlockBegin* cur = allocator->ir()->code()->at(i);
6389 
6390     inc_counter(counter_block);
6391     if (cur->loop_depth() > 0) {
6392       inc_counter(counter_loop_block);
6393     }
6394     if (cur->is_set(BlockBegin::exception_entry_flag)) {
6395       inc_counter(counter_exception_block);
6396       has_xhandlers = true;
6397     }
6398 
6399     LIR_OpList* instructions = cur->lir()->instructions_list();
6400     for (int j = 0; j < instructions->length(); j++) {
6401       LIR_Op* op = instructions->at(j);
6402 
6403       inc_counter(counter_instruction);
6404 
6405       switch (op->code()) {
6406         case lir_label:           inc_counter(counter_label); break;
6407         case lir_std_entry:
6408         case lir_osr_entry:       inc_counter(counter_entry); break;
6409         case lir_return:          inc_counter(counter_return); break;
6410 
6411         case lir_rtcall:
6412         case lir_static_call:
6413         case lir_optvirtual_call:
6414         case lir_virtual_call:    inc_counter(counter_call); break;
6415 
6416         case lir_move: {
6417           inc_counter(counter_move);
6418           inc_counter(counter_move_total);
6419 
6420           LIR_Opr in = op->as_Op1()->in_opr();
6421           LIR_Opr res = op->as_Op1()->result_opr();
6422           if (in->is_register()) {
6423             if (res->is_register()) {
6424               inc_counter(counter_move_reg_reg);
6425             } else if (res->is_stack()) {
6426               inc_counter(counter_move_reg_stack);
6427             } else if (res->is_address()) {
6428               inc_counter(counter_move_reg_mem);
6429             } else {
6430               ShouldNotReachHere();
6431             }
6432           } else if (in->is_stack()) {
6433             if (res->is_register()) {
6434               inc_counter(counter_move_stack_reg);
6435             } else {
6436               inc_counter(counter_move_stack_stack);
6437             }
6438           } else if (in->is_address()) {
6439             assert(res->is_register(), "must be");
6440             inc_counter(counter_move_mem_reg);
6441           } else if (in->is_constant()) {
6442             inc_counter(counter_move_const_any);
6443           } else {
6444             ShouldNotReachHere();
6445           }
6446           break;
6447         }
6448 
6449         case lir_cmp:             inc_counter(counter_cmp); break;
6450 
6451         case lir_branch:
6452         case lir_cond_float_branch: {
6453           LIR_OpBranch* branch = op->as_OpBranch();
6454           if (branch->block() == NULL) {
6455             inc_counter(counter_stub_branch);
6456           } else if (branch->cond() == lir_cond_always) {
6457             inc_counter(counter_uncond_branch);
6458           } else {
6459             inc_counter(counter_cond_branch);
6460           }
6461           break;
6462         }
6463 
6464         case lir_neg:
6465         case lir_add:
6466         case lir_sub:
6467         case lir_mul:
6468         case lir_mul_strictfp:
6469         case lir_div:
6470         case lir_div_strictfp:
6471         case lir_rem:
6472         case lir_sqrt:
6473         case lir_sin:
6474         case lir_cos:
6475         case lir_abs:
6476         case lir_log10:
6477         case lir_log:
6478         case lir_logic_and:
6479         case lir_logic_or:
6480         case lir_logic_xor:
6481         case lir_shl:
6482         case lir_shr:
6483         case lir_ushr:            inc_counter(counter_alu); break;
6484 
6485         case lir_alloc_object:
6486         case lir_alloc_array:     inc_counter(counter_alloc); break;
6487 
6488         case lir_monaddr:
6489         case lir_lock:
6490         case lir_unlock:          inc_counter(counter_sync); break;
6491 
6492         case lir_throw:           inc_counter(counter_throw); break;
6493 
6494         case lir_unwind:          inc_counter(counter_unwind); break;
6495 
6496         case lir_null_check:
6497         case lir_leal:
6498         case lir_instanceof:
6499         case lir_checkcast:
6500         case lir_store_check:     inc_counter(counter_typecheck); break;
6501 
6502         case lir_fpop_raw:
6503         case lir_fxch:
6504         case lir_fld:             inc_counter(counter_fpu_stack); break;
6505 
6506         case lir_nop:
6507         case lir_push:
6508         case lir_pop:
6509         case lir_convert:
6510         case lir_roundfp:
6511         case lir_cmove:           inc_counter(counter_misc_inst); break;
6512 
6513         default:                  inc_counter(counter_other_inst); break;
6514       }
6515     }
6516   }
6517 
6518   if (has_xhandlers) {
6519     inc_counter(counter_exception_method);
6520   }
6521 }
6522 
6523 void LinearScanStatistic::compute(LinearScan* allocator, LinearScanStatistic &global_statistic) {
6524   if (CountLinearScan || TraceLinearScanLevel > 0) {
6525 
6526     LinearScanStatistic local_statistic;
6527 
6528     local_statistic.collect(allocator);
6529     global_statistic.sum_up(local_statistic);
6530 
6531     if (TraceLinearScanLevel > 2) {
6532       local_statistic.print("current local statistic");
6533     }
6534   }
6535 }
6536 
6537 
6538 // Implementation of LinearTimers
6539 
6540 LinearScanTimers::LinearScanTimers() {
6541   for (int i = 0; i < number_of_timers; i++) {
6542     timer(i)->reset();
6543   }
6544 }
6545 
6546 const char* LinearScanTimers::timer_name(int idx) {
6547   switch (idx) {
6548     case timer_do_nothing:               return "Nothing (Time Check)";
6549     case timer_number_instructions:      return "Number Instructions";
6550     case timer_compute_local_live_sets:  return "Local Live Sets";
6551     case timer_compute_global_live_sets: return "Global Live Sets";
6552     case timer_build_intervals:          return "Build Intervals";
6553     case timer_sort_intervals_before:    return "Sort Intervals Before";
6554     case timer_allocate_registers:       return "Allocate Registers";
6555     case timer_resolve_data_flow:        return "Resolve Data Flow";
6556     case timer_sort_intervals_after:     return "Sort Intervals After";
6557     case timer_eliminate_spill_moves:    return "Spill optimization";
6558     case timer_assign_reg_num:           return "Assign Reg Num";
6559     case timer_allocate_fpu_stack:       return "Allocate FPU Stack";
6560     case timer_optimize_lir:             return "Optimize LIR";
6561     default: ShouldNotReachHere();       return "";
6562   }
6563 }
6564 
6565 void LinearScanTimers::begin_method() {
6566   if (TimeEachLinearScan) {
6567     // reset all timers to measure only current method
6568     for (int i = 0; i < number_of_timers; i++) {
6569       timer(i)->reset();
6570     }
6571   }
6572 }
6573 
6574 void LinearScanTimers::end_method(LinearScan* allocator) {
6575   if (TimeEachLinearScan) {
6576 
6577     double c = timer(timer_do_nothing)->seconds();
6578     double total = 0;
6579     for (int i = 1; i < number_of_timers; i++) {
6580       total += timer(i)->seconds() - c;
6581     }
6582 
6583     if (total >= 0.0005) {
6584       // print all information in one line for automatic processing
6585       tty->print("@"); allocator->compilation()->method()->print_name();
6586 
6587       tty->print("@ %d ", allocator->compilation()->method()->code_size());
6588       tty->print("@ %d ", allocator->block_at(allocator->block_count() - 1)->last_lir_instruction_id() / 2);
6589       tty->print("@ %d ", allocator->block_count());
6590       tty->print("@ %d ", allocator->num_virtual_regs());
6591       tty->print("@ %d ", allocator->interval_count());
6592       tty->print("@ %d ", allocator->_num_calls);
6593       tty->print("@ %d ", allocator->num_loops());
6594 
6595       tty->print("@ %6.6f ", total);
6596       for (int i = 1; i < number_of_timers; i++) {
6597         tty->print("@ %4.1f ", ((timer(i)->seconds() - c) / total) * 100);
6598       }
6599       tty->cr();
6600     }
6601   }
6602 }
6603 
6604 void LinearScanTimers::print(double total_time) {
6605   if (TimeLinearScan) {
6606     // correction value: sum of dummy-timer that only measures the time that
6607     // is necessary to start and stop itself
6608     double c = timer(timer_do_nothing)->seconds();
6609 
6610     for (int i = 0; i < number_of_timers; i++) {
6611       double t = timer(i)->seconds();
6612       tty->print_cr("    %25s: %6.3f s (%4.1f%%)  corrected: %6.3f s (%4.1f%%)", timer_name(i), t, (t / total_time) * 100.0, t - c, (t - c) / (total_time - 2 * number_of_timers * c) * 100);
6613     }
6614   }
6615 }
6616 
6617 #endif // #ifndef PRODUCT