src/share/vm/c1/c1_LinearScan.cpp

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_CFGPrinter.hpp"
  27 #include "c1/c1_CodeStubs.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_FrameMap.hpp"
  30 #include "c1/c1_IR.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_LinearScan.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "utilities/bitMap.inline.hpp"
  35 #ifdef TARGET_ARCH_x86
  36 # include "vmreg_x86.inline.hpp"
  37 #endif



  38 #ifdef TARGET_ARCH_sparc
  39 # include "vmreg_sparc.inline.hpp"
  40 #endif
  41 #ifdef TARGET_ARCH_zero
  42 # include "vmreg_zero.inline.hpp"
  43 #endif
  44 #ifdef TARGET_ARCH_arm
  45 # include "vmreg_arm.inline.hpp"
  46 #endif
  47 #ifdef TARGET_ARCH_ppc
  48 # include "vmreg_ppc.inline.hpp"
  49 #endif
  50 
  51 
  52 #ifndef PRODUCT
  53 
  54   static LinearScanStatistic _stat_before_alloc;
  55   static LinearScanStatistic _stat_after_asign;
  56   static LinearScanStatistic _stat_final;
  57 


1076     if (result_in_memory) {
1077       // Move to an interval with must_start_in_memory set.
1079       // To avoid moves from stack to stack (not allowed), force the input operand into a register
1079       return mustHaveRegister;
1080 
1081     } else if (move->in_opr()->is_register() && move->result_opr()->is_register()) {
1082       // Move from register to register
1083       if (block_of_op_with_id(op->id())->is_set(BlockBegin::osr_entry_flag)) {
1084         // special handling of phi-function moves inside osr-entry blocks
1085         // the input operand must have a register instead of the output operand (leads to better register allocation)
1086         return mustHaveRegister;
1087       }
1088 
1089       // The input operand is not forced to a register (moves from stack to register are allowed),
1090       // but it is faster if the input operand is in a register
1091       return shouldHaveRegister;
1092     }
1093   }
1094 
1095 
1096 #ifdef X86
1097   if (op->code() == lir_cmove) {
1098     // conditional moves can handle stack operands
1099     assert(op->result_opr()->is_register(), "result must always be in a register");
1100     return shouldHaveRegister;
1101   }
1102 
1103   // optimizations for second input operand of arithmetic operations on Intel
1104   // this operand is allowed to be on the stack in some cases
1105   BasicType opr_type = opr->type_register();
1106   if (opr_type == T_FLOAT || opr_type == T_DOUBLE) {
1107     if ((UseSSE == 1 && opr_type == T_FLOAT) || UseSSE >= 2) {
1108       // SSE float instruction (T_DOUBLE only supported with SSE2)
1109       switch (op->code()) {
1110         case lir_cmp:
1111         case lir_add:
1112         case lir_sub:
1113         case lir_mul:
1114         case lir_div:
1115         {
1116           assert(op->as_Op2() != NULL, "must be LIR_Op2");


2178       // Check whether spill moves could have been appended at the end of this block,
2179       // before the branch instruction; if so, the split child information for this
2180       // branch would be incorrect.
2181       LIR_OpBranch* branch = block->lir()->instructions_list()->last()->as_OpBranch();
2182       if (branch != NULL) {
2183         if (block->live_out().at(opr->vreg_number())) {
2184           assert(branch->cond() == lir_cond_always, "block does not end with an unconditional jump");
2185           assert(false, "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolve_data_flow)");
2186         }
2187       }
2188     }
2189 #endif
2190 
2191     // operands are not changed when an interval is split during allocation,
2192     // so search the right interval here
2193     interval = split_child_at_op_id(interval, op_id, mode);
2194   }
2195 
2196   LIR_Opr res = operand_for_interval(interval);
2197 
2198 #ifdef X86
2199   // new semantics for is_last_use: not only set at the definite end of an interval,
2200   // but also before a hole
2201   // This may still miss some cases (e.g. for dead values), but the last-use
2202   // information does not need to be completely correct; it is only needed
2203   // for fpu stack allocation
2204   if (res->is_fpu_register()) {
2205     if (opr->is_last_use() || op_id == interval->to() || (op_id != -1 && interval->has_hole_between(op_id, op_id + 1))) {
2206       assert(op_id == -1 || !is_block_begin(op_id), "holes at the beginning of a block may also result from control flow");
2207       res = res->make_last_use();
2208     }
2209   }
2210 #endif
2211 
2212   assert(!gen()->is_vreg_flag_set(opr->vreg_number(), LIRGenerator::callee_saved) || !FrameMap::is_caller_save_register(res), "bad allocation");
2213 
2214   return res;
2215 }
2216 
2217 
2218 #ifdef ASSERT


4521 
4522 #ifndef PRODUCT
4523 void Interval::print(outputStream* out) const {
4524   const char* SpillState2Name[] = { "no definition", "no spill store", "one spill store", "store at definition", "start in memory", "no optimization" };
4525   const char* UseKind2Name[] = { "N", "L", "S", "M" };
4526 
4527   const char* type_name;
4528   LIR_Opr opr = LIR_OprFact::illegal();
4529   if (reg_num() < LIR_OprDesc::vreg_base) {
4530     type_name = "fixed";
4531     // need a temporary operand for fixed intervals because type() cannot be called
4532     if (assigned_reg() >= pd_first_cpu_reg && assigned_reg() <= pd_last_cpu_reg) {
4533       opr = LIR_OprFact::single_cpu(assigned_reg());
4534     } else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
4535       opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
4536 #ifdef X86
4537     } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) {
4538       opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
4539 #endif
4540     } else {

4541       ShouldNotReachHere();

4542     }
4543   } else {
4544     type_name = type2name(type());
4545     if (assigned_reg() != -1 &&
4546         (LinearScan::num_physical_regs(type()) == 1 || assigned_regHi() != -1)) {
4547       opr = LinearScan::calc_operand_for_interval(this);
4548     }
4549   }
4550 
4551   out->print("%d %s ", reg_num(), type_name);
4552   if (opr->is_valid()) {
4553     out->print("\"");
4554     opr->print(out);
4555     out->print("\" ");
4556   }
4557   out->print("%d %d ", split_parent()->reg_num(), (register_hint(false) != NULL ? register_hint(false)->reg_num() : -1));
4558 
4559   // print ranges
4560   Range* cur = _first;
4561   while (cur != Range::end()) {


5595 
5596     split_and_spill_interval(cur);
5597   } else {
5598     TRACE_LINEAR_SCAN(4, tty->print_cr("decided to use register %d, %d", reg, regHi));
5599     assert(reg != any_reg && (_num_phys_regs == 1 || regHi != any_reg), "no register found");
5600     assert(split_pos > 0, "invalid split_pos");
5601     assert(need_split == false || split_pos > cur->from(), "splitting interval at from");
5602 
5603     cur->assign_reg(reg, regHi);
5604     if (need_split) {
5605       // register not available for full interval, so split it
5606       split_when_partial_register_available(cur, split_pos);
5607     }
5608 
5609     // perform splitting and spilling for all affected intervals
5610     split_and_spill_intersecting_intervals(reg, regHi);
5611   }
5612 }
5613 
5614 bool LinearScanWalker::no_allocation_possible(Interval* cur) {
5615 #ifdef X86
5616   // fast calculation of intervals that can never get a register because
5617   // the next instruction is a call that blocks all registers
5618   // Note: this does not work if callee-saved registers are available (e.g. on Sparc)
5619 
5620   // check if this interval is the result of a split operation
5621   // (an interval got a register until this position)
5622   int pos = cur->from();
5623   if ((pos & 1) == 1) {
5624     // the current instruction is a call that blocks all registers
5625     if (pos < allocator()->max_lir_op_id() && allocator()->has_call(pos + 1)) {
5626       TRACE_LINEAR_SCAN(4, tty->print_cr("      free register cannot be available because all registers blocked by following call"));
5627 
5628       // safety check that there is really no register available
5629       assert(alloc_free_reg(cur) == false, "found a register for this interval");
5630       return true;
5631     }
5632 
5633   }
5634 #endif
5635   return false;




  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_CFGPrinter.hpp"
  27 #include "c1/c1_CodeStubs.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_FrameMap.hpp"
  30 #include "c1/c1_IR.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_LinearScan.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "utilities/bitMap.inline.hpp"
  35 #ifdef TARGET_ARCH_x86
  36 # include "vmreg_x86.inline.hpp"
  37 #endif
  38 #ifdef TARGET_ARCH_aarch64
  39 # include "vmreg_aarch64.inline.hpp"
  40 #endif
  41 #ifdef TARGET_ARCH_sparc
  42 # include "vmreg_sparc.inline.hpp"
  43 #endif
  44 #ifdef TARGET_ARCH_zero
  45 # include "vmreg_zero.inline.hpp"
  46 #endif
  47 #ifdef TARGET_ARCH_arm
  48 # include "vmreg_arm.inline.hpp"
  49 #endif
  50 #ifdef TARGET_ARCH_ppc
  51 # include "vmreg_ppc.inline.hpp"
  52 #endif
  53 
  54 
  55 #ifndef PRODUCT
  56 
  57   static LinearScanStatistic _stat_before_alloc;
  58   static LinearScanStatistic _stat_after_asign;
  59   static LinearScanStatistic _stat_final;
  60 
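Reviewer note: the only change in this hunk is the new TARGET_ARCH_aarch64 block. Each port contributes its own vmreg_<arch>.inline.hpp, selected by a TARGET_ARCH_<arch> macro that the build system defines for exactly one architecture. Purely as an illustration of the pattern (this port is hypothetical, not part of the webrev), a further port would extend the chain the same way:

    #ifdef TARGET_ARCH_riscv              // hypothetical example, not in this change
    # include "vmreg_riscv.inline.hpp"
    #endif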


1079     if (result_in_memory) {
1080       // Move to an interval with must_start_in_memory set.
1081       // To avoid moves from stack to stack (not allowed), force the input operand into a register
1082       return mustHaveRegister;
1083 
1084     } else if (move->in_opr()->is_register() && move->result_opr()->is_register()) {
1085       // Move from register to register
1086       if (block_of_op_with_id(op->id())->is_set(BlockBegin::osr_entry_flag)) {
1087         // special handling of phi-function moves inside osr-entry blocks
1088         // the input operand must have a register instead of the output operand (leads to better register allocation)
1089         return mustHaveRegister;
1090       }
1091 
1092       // The input operand is not forced to a register (moves from stack to register are allowed),
1093       // but it is faster if the input operand is in a register
1094       return shouldHaveRegister;
1095     }
1096   }
1097 
1098 
1099 #if defined(X86)
1100   if (op->code() == lir_cmove) {
1101     // conditional moves can handle stack operands
1102     assert(op->result_opr()->is_register(), "result must always be in a register");
1103     return shouldHaveRegister;
1104   }
1105 
1106   // optimizations for second input operand of arithmetic operations on Intel
1107   // this operand is allowed to be on the stack in some cases
1108   BasicType opr_type = opr->type_register();
1109   if (opr_type == T_FLOAT || opr_type == T_DOUBLE) {
1110     if ((UseSSE == 1 && opr_type == T_FLOAT) || UseSSE >= 2) {
1111       // SSE float instruction (T_DOUBLE only supported with SSE2)
1112       switch (op->code()) {
1113         case lir_cmp:
1114         case lir_add:
1115         case lir_sub:
1116         case lir_mul:
1117         case lir_div:
1118         {
1119           assert(op->as_Op2() != NULL, "must be LIR_Op2");
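Reviewer note: the move handling above reduces to a small decision table. Below is a condensed sketch of only the cases visible in this hunk, with hypothetical parameter names; the real code derives these conditions from the LIR move and its operands, and continues with the type-specific checks that follow when a move does not match:

    // Illustrative condensation, not the actual use_kind_of_input_operand signature.
    IntervalUseKind move_input_use_kind(bool result_in_memory,
                                        bool reg_to_reg,
                                        bool in_osr_entry_block) {
      if (result_in_memory) {
        // stack-to-stack moves are not allowed, so force the input into a register
        return mustHaveRegister;
      }
      if (reg_to_reg) {
        // phi-function moves in osr-entry blocks pin the input; otherwise a
        // register is merely preferred, since stack-to-register moves are legal
        return in_osr_entry_block ? mustHaveRegister : shouldHaveRegister;
      }
      return shouldHaveRegister;  // placeholder: other operands are classified later
    }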


2181       // Check whether spill moves could have been appended at the end of this block,
2182       // before the branch instruction; if so, the split child information for this
2183       // branch would be incorrect.
2184       LIR_OpBranch* branch = block->lir()->instructions_list()->last()->as_OpBranch();
2185       if (branch != NULL) {
2186         if (block->live_out().at(opr->vreg_number())) {
2187           assert(branch->cond() == lir_cond_always, "block does not end with an unconditional jump");
2188           assert(false, "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolve_data_flow)");
2189         }
2190       }
2191     }
2192 #endif
2193 
2194     // operands are not changed when an interval is split during allocation,
2195     // so search the right interval here
2196     interval = split_child_at_op_id(interval, op_id, mode);
2197   }
2198 
2199   LIR_Opr res = operand_for_interval(interval);
2200 
2201 #if defined(X86) || defined(AARCH64)
2202   // new semantics for is_last_use: not only set at the definite end of an interval,
2203   // but also before a hole
2204   // This may still miss some cases (e.g. for dead values), but the last-use
2205   // information does not need to be completely correct; it is only needed
2206   // for fpu stack allocation
2207   if (res->is_fpu_register()) {
2208     if (opr->is_last_use() || op_id == interval->to() || (op_id != -1 && interval->has_hole_between(op_id, op_id + 1))) {
2209       assert(op_id == -1 || !is_block_begin(op_id), "holes at the beginning of a block may also result from control flow");
2210       res = res->make_last_use();
2211     }
2212   }
2213 #endif
2214 
2215   assert(!gen()->is_vreg_flag_set(opr->vreg_number(), LIRGenerator::callee_saved) || !FrameMap::is_caller_save_register(res), "bad allocation");
2216 
2217   return res;
2218 }
2219 
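Reviewer note on the widened guard: the last-use flag is consumed by fpu stack allocation, and the condition under which it is set is compact enough to state as a standalone predicate. A sketch with an illustrative helper name, using only calls that appear in the code above:

    // An operand counts as a last use if the LIR operand already says so, if the
    // interval ends exactly at this operation, or if the interval has a lifetime
    // hole immediately after this operation.
    static bool counts_as_last_use(LIR_Opr opr, Interval* interval, int op_id) {
      return opr->is_last_use()
          || op_id == interval->to()
          || (op_id != -1 && interval->has_hole_between(op_id, op_id + 1));
    }

Presumably the aarch64 port also consumes this last-use information, hence X86 || AARCH64.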
2220 
2221 #ifdef ASSERT


4524 
4525 #ifndef PRODUCT
4526 void Interval::print(outputStream* out) const {
4527   const char* SpillState2Name[] = { "no definition", "no spill store", "one spill store", "store at definition", "start in memory", "no optimization" };
4528   const char* UseKind2Name[] = { "N", "L", "S", "M" };
4529 
4530   const char* type_name;
4531   LIR_Opr opr = LIR_OprFact::illegal();
4532   if (reg_num() < LIR_OprDesc::vreg_base) {
4533     type_name = "fixed";
4534     // need a temporary operand for fixed intervals because type() cannot be called
4535     if (assigned_reg() >= pd_first_cpu_reg && assigned_reg() <= pd_last_cpu_reg) {
4536       opr = LIR_OprFact::single_cpu(assigned_reg());
4537     } else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
4538       opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
4539 #ifdef X86
4540     } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) {
4541       opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
4542 #endif
4543     } else {
4544 #if !defined(AARCH64)
4545       ShouldNotReachHere();
4546 #endif
4547     }
4548   } else {
4549     type_name = type2name(type());
4550     if (assigned_reg() != -1 &&
4551         (LinearScan::num_physical_regs(type()) == 1 || assigned_regHi() != -1)) {
4552       opr = LinearScan::calc_operand_for_interval(this);
4553     }
4554   }
4555 
4556   out->print("%d %s ", reg_num(), type_name);
4557   if (opr->is_valid()) {
4558     out->print("\"");
4559     opr->print(out);
4560     out->print("\" ");
4561   }
4562   out->print("%d %d ", split_parent()->reg_num(), (register_hint(false) != NULL ? register_hint(false)->reg_num() : -1));
4563 
4564   // print ranges
4565   Range* cur = _first;
4566   while (cur != Range::end()) {
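Reviewer note: with ShouldNotReachHere() compiled out on AARCH64, a fixed interval whose assigned register falls outside the cpu, fpu, and (on x86) xmm ranges now simply leaves opr illegal, so the is_valid() check skips the quoted operand field instead of aborting. Read off the print calls above, each emitted line looks schematically like this (the exact operand syntax depends on LIR_Opr's own print):

    <reg_num> <type> ["<operand>"] <split-parent reg_num> <register hint or -1> <ranges...>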


5600 
5601     split_and_spill_interval(cur);
5602   } else {
5603     TRACE_LINEAR_SCAN(4, tty->print_cr("decided to use register %d, %d", reg, regHi));
5604     assert(reg != any_reg && (_num_phys_regs == 1 || regHi != any_reg), "no register found");
5605     assert(split_pos > 0, "invalid split_pos");
5606     assert(need_split == false || split_pos > cur->from(), "splitting interval at from");
5607 
5608     cur->assign_reg(reg, regHi);
5609     if (need_split) {
5610       // register not available for full interval, so split it
5611       split_when_partial_register_available(cur, split_pos);
5612     }
5613 
5614     // perform splitting and spilling for all affected intervals
5615     split_and_spill_intersecting_intervals(reg, regHi);
5616   }
5617 }
5618 
5619 bool LinearScanWalker::no_allocation_possible(Interval* cur) {
5620 #if defined(X86)
5621   // fast calculation of intervals that can never get a register because
5622   // the next instruction is a call that blocks all registers
5623   // Note: this does not work if callee-saved registers are available (e.g. on Sparc)
5624 
5625   // check if this interval is the result of a split operation
5626   // (an interval got a register until this position)
5627   int pos = cur->from();
5628   if ((pos & 1) == 1) {
5629     // the current instruction is a call that blocks all registers
5630     if (pos < allocator()->max_lir_op_id() && allocator()->has_call(pos + 1)) {
5631       TRACE_LINEAR_SCAN(4, tty->print_cr("      free register cannot be available because all registers blocked by following call"));
5632 
5633       // safety check that there is really no register available
5634       assert(alloc_free_reg(cur) == false, "found a register for this interval");
5635       return true;
5636     }
5637 
5638   }
5639 #endif
5640   return false;
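Reviewer note: the (pos & 1) == 1 test relies on linear scan's numbering scheme, in which LIR operations receive even ids and odd positions denote the gap between two operations; an interval can therefore only begin at an odd position if it was produced by a split. A sketch of the two-part test as a standalone helper (illustrative name, using only calls visible above):

    // True if 'cur' starts between two instructions (i.e. it is a split child)
    // and the instruction immediately following is a call that blocks all registers.
    static bool blocked_by_following_call(LinearScan* allocator, Interval* cur) {
      int pos = cur->from();
      return (pos & 1) == 1
          && pos < allocator->max_lir_op_id()
          && allocator->has_call(pos + 1);
    }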

