
src/cpu/x86/vm/c1_LinearScan_x86.hpp

Old version:

inline void LinearScan::pd_add_temps(LIR_Op* op) {
  switch (op->code()) {
    case lir_tan:
    case lir_sin:
    case lir_cos: {
      // The slow path for these functions may need to save and
      // restore all live registers but we don't want to save and
      // restore everything all the time, so mark the xmms as being
      // killed.  If the slow path were explicit or we could propagate
      // live register masks down to the assembly we could do better
      // but we don't have any easy way to do that right now.  We
      // could also consider not killing all xmm registers if we
      // assume that slow paths are uncommon but it's not clear that
      // would be a good idea.
      if (UseSSE > 0) {
#ifndef PRODUCT
        if (TraceLinearScanLevel >= 2) {
          tty->print_cr("killing XMMs for trig");
        }
#endif
        int op_id = op->id();
        for (int xmm = 0; xmm < FrameMap::nof_caller_save_xmm_regs; xmm++) {
          LIR_Opr opr = FrameMap::caller_save_xmm_reg_at(xmm);
          add_temp(reg_num(opr), op_id, noUse, T_ILLEGAL);
        }
      }
      break;
    }
  }
}


// Implementation of LinearScanWalker

inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {
  if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::byte_reg)) {
    assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");
    _first_reg = pd_first_byte_reg;
    _last_reg = FrameMap::last_byte_reg();
    return true;
  } else if ((UseSSE >= 1 && cur->type() == T_FLOAT) || (UseSSE >= 2 && cur->type() == T_DOUBLE)) {
    _first_reg = pd_first_xmm_reg;
    _last_reg = pd_last_xmm_reg;
    return true;
  }

  return false;
}


class FpuStackAllocator VALUE_OBJ_CLASS_SPEC {
 private:
  Compilation* _compilation;
  LinearScan* _allocator;

  LIR_OpVisitState visitor;

  LIR_List* _lir;
  int _pos;
  FpuStackSim _sim;
  FpuStackSim _temp_sim;

  bool _debug_information_computed;




New version:

inline void LinearScan::pd_add_temps(LIR_Op* op) {
  switch (op->code()) {
    case lir_tan:
    case lir_sin:
    case lir_cos: {
      // The slow path for these functions may need to save and
      // restore all live registers but we don't want to save and
      // restore everything all the time, so mark the xmms as being
      // killed.  If the slow path were explicit or we could propagate
      // live register masks down to the assembly we could do better
      // but we don't have any easy way to do that right now.  We
      // could also consider not killing all xmm registers if we
      // assume that slow paths are uncommon but it's not clear that
      // would be a good idea.
      if (UseSSE > 0) {
#ifndef PRODUCT
        if (TraceLinearScanLevel >= 2) {
          tty->print_cr("killing XMMs for trig");
        }
#endif
        int num_caller_save_xmm_regs = FrameMap::nof_caller_save_xmm_regs;
#ifdef _LP64
        if (UseAVX < 3) {
          // Without AVX-512 (EVEX encodings), xmm16-xmm31 do not exist, so
          // only the lower half of the frame map's caller-saved XMM set is
          // actually allocatable.
          num_caller_save_xmm_regs = num_caller_save_xmm_regs / 2;
        }
#endif
        int op_id = op->id();
        for (int xmm = 0; xmm < num_caller_save_xmm_regs; xmm++) {
          LIR_Opr opr = FrameMap::caller_save_xmm_reg_at(xmm);
          add_temp(reg_num(opr), op_id, noUse, T_ILLEGAL);
        }
      }
      break;
    }
  }
}
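
For context on the halving above: with AVX-512 (UseAVX >= 3), x86_64 exposes
xmm16-xmm31 in addition to xmm0-xmm15, so the LP64 frame map describes twice
as many caller-saved XMM registers as pre-AVX-512 hardware actually has. The
following is a minimal standalone sketch of that arithmetic, not HotSpot
code; the constant 32 is an assumed stand-in for
FrameMap::nof_caller_save_xmm_regs on LP64.

#include <cstdio>

// Assumed stand-in for FrameMap::nof_caller_save_xmm_regs on LP64, where
// the frame map describes xmm0-xmm31.
static const int kNofCallerSaveXmmRegs = 32;

// Mirrors the gating above: pre-AVX-512 hardware has no xmm16-xmm31, so
// only the lower half of the frame map's XMM set can hold live values.
static int effective_caller_save_xmms(int use_avx) {
  int n = kNofCallerSaveXmmRegs;
  if (use_avx < 3) {
    n /= 2;
  }
  return n;
}

int main() {
  printf("UseAVX=2: %d caller-saved XMMs\n", effective_caller_save_xmms(2)); // 16
  printf("UseAVX=3: %d caller-saved XMMs\n", effective_caller_save_xmms(3)); // 32
  return 0;
}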


// Implementation of LinearScanWalker

inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {
  int last_xmm_reg = pd_last_xmm_reg;
  if (UseAVX < 3) {
    // With UseAVX < 3 the walker restricts itself to the lower half of the
    // XMM registers described by the frame map.
    last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
  }
  if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::byte_reg)) {
    assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");
    _first_reg = pd_first_byte_reg;
    _last_reg = FrameMap::last_byte_reg();
    return true;
  } else if ((UseSSE >= 1 && cur->type() == T_FLOAT) || (UseSSE >= 2 && cur->type() == T_DOUBLE)) {
    _first_reg = pd_first_xmm_reg;
    _last_reg = last_xmm_reg;
    return true;
  }

  return false;
}
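
Similarly, the walker change only clamps the upper bound of the XMM
allocation range. A standalone sketch of that bound computation follows; the
register numbering is an assumption for illustration, not the real pd_*
constants from c1_Defs_x86.hpp.

#include <cassert>

// Assumed numbering for illustration: xmm0 gets frame-map index 16 and the
// LP64 frame map holds 32 XMM slots (xmm0-xmm31).
static const int kFirstXmmReg = 16;
static const int kNofXmmRegsFrameMap = 32;
static const int kLastXmmReg = kFirstXmmReg + kNofXmmRegsFrameMap - 1;

// Mirrors pd_init_regs_for_alloc: without AVX-512 the allocatable XMM
// range ends at the last of the lower sixteen registers.
static int last_allocatable_xmm(int use_avx) {
  if (use_avx < 3) {
    return kFirstXmmReg + (kNofXmmRegsFrameMap / 2) - 1;
  }
  return kLastXmmReg;
}

int main() {
  assert(last_allocatable_xmm(2) == kFirstXmmReg + 15); // xmm0-xmm15 only
  assert(last_allocatable_xmm(3) == kLastXmmReg);       // full xmm0-xmm31
  return 0;
}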


class FpuStackAllocator VALUE_OBJ_CLASS_SPEC {
 private:
  Compilation* _compilation;
  LinearScan* _allocator;

  LIR_OpVisitState visitor;

  LIR_List* _lir;
  int _pos;
  FpuStackSim _sim;
  FpuStackSim _temp_sim;

  bool _debug_information_computed;

