  // Intel requires two cpu registers for long,
  // but requires only one fpu register for double
  // (in 64-bit mode a long fits in a single register)
  if (LP64_ONLY(false &&) type == T_LONG) {
    return 2;
  }
  return 1;
}

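// Whether a multi-register value must occupy adjacent physical registers;
// never required on x86.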
inline bool LinearScan::requires_adjacent_regs(BasicType type) {
  return false;
}

inline bool LinearScan::is_caller_save(int assigned_reg) {
  assert(assigned_reg >= 0 && assigned_reg < nof_regs, "should call this only for registers");
  return true; // no callee-saved registers on Intel
}

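// Record platform-specific temp (killed) registers for the given LIR op.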
inline void LinearScan::pd_add_temps(LIR_Op* op) {
  switch (op->code()) {
    case lir_tan:
    case lir_sin:
    case lir_cos: {
      // The slow path for these functions may need to save and
      // restore all live registers but we don't want to save and
      // restore everything all the time, so mark the xmms as being
      // killed. If the slow path were explicit or we could propagate
      // live register masks down to the assembly we could do better
      // but we don't have any easy way to do that right now. We
      // could also consider not killing all xmm registers if we
      // assume that slow paths are uncommon but it's not clear that
      // would be a good idea.
      if (UseSSE > 0) {
#ifndef PRODUCT
        if (TraceLinearScanLevel >= 2) {
          tty->print_cr("killing XMMs for trig");
        }
#endif
        int num_caller_save_xmm_regs = FrameMap::get_num_caller_save_xmms();
        int op_id = op->id();
        for (int xmm = 0; xmm < num_caller_save_xmm_regs; xmm++) {
          LIR_Opr opr = FrameMap::caller_save_xmm_reg_at(xmm);
          add_temp(reg_num(opr), op_id, noUse, T_ILLEGAL);
        }
      }
      break;
    }
    default:
      break;
  }
}

// Implementation of LinearScanWalker

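// Restrict the allocatable register range for the given interval; returns
// true if a platform-specific range was selected.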
inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {
  int last_xmm_reg = pd_last_xmm_reg;
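  // XMM16-XMM31 are only addressable with AVX-512 (UseAVX >= 3); otherwise
  // restrict allocation to the lower half of the frame map's XMM registers.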
  if (UseAVX < 3) {
    last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
  }
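  // Operands flagged byte_reg need a register with an 8-bit encoding
  // (on 32-bit x86 only rax/rbx/rcx/rdx qualify).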
  if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::byte_reg)) {
    assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");
    _first_reg = pd_first_byte_reg;
    _last_reg = FrameMap::last_byte_reg();
    return true;
  } else if ((UseSSE >= 1 && cur->type() == T_FLOAT) || (UseSSE >= 2 && cur->type() == T_DOUBLE)) {
    _first_reg = pd_first_xmm_reg;
    _last_reg = last_xmm_reg;
    return true;
  }

  return false;
}


class FpuStackAllocator VALUE_OBJ_CLASS_SPEC {
 private:
  Compilation* _compilation;
  LinearScan* _allocator;

  LIR_OpVisitState visitor;

  LIR_List* _lir;
  int _pos;
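  // simulated FPU register stack state (plus a scratch copy)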
  FpuStackSim _sim;
  FpuStackSim _temp_sim;

  bool _debug_information_computed;

  LinearScan* allocator() { return _allocator; }
  Compilation* compilation() const { return _compilation; }

  // unified bailout support
  void bailout(const char* msg) const { compilation()->bailout(msg); }