/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_C1_LINEARSCAN_X86_HPP
#define CPU_X86_VM_C1_LINEARSCAN_X86_HPP

inline bool LinearScan::is_processed_reg_num(int reg_num) {
#ifndef _LP64
  // rsp and rbp (numbers 6 and 7) are ignored
  assert(FrameMap::rsp_opr->cpu_regnr() == 6, "wrong assumption below");
  assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below");
  assert(reg_num >= 0, "invalid reg_num");
#else
  // rsp and rbp, r10, r15 (numbers [12,15]) are ignored
  // r12 (number 11) is conditional on compressed oops.
  assert(FrameMap::r12_opr->cpu_regnr() == 11, "wrong assumption below");
  assert(FrameMap::r10_opr->cpu_regnr() == 12, "wrong assumption below");
  assert(FrameMap::r15_opr->cpu_regnr() == 13, "wrong assumption below");
  assert(FrameMap::rsp_opr->cpu_regnrLo() == 14, "wrong assumption below");
  assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below");
  assert(reg_num >= 0, "invalid reg_num");
#endif // _LP64
  return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map;
}

inline int LinearScan::num_physical_regs(BasicType type) {
  // Intel requires two cpu registers for long,
  // but requires only one fpu register for double
  if (LP64_ONLY(false &&) type == T_LONG) {
    return 2;
  }
  return 1;
}


inline bool LinearScan::requires_adjacent_regs(BasicType type) {
  return false;
}

inline bool LinearScan::is_caller_save(int assigned_reg) {
  assert(assigned_reg >= 0 && assigned_reg < nof_regs, "should call this only for registers");
  return true; // no callee-saved registers on Intel
}


inline void LinearScan::pd_add_temps(LIR_Op* op) {
  switch (op->code()) {
    case lir_tan:
    case lir_sin:
    case lir_cos: {
      // The slow path for these functions may need to save and
      // restore all live registers but we don't want to save and
      // restore everything all the time, so mark the xmms as being
      // killed.  If the slow path were explicit or we could propagate
      // live register masks down to the assembly we could do better
      // but we don't have any easy way to do that right now.  We
      // could also consider not killing all xmm registers if we
      // assume that slow paths are uncommon but it's not clear that
      // would be a good idea.
      if (UseSSE > 0) {
#ifndef PRODUCT
        if (TraceLinearScanLevel >= 2) {
          tty->print_cr("killing XMMs for trig");
        }
#endif
        int num_caller_save_xmm_regs = FrameMap::get_num_caller_save_xmms();
        int op_id = op->id();
        for (int xmm = 0; xmm < num_caller_save_xmm_regs; xmm++) {
          LIR_Opr opr = FrameMap::caller_save_xmm_reg_at(xmm);
          add_temp(reg_num(opr), op_id, noUse, T_ILLEGAL);
        }
      }
      break;
    }
  }
}


// Implementation of LinearScanWalker

inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {
  int last_xmm_reg = pd_last_xmm_reg;
  if (UseAVX < 3) {
    last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
  }
  if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::byte_reg)) {
    assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");
    _first_reg = pd_first_byte_reg;
    _last_reg  = FrameMap::last_byte_reg();
    return true;
  } else if ((UseSSE >= 1 && cur->type() == T_FLOAT) || (UseSSE >= 2 && cur->type() == T_DOUBLE)) {
    _first_reg = pd_first_xmm_reg;
    _last_reg  = last_xmm_reg;
    return true;
  }

  return false;
}


class FpuStackAllocator VALUE_OBJ_CLASS_SPEC {
 private:
  Compilation* _compilation;
  LinearScan* _allocator;

  LIR_OpVisitState visitor;

  LIR_List* _lir;
  int _pos;
  FpuStackSim _sim;
  FpuStackSim _temp_sim;

  bool _debug_information_computed;

  LinearScan* allocator()                  { return _allocator; }
  Compilation* compilation() const         { return _compilation; }

  // unified bailout support
  void bailout(const char* msg) const      { compilation()->bailout(msg); }
  bool bailed_out() const                  { return compilation()->bailed_out(); }

  int pos()                                { return _pos; }
  void set_pos(int pos)                    { _pos = pos; }
  LIR_Op* cur_op()                         { return lir()->instructions_list()->at(pos()); }
  LIR_List* lir()                          { return _lir; }
  void set_lir(LIR_List* lir)              { _lir = lir; }
  FpuStackSim* sim()                       { return &_sim; }
  FpuStackSim* temp_sim()                  { return &_temp_sim; }

  int fpu_num(LIR_Opr opr);
  int tos_offset(LIR_Opr opr);
  LIR_Opr to_fpu_stack_top(LIR_Opr opr, bool dont_check_offset = false);

  // Helper functions for handling operations
  void insert_op(LIR_Op* op);
  void insert_exchange(int offset);
  void insert_exchange(LIR_Opr opr);
  void insert_free(int offset);
  void insert_free_if_dead(LIR_Opr opr);
  void insert_free_if_dead(LIR_Opr opr, LIR_Opr ignore);
  void insert_copy(LIR_Opr from, LIR_Opr to);
  void do_rename(LIR_Opr from, LIR_Opr to);
  void do_push(LIR_Opr opr);
  void pop_if_last_use(LIR_Op* op, LIR_Opr opr);
  void pop_always(LIR_Op* op, LIR_Opr opr);
  void clear_fpu_stack(LIR_Opr preserve);
  void handle_op1(LIR_Op1* op1);
  void handle_op2(LIR_Op2* op2);
  void handle_opCall(LIR_OpCall* opCall);
  void compute_debug_information(LIR_Op* op);
  void allocate_exception_handler(XHandler* xhandler);
  void allocate_block(BlockBegin* block);

#ifndef PRODUCT
  void check_invalid_lir_op(LIR_Op* op);
#endif

  // Helper functions for merging of fpu stacks
  void merge_insert_add(LIR_List* instrs, FpuStackSim* cur_sim, int reg);
  void merge_insert_xchg(LIR_List* instrs, FpuStackSim* cur_sim, int slot);
  void merge_insert_pop(LIR_List* instrs, FpuStackSim* cur_sim);
  bool merge_rename(FpuStackSim* cur_sim, FpuStackSim* sux_sim, int start_slot, int change_slot);
  void merge_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, FpuStackSim* sux_sim);
  void merge_cleanup_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, BitMap& live_fpu_regs);
  bool merge_fpu_stack_with_successors(BlockBegin* block);

 public:
  LIR_Opr to_fpu_stack(LIR_Opr opr); // used by LinearScan for creation of debug information

  FpuStackAllocator(Compilation* compilation, LinearScan* allocator);
  void allocate();
};

#endif // CPU_X86_VM_C1_LINEARSCAN_X86_HPP