/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_x86.inline.hpp"

// No additional outgoing-argument area is reserved for C runtime calls on x86.
const int FrameMap::pd_c_runtime_reserved_arg_size = 0;

// Map a calling-convention location (a VMRegPair produced by SharedRuntime)
// to the C1 LIR operand describing a value of the given basic type: a stack
// address, a CPU register operand, an FPU stack slot, or an XMM register.
LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
  LIR_Opr opr = LIR_OprFact::illegalOpr;
  VMReg r_1 = reg->first();
  VMReg r_2 = reg->second();
  if (r_1->is_stack()) {
    // Convert stack slot to an SP offset
    // The calling convention does not count the SharedRuntime::out_preserve_stack_slots() value
    // so we must add it in here.
    int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
    opr = LIR_OprFact::address(new LIR_Address(rsp_opr, st_off, type));
  } else if (r_1->is_Register()) {
    Register reg = r_1->as_Register();
    if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
      Register reg2 = r_2->as_Register();
#ifdef _LP64
      // On 64-bit a long occupies a single register, so both halves of the
      // VMRegPair must name the same register.
      assert(reg2 == reg, "must be same register");
      opr = as_long_opr(reg);
#else
      opr = as_long_opr(reg2, reg);
#endif // _LP64
    } else if (type == T_OBJECT || type == T_ARRAY) {
      opr = as_oop_opr(reg);
    } else if (type == T_METADATA) {
      opr = as_metadata_opr(reg);
    } else {
      opr = as_opr(reg);
    }
  } else if (r_1->is_FloatRegister()) {
    assert(type == T_DOUBLE || type == T_FLOAT, "wrong type");
    int num = r_1->as_FloatRegister()->encoding();
    if (type == T_FLOAT) {
      opr = LIR_OprFact::single_fpu(num);
    } else {
      opr = LIR_OprFact::double_fpu(num);
    }
  } else if (r_1->is_XMMRegister()) {
    assert(type == T_DOUBLE || type == T_FLOAT, "wrong type");
    int num = r_1->as_XMMRegister()->encoding();
    if (type == T_FLOAT) {
      opr = LIR_OprFact::single_xmm(num);
    } else {
      opr = LIR_OprFact::double_xmm(num);
    }
  } else {
    ShouldNotReachHere();
  }
  return opr;
}


// Definitions of the cached static LIR operands declared in c1_FrameMap.hpp;
// they are filled in by FrameMap::initialize().
LIR_Opr FrameMap::rsi_opr;
LIR_Opr FrameMap::rdi_opr;
LIR_Opr FrameMap::rbx_opr;
LIR_Opr FrameMap::rax_opr;
LIR_Opr FrameMap::rdx_opr;
LIR_Opr FrameMap::rcx_opr;
LIR_Opr FrameMap::rsp_opr;
LIR_Opr FrameMap::rbp_opr;

LIR_Opr FrameMap::receiver_opr;

LIR_Opr FrameMap::rsi_oop_opr;
LIR_Opr FrameMap::rdi_oop_opr;
LIR_Opr FrameMap::rbx_oop_opr;
LIR_Opr FrameMap::rax_oop_opr;
LIR_Opr FrameMap::rdx_oop_opr;
LIR_Opr FrameMap::rcx_oop_opr;

LIR_Opr FrameMap::rsi_metadata_opr;
LIR_Opr FrameMap::rdi_metadata_opr;
LIR_Opr FrameMap::rbx_metadata_opr;
LIR_Opr FrameMap::rax_metadata_opr;
LIR_Opr
FrameMap::rdx_metadata_opr;
LIR_Opr FrameMap::rcx_metadata_opr;

LIR_Opr FrameMap::long0_opr;
LIR_Opr FrameMap::long1_opr;
LIR_Opr FrameMap::fpu0_float_opr;
LIR_Opr FrameMap::fpu0_double_opr;
LIR_Opr FrameMap::xmm0_float_opr;
LIR_Opr FrameMap::xmm0_double_opr;

#ifdef _LP64

LIR_Opr FrameMap::r8_opr;
LIR_Opr FrameMap::r9_opr;
LIR_Opr FrameMap::r10_opr;
LIR_Opr FrameMap::r11_opr;
LIR_Opr FrameMap::r12_opr;
LIR_Opr FrameMap::r13_opr;
LIR_Opr FrameMap::r14_opr;
LIR_Opr FrameMap::r15_opr;

// r10 and r15 can never contain oops since they aren't available to
// the allocator
LIR_Opr FrameMap::r8_oop_opr;
LIR_Opr FrameMap::r9_oop_opr;
LIR_Opr FrameMap::r11_oop_opr;
LIR_Opr FrameMap::r12_oop_opr;
LIR_Opr FrameMap::r13_oop_opr;
LIR_Opr FrameMap::r14_oop_opr;

LIR_Opr FrameMap::r8_metadata_opr;
LIR_Opr FrameMap::r9_metadata_opr;
LIR_Opr FrameMap::r11_metadata_opr;
LIR_Opr FrameMap::r12_metadata_opr;
LIR_Opr FrameMap::r13_metadata_opr;
LIR_Opr FrameMap::r14_metadata_opr;
#endif // _LP64

LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, };
LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, };
LIR_Opr FrameMap::_caller_save_xmm_regs[] = { 0, };

XMMRegister FrameMap::_xmm_regs [] = { 0, };

// Translate an allocator register number into the corresponding XMM register.
// Only valid after initialize() has populated the table.
XMMRegister FrameMap::nr2xmmreg(int rnr) {
  assert(_init_done, "tables not initialized");
  return _xmm_regs[rnr];
}

//--------------------------------------------------------
// FrameMap
//--------------------------------------------------------

// Populate the register-number <-> register mapping and the cached static
// LIR operands defined above. Runs exactly once, before any other use of
// the FrameMap (enforced by the _init_done assertions).
void FrameMap::initialize() {
  assert(!_init_done, "once");

  assert(nof_cpu_regs == LP64_ONLY(16) NOT_LP64(8), "wrong number of CPU registers");
  map_register(0, rsi); rsi_opr = LIR_OprFact::single_cpu(0);
  map_register(1, rdi); rdi_opr = LIR_OprFact::single_cpu(1);
  map_register(2, rbx); rbx_opr = LIR_OprFact::single_cpu(2);
  map_register(3, rax); rax_opr = LIR_OprFact::single_cpu(3);
  map_register(4, rdx); rdx_opr = LIR_OprFact::single_cpu(4);
  map_register(5, rcx); rcx_opr = LIR_OprFact::single_cpu(5);

#ifndef _LP64
  // The unallocatable registers are at the end
  map_register(6, rsp);
  map_register(7, rbp);
#else
  map_register( 6, r8);  r8_opr  = LIR_OprFact::single_cpu(6);
  map_register( 7, r9);  r9_opr  = LIR_OprFact::single_cpu(7);
  map_register( 8, r11); r11_opr = LIR_OprFact::single_cpu(8);
  map_register( 9, r13); r13_opr = LIR_OprFact::single_cpu(9);
  map_register(10, r14); r14_opr = LIR_OprFact::single_cpu(10);
  // r12 is allocated conditionally. With compressed oops it holds
  // the heapbase value and is not visible to the allocator.
  map_register(11, r12); r12_opr = LIR_OprFact::single_cpu(11);
  // The unallocatable registers are at the end
  map_register(12, r10); r10_opr = LIR_OprFact::single_cpu(12);
  map_register(13, r15); r15_opr = LIR_OprFact::single_cpu(13);
  map_register(14, rsp);
  map_register(15, rbp);
#endif // _LP64

#ifdef _LP64
  // On 64-bit a long lives in one register, so both halves name it.
  long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 3 /*eax*/);
  long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 2 /*ebx*/);
#else
  long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 4 /*edx*/);
  long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 5 /*ecx*/);
#endif // _LP64
  fpu0_float_opr  = LIR_OprFact::single_fpu(0);
  fpu0_double_opr = LIR_OprFact::double_fpu(0);
  xmm0_float_opr  = LIR_OprFact::single_xmm(0);
  xmm0_double_opr = LIR_OprFact::double_xmm(0);

  _caller_save_cpu_regs[0] = rsi_opr;
  _caller_save_cpu_regs[1] = rdi_opr;
  _caller_save_cpu_regs[2] = rbx_opr;
  _caller_save_cpu_regs[3] = rax_opr;
  _caller_save_cpu_regs[4] = rdx_opr;
  _caller_save_cpu_regs[5] = rcx_opr;

#ifdef _LP64
  _caller_save_cpu_regs[6] = r8_opr;
  _caller_save_cpu_regs[7] = r9_opr;
  _caller_save_cpu_regs[8] = r11_opr;
  _caller_save_cpu_regs[9] = r13_opr;
  _caller_save_cpu_regs[10] = r14_opr;
  _caller_save_cpu_regs[11] = r12_opr;
#endif // _LP64


  _xmm_regs[0] = xmm0;
  _xmm_regs[1] = xmm1;
  _xmm_regs[2] = xmm2;
  _xmm_regs[3] = xmm3;
  _xmm_regs[4] = xmm4;
  _xmm_regs[5] = xmm5;
  _xmm_regs[6] = xmm6;
  _xmm_regs[7] = xmm7;

#ifdef _LP64
  _xmm_regs[8] = xmm8;
  _xmm_regs[9] = xmm9;
  _xmm_regs[10] = xmm10;
  _xmm_regs[11] = xmm11;
  _xmm_regs[12] = xmm12;
  _xmm_regs[13] = xmm13;
  _xmm_regs[14] = xmm14;
  _xmm_regs[15] = xmm15;
#endif // _LP64

  // All eight x87 stack slots are treated as caller-saved.
  for (int i = 0; i < 8; i++) {
    _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
  }

  for (int i = 0; i < nof_caller_save_xmm_regs ; i++) {
    _caller_save_xmm_regs[i] = LIR_OprFact::single_xmm(i);
  }

  // Mark the tables initialized before using the as_*_opr helpers below,
  // which assert _init_done.
  _init_done = true;

  rsi_oop_opr = as_oop_opr(rsi);
  rdi_oop_opr = as_oop_opr(rdi);
  rbx_oop_opr = as_oop_opr(rbx);
  rax_oop_opr = as_oop_opr(rax);
  rdx_oop_opr = as_oop_opr(rdx);
  rcx_oop_opr = as_oop_opr(rcx);

  rsi_metadata_opr = as_metadata_opr(rsi);
  rdi_metadata_opr = as_metadata_opr(rdi);
  rbx_metadata_opr = as_metadata_opr(rbx);
  rax_metadata_opr = as_metadata_opr(rax);
  rdx_metadata_opr = as_metadata_opr(rdx);
  rcx_metadata_opr = as_metadata_opr(rcx);

  rsp_opr = as_pointer_opr(rsp);
  rbp_opr = as_pointer_opr(rbp);

#ifdef _LP64
  r8_oop_opr = as_oop_opr(r8);
  r9_oop_opr = as_oop_opr(r9);
  r11_oop_opr = as_oop_opr(r11);
  r12_oop_opr = as_oop_opr(r12);
  r13_oop_opr = as_oop_opr(r13);
  r14_oop_opr = as_oop_opr(r14);

  r8_metadata_opr = as_metadata_opr(r8);
  r9_metadata_opr = as_metadata_opr(r9);
  r11_metadata_opr = as_metadata_opr(r11);
  r12_metadata_opr = as_metadata_opr(r12);
  r13_metadata_opr = as_metadata_opr(r13);
  r14_metadata_opr = as_metadata_opr(r14);
#endif // _LP64

  // Ask the Java calling convention where a single oop argument (the
  // receiver) arrives, and cache that location as receiver_opr.
  VMRegPair regs;
  BasicType sig_bt = T_OBJECT;
SharedRuntime::java_calling_convention(&sig_bt, ®s, 1, true); 284 receiver_opr = as_oop_opr(regs.first()->as_Register()); 285 286 } 287 288 289 Address FrameMap::make_new_address(ByteSize sp_offset) const { 290 // for rbp, based address use this: 291 // return Address(rbp, in_bytes(sp_offset) - (framesize() - 2) * 4); 292 return Address(rsp, in_bytes(sp_offset)); 293 } 294 295 296 // ----------------mapping----------------------- 297 // all mapping is based on rbp, addressing, except for simple leaf methods where we access 298 // the locals rsp based (and no frame is built) 299 300 301 // Frame for simple leaf methods (quick entries) 302 // 303 // +----------+ 304 // | ret addr | <- TOS 305 // +----------+ 306 // | args | 307 // | ...... | 308 309 // Frame for standard methods 310 // 311 // | .........| <- TOS 312 // | locals | 313 // +----------+ 314 // | old rbp, | <- EBP 315 // +----------+ 316 // | ret addr | 317 // +----------+ 318 // | args | 319 // | .........| 320 321 322 // For OopMaps, map a local variable or spill index to an VMRegImpl name. 323 // This is the offset from sp() in the frame of the slot for the index, 324 // skewed by VMRegImpl::stack0 to indicate a stack location (vs.a register.) 325 // 326 // framesize + 327 // stack0 stack0 0 <- VMReg 328 // | | <registers> | 329 // ...........|..............|.............| 330 // 0 1 2 3 x x 4 5 6 ... | <- local indices 331 // ^ ^ sp() ( x x indicate link 332 // | | and return addr) 333 // arguments non-argument locals 334 335 336 VMReg FrameMap::fpu_regname (int n) { 337 // Return the OptoReg name for the fpu stack slot "n" 338 // A spilled fpu stack slot comprises to two single-word OptoReg's. 
  return as_FloatRegister(n)->as_VMReg();
}

// The LIR operand representing the stack pointer (rsp).
LIR_Opr FrameMap::stack_pointer() {
  return FrameMap::rsp_opr;
}


// JSR 292
// Operand for the register used to save the caller's SP across a method
// handle invoke; the assert checks it is rbp (rbp_mh_SP_save) on x86.
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
  assert(rbp == rbp_mh_SP_save, "must be same register");
  return rbp_opr;
}


// No platform-specific frame validation is performed on x86.
bool FrameMap::validate_frame() {
  return true;
}