1 /* 2 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "c1/c1_Defs.hpp" 28 #include "c1/c1_MacroAssembler.hpp" 29 #include "c1/c1_Runtime1.hpp" 30 #include "interpreter/interpreter.hpp" 31 #include "nativeInst_x86.hpp" 32 #include "oops/compiledICHolder.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "prims/jvmtiExport.hpp" 35 #include "register_x86.hpp" 36 #include "runtime/sharedRuntime.hpp" 37 #include "runtime/signature.hpp" 38 #include "runtime/vframeArray.hpp" 39 #include "utilities/macros.hpp" 40 #include "vmreg_x86.inline.hpp" 41 42 // Implementation of StubAssembler 43 44 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) { 45 // setup registers 46 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions) 47 assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different"); 48 assert(oop_result1 != thread && metadata_result != thread, "registers must be different"); 49 assert(args_size >= 0, "illegal args_size"); 50 bool align_stack = false; 51 #ifdef _LP64 52 // At a method handle call, the stack may not be properly aligned 53 // when returning with an exception. 54 align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id); 55 #endif 56 57 #ifdef _LP64 58 mov(c_rarg0, thread); 59 set_num_rt_args(0); // Nothing on stack 60 #else 61 set_num_rt_args(1 + args_size); 62 63 // push java thread (becomes first argument of C function) 64 get_thread(thread); 65 push(thread); 66 #endif // _LP64 67 68 int call_offset; 69 if (!align_stack) { 70 set_last_Java_frame(thread, noreg, rbp, NULL); 71 } else { 72 address the_pc = pc(); 73 call_offset = offset(); 74 set_last_Java_frame(thread, noreg, rbp, the_pc); 75 andptr(rsp, -(StackAlignmentInBytes)); // Align stack 76 } 77 78 // do the call 79 call(RuntimeAddress(entry)); 80 if (!align_stack) { 81 call_offset = offset(); 82 } 83 // verify callee-saved register 84 #ifdef ASSERT 85 guarantee(thread != rax, "change this code"); 86 push(rax); 87 { Label L; 88 get_thread(rax); 89 cmpptr(thread, rax); 90 jcc(Assembler::equal, L); 91 int3(); 92 stop("StubAssembler::call_RT: rdi not callee saved?"); 93 bind(L); 94 } 95 pop(rax); 96 #endif 97 reset_last_Java_frame(thread, true); 98 99 // discard thread and arguments 100 NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord)); 101 102 // check for pending exceptions 103 { Label L; 104 cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 105 jcc(Assembler::equal, L); 106 // exception pending => remove activation and forward to exception handler 107 movptr(rax, Address(thread, Thread::pending_exception_offset())); 108 // make sure that the vm_results are cleared 109 if (oop_result1->is_valid()) { 110 movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); 111 } 112 if (metadata_result->is_valid()) { 113 movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); 114 } 115 if (frame_size() == no_frame_size) { 116 leave(); 117 jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 118 } else if (_stub_id == Runtime1::forward_exception_id) { 119 should_not_reach_here(); 120 } else { 121 jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id))); 122 } 123 bind(L); 124 } 125 // get oop results if there are any and reset the values in the thread 126 if (oop_result1->is_valid()) { 127 get_vm_result(oop_result1, thread); 128 } 129 if (metadata_result->is_valid()) { 130 get_vm_result_2(metadata_result, thread); 131 } 132 return call_offset; 133 } 134 135 136 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) { 137 #ifdef _LP64 138 mov(c_rarg1, arg1); 139 #else 140 push(arg1); 141 #endif // _LP64 142 return call_RT(oop_result1, metadata_result, entry, 1); 143 } 144 145 146 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) { 147 #ifdef _LP64 148 if (c_rarg1 == arg2) { 149 if (c_rarg2 == arg1) { 150 xchgq(arg1, arg2); 151 } else { 152 mov(c_rarg2, arg2); 153 mov(c_rarg1, arg1); 154 } 155 } else { 156 mov(c_rarg1, arg1); 157 mov(c_rarg2, arg2); 158 } 159 #else 160 push(arg2); 161 push(arg1); 162 #endif // _LP64 163 return call_RT(oop_result1, metadata_result, entry, 2); 164 } 165 166 167 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) { 168 #ifdef _LP64 169 // if there is any conflict use the stack 170 if (arg1 == c_rarg2 || arg1 == c_rarg3 || 171 arg2 == c_rarg1 || arg1 == c_rarg3 || 172 arg3 == c_rarg1 || arg1 == c_rarg2) { 173 push(arg3); 174 push(arg2); 175 push(arg1); 176 pop(c_rarg1); 177 pop(c_rarg2); 178 pop(c_rarg3); 179 } else { 180 mov(c_rarg1, arg1); 181 mov(c_rarg2, arg2); 182 mov(c_rarg3, arg3); 183 } 184 #else 185 push(arg3); 186 push(arg2); 187 push(arg1); 188 #endif // _LP64 189 return call_RT(oop_result1, metadata_result, entry, 3); 190 } 191 192 193 // Implementation of StubFrame 194 195 class StubFrame: public StackObj { 196 private: 197 StubAssembler* _sasm; 198 199 public: 200 StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments); 201 void load_argument(int offset_in_words, Register reg); 202 203 ~StubFrame(); 204 }; 205 206 void StubAssembler::prologue(const char* name, bool must_gc_arguments) { 207 set_info(name, must_gc_arguments); 208 enter(); 209 } 210 211 void StubAssembler::epilogue() { 212 leave(); 213 ret(0); 214 } 215 216 #define __ _sasm-> 217 218 StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) { 219 _sasm = sasm; 220 __ prologue(name, must_gc_arguments); 221 } 222 223 // load parameters that were stored with LIR_Assembler::store_parameter 224 // Note: offsets for store_parameter and load_argument must match 225 void StubFrame::load_argument(int offset_in_words, Register reg) { 226 __ load_parameter(offset_in_words, reg); 227 } 228 229 230 StubFrame::~StubFrame() { 231 __ epilogue(); 232 } 233 234 #undef __ 235 236 237 // Implementation of Runtime1 238 239 const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2; 240 const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2; 241 242 // Stack layout for saving/restoring all the registers needed during a runtime 243 // call (this includes deoptimization) 244 // Note: note that users of this frame may well have arguments to some runtime 245 // while these values are on the stack. These positions neglect those arguments 246 // but the code in save_live_registers will take the argument count into 247 // account. 248 // 249 #ifdef _LP64 250 #define SLOT2(x) x, 251 #define SLOT_PER_WORD 2 252 #else 253 #define SLOT2(x) 254 #define SLOT_PER_WORD 1 255 #endif // _LP64 256 257 enum reg_save_layout { 258 // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that 259 // happen and will assert if the stack size we create is misaligned 260 #ifdef _LP64 261 align_dummy_0, align_dummy_1, 262 #endif // _LP64 263 #ifdef _WIN64 264 // Windows always allocates space for it's argument registers (see 265 // frame::arg_reg_save_area_bytes). 266 arg_reg_save_1, arg_reg_save_1H, // 0, 4 267 arg_reg_save_2, arg_reg_save_2H, // 8, 12 268 arg_reg_save_3, arg_reg_save_3H, // 16, 20 269 arg_reg_save_4, arg_reg_save_4H, // 24, 28 270 #endif // _WIN64 271 xmm_regs_as_doubles_off, // 32 272 float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160 273 fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224 274 // fpu_state_end_off is exclusive 275 fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352 276 marker = fpu_state_end_off, SLOT2(markerH) // 352, 356 277 extra_space_offset, // 360 278 #ifdef _LP64 279 r15_off = extra_space_offset, r15H_off, // 360, 364 280 r14_off, r14H_off, // 368, 372 281 r13_off, r13H_off, // 376, 380 282 r12_off, r12H_off, // 384, 388 283 r11_off, r11H_off, // 392, 396 284 r10_off, r10H_off, // 400, 404 285 r9_off, r9H_off, // 408, 412 286 r8_off, r8H_off, // 416, 420 287 rdi_off, rdiH_off, // 424, 428 288 #else 289 rdi_off = extra_space_offset, 290 #endif // _LP64 291 rsi_off, SLOT2(rsiH_off) // 432, 436 292 rbp_off, SLOT2(rbpH_off) // 440, 444 293 rsp_off, SLOT2(rspH_off) // 448, 452 294 rbx_off, SLOT2(rbxH_off) // 456, 460 295 rdx_off, SLOT2(rdxH_off) // 464, 468 296 rcx_off, SLOT2(rcxH_off) // 472, 476 297 rax_off, SLOT2(raxH_off) // 480, 484 298 saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492 299 return_off, SLOT2(returnH_off) // 496, 500 300 reg_save_frame_size // As noted: neglects any parameters to runtime // 504 301 }; 302 303 // Save off registers which might be killed by calls into the runtime. 304 // Tries to smart of about FP registers. In particular we separate 305 // saving and describing the FPU registers for deoptimization since we 306 // have to save the FPU registers twice if we describe them and on P4 307 // saving FPU registers which don't contain anything appears 308 // expensive. The deopt blob is the only thing which needs to 309 // describe FPU registers. In all other cases it should be sufficient 310 // to simply save their current value. 311 312 static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args, 313 bool save_fpu_registers = true) { 314 315 // In 64bit all the args are in regs so there are no additional stack slots 316 LP64_ONLY(num_rt_args = 0); 317 LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");) 318 int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread 319 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word); 320 321 // record saved value locations in an OopMap 322 // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread 323 OopMap* map = new OopMap(frame_size_in_slots, 0); 324 map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg()); 325 map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg()); 326 map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg()); 327 map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg()); 328 map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg()); 329 map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg()); 330 #ifdef _LP64 331 map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8->as_VMReg()); 332 map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9->as_VMReg()); 333 map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg()); 334 map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg()); 335 map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg()); 336 map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg()); 337 map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg()); 338 map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg()); 339 340 // This is stupid but needed. 341 map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next()); 342 map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next()); 343 map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next()); 344 map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next()); 345 map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next()); 346 map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next()); 347 348 map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args), r8->as_VMReg()->next()); 349 map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args), r9->as_VMReg()->next()); 350 map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next()); 351 map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next()); 352 map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next()); 353 map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next()); 354 map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next()); 355 map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next()); 356 #endif // _LP64 357 358 int xmm_bypass_limit = FrameMap::nof_xmm_regs; 359 #ifdef _LP64 360 if (UseAVX < 3) { 361 xmm_bypass_limit = xmm_bypass_limit / 2; 362 } 363 #endif 364 365 if (save_fpu_registers) { 366 if (UseSSE < 2) { 367 int fpu_off = float_regs_as_doubles_off; 368 for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { 369 VMReg fpu_name_0 = FrameMap::fpu_regname(n); 370 map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0); 371 // %%% This is really a waste but we'll keep things as they were for now 372 if (true) { 373 map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next()); 374 } 375 fpu_off += 2; 376 } 377 assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots"); 378 } 379 380 if (UseSSE >= 2) { 381 int xmm_off = xmm_regs_as_doubles_off; 382 for (int n = 0; n < FrameMap::nof_xmm_regs; n++) { 383 if (n < xmm_bypass_limit) { 384 VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); 385 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); 386 // %%% This is really a waste but we'll keep things as they were for now 387 if (true) { 388 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next()); 389 } 390 } 391 xmm_off += 2; 392 } 393 assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers"); 394 395 } else if (UseSSE == 1) { 396 int xmm_off = xmm_regs_as_doubles_off; 397 for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { 398 VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); 399 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); 400 xmm_off += 2; 401 } 402 assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers"); 403 } 404 } 405 406 return map; 407 } 408 409 #define __ this-> 410 411 void C1_MacroAssembler::save_live_registers_no_oop_map(int num_rt_args, bool save_fpu_registers) { 412 __ block_comment("save_live_registers"); 413 414 __ pusha(); // integer registers 415 416 // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset"); 417 // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset"); 418 419 __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); 420 421 #ifdef ASSERT 422 __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); 423 #endif 424 425 if (save_fpu_registers) { 426 if (UseSSE < 2) { 427 // save FPU stack 428 __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); 429 __ fwait(); 430 431 #ifdef ASSERT 432 Label ok; 433 __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std()); 434 __ jccb(Assembler::equal, ok); 435 __ stop("corrupted control word detected"); 436 __ bind(ok); 437 #endif 438 439 // Reset the control word to guard against exceptions being unmasked 440 // since fstp_d can cause FPU stack underflow exceptions. Write it 441 // into the on stack copy and then reload that to make sure that the 442 // current and future values are correct. 443 __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std()); 444 __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); 445 446 // Save the FPU registers in de-opt-able form 447 int offset = 0; 448 for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { 449 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); 450 offset += 8; 451 } 452 } 453 454 if (UseSSE >= 2) { 455 // save XMM registers 456 // XMM registers can contain float or double values, but this is not known here, 457 // so always save them as doubles. 458 // note that float values are _not_ converted automatically, so for float values 459 // the second word contains only garbage data. 460 int xmm_bypass_limit = FrameMap::nof_xmm_regs; 461 int offset = 0; 462 #ifdef _LP64 463 if (UseAVX < 3) { 464 xmm_bypass_limit = xmm_bypass_limit / 2; 465 } 466 #endif 467 for (int n = 0; n < xmm_bypass_limit; n++) { 468 XMMRegister xmm_name = as_XMMRegister(n); 469 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name); 470 offset += 8; 471 } 472 } else if (UseSSE == 1) { 473 // save XMM registers as float because double not supported without SSE2(num MMX == num fpu) 474 int offset = 0; 475 for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { 476 XMMRegister xmm_name = as_XMMRegister(n); 477 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name); 478 offset += 8; 479 } 480 } 481 } 482 483 // FPU stack must be empty now 484 __ verify_FPU(0, "save_live_registers"); 485 } 486 487 #undef __ 488 #define __ sasm-> 489 490 static void restore_fpu(C1_MacroAssembler* sasm, bool restore_fpu_registers) { 491 if (restore_fpu_registers) { 492 if (UseSSE >= 2) { 493 // restore XMM registers 494 int xmm_bypass_limit = FrameMap::nof_xmm_regs; 495 #ifdef _LP64 496 if (UseAVX < 3) { 497 xmm_bypass_limit = xmm_bypass_limit / 2; 498 } 499 #endif 500 int offset = 0; 501 for (int n = 0; n < xmm_bypass_limit; n++) { 502 XMMRegister xmm_name = as_XMMRegister(n); 503 __ movdbl(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); 504 offset += 8; 505 } 506 } else if (UseSSE == 1) { 507 // restore XMM registers(num MMX == num fpu) 508 int offset = 0; 509 for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { 510 XMMRegister xmm_name = as_XMMRegister(n); 511 __ movflt(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); 512 offset += 8; 513 } 514 } 515 516 if (UseSSE < 2) { 517 __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); 518 } else { 519 // check that FPU stack is really empty 520 __ verify_FPU(0, "restore_live_registers"); 521 } 522 523 } else { 524 // check that FPU stack is really empty 525 __ verify_FPU(0, "restore_live_registers"); 526 } 527 528 #ifdef ASSERT 529 { 530 Label ok; 531 __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); 532 __ jcc(Assembler::equal, ok); 533 __ stop("bad offsets in frame"); 534 __ bind(ok); 535 } 536 #endif // ASSERT 537 538 __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); 539 } 540 541 #undef __ 542 #define __ this-> 543 544 void C1_MacroAssembler::restore_live_registers(bool restore_fpu_registers) { 545 __ block_comment("restore_live_registers"); 546 547 restore_fpu(this, restore_fpu_registers); 548 __ popa(); 549 } 550 551 #undef __ 552 #define __ sasm-> 553 554 static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args, 555 bool save_fpu_registers = true) { 556 sasm->save_live_registers_no_oop_map(num_rt_args, save_fpu_registers); 557 return generate_oop_map(sasm, num_rt_args, save_fpu_registers); 558 } 559 560 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) { 561 sasm->restore_live_registers(restore_fpu_registers); 562 } 563 564 static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) { 565 __ block_comment("restore_live_registers_except_rax"); 566 567 restore_fpu(sasm, restore_fpu_registers); 568 569 #ifdef _LP64 570 __ movptr(r15, Address(rsp, 0)); 571 __ movptr(r14, Address(rsp, wordSize)); 572 __ movptr(r13, Address(rsp, 2 * wordSize)); 573 __ movptr(r12, Address(rsp, 3 * wordSize)); 574 __ movptr(r11, Address(rsp, 4 * wordSize)); 575 __ movptr(r10, Address(rsp, 5 * wordSize)); 576 __ movptr(r9, Address(rsp, 6 * wordSize)); 577 __ movptr(r8, Address(rsp, 7 * wordSize)); 578 __ movptr(rdi, Address(rsp, 8 * wordSize)); 579 __ movptr(rsi, Address(rsp, 9 * wordSize)); 580 __ movptr(rbp, Address(rsp, 10 * wordSize)); 581 // skip rsp 582 __ movptr(rbx, Address(rsp, 12 * wordSize)); 583 __ movptr(rdx, Address(rsp, 13 * wordSize)); 584 __ movptr(rcx, Address(rsp, 14 * wordSize)); 585 586 __ addptr(rsp, 16 * wordSize); 587 #else 588 589 __ pop(rdi); 590 __ pop(rsi); 591 __ pop(rbp); 592 __ pop(rbx); // skip this value 593 __ pop(rbx); 594 __ pop(rdx); 595 __ pop(rcx); 596 __ addptr(rsp, BytesPerWord); 597 #endif // _LP64 598 } 599 600 601 void Runtime1::initialize_pd() { 602 // nothing to do 603 } 604 605 606 // target: the entry point of the method that creates and posts the exception oop 607 // has_argument: true if the exception needs an argument (passed on stack because registers must be preserved) 608 609 OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) { 610 // preserve all registers 611 int num_rt_args = has_argument ? 2 : 1; 612 OopMap* oop_map = save_live_registers(sasm, num_rt_args); 613 614 // now all registers are saved and can be used freely 615 // verify that no old value is used accidentally 616 __ invalidate_registers(true, true, true, true, true, true); 617 618 // registers used by this stub 619 const Register temp_reg = rbx; 620 621 // load argument for exception that is passed as an argument into the stub 622 if (has_argument) { 623 #ifdef _LP64 624 __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord)); 625 #else 626 __ movptr(temp_reg, Address(rbp, 2*BytesPerWord)); 627 __ push(temp_reg); 628 #endif // _LP64 629 } 630 int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1); 631 632 OopMapSet* oop_maps = new OopMapSet(); 633 oop_maps->add_gc_map(call_offset, oop_map); 634 635 __ stop("should not reach here"); 636 637 return oop_maps; 638 } 639 640 641 OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { 642 __ block_comment("generate_handle_exception"); 643 644 // incoming parameters 645 const Register exception_oop = rax; 646 const Register exception_pc = rdx; 647 // other registers used in this stub 648 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); 649 650 // Save registers, if required. 651 OopMapSet* oop_maps = new OopMapSet(); 652 OopMap* oop_map = NULL; 653 switch (id) { 654 case forward_exception_id: 655 // We're handling an exception in the context of a compiled frame. 656 // The registers have been saved in the standard places. Perform 657 // an exception lookup in the caller and dispatch to the handler 658 // if found. Otherwise unwind and dispatch to the callers 659 // exception handler. 660 oop_map = generate_oop_map(sasm, 1 /*thread*/); 661 662 // load and clear pending exception oop into RAX 663 __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset())); 664 __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); 665 666 // load issuing PC (the return address for this stub) into rdx 667 __ movptr(exception_pc, Address(rbp, 1*BytesPerWord)); 668 669 // make sure that the vm_results are cleared (may be unnecessary) 670 __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); 671 __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); 672 break; 673 case handle_exception_nofpu_id: 674 case handle_exception_id: 675 // At this point all registers MAY be live. 676 oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id); 677 break; 678 case handle_exception_from_callee_id: { 679 // At this point all registers except exception oop (RAX) and 680 // exception pc (RDX) are dead. 681 const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord); 682 oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0); 683 sasm->set_frame_size(frame_size); 684 WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes)); 685 break; 686 } 687 default: ShouldNotReachHere(); 688 } 689 690 #ifdef TIERED 691 // C2 can leave the fpu stack dirty 692 if (UseSSE < 2) { 693 __ empty_FPU_stack(); 694 } 695 #endif // TIERED 696 697 // verify that only rax, and rdx is valid at this time 698 __ invalidate_registers(false, true, true, false, true, true); 699 // verify that rax, contains a valid exception 700 __ verify_not_null_oop(exception_oop); 701 702 // load address of JavaThread object for thread-local data 703 NOT_LP64(__ get_thread(thread);) 704 705 #ifdef ASSERT 706 // check that fields in JavaThread for exception oop and issuing pc are 707 // empty before writing to them 708 Label oop_empty; 709 __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD); 710 __ jcc(Assembler::equal, oop_empty); 711 __ stop("exception oop already set"); 712 __ bind(oop_empty); 713 714 Label pc_empty; 715 __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0); 716 __ jcc(Assembler::equal, pc_empty); 717 __ stop("exception pc already set"); 718 __ bind(pc_empty); 719 #endif 720 721 // save exception oop and issuing pc into JavaThread 722 // (exception handler will load it from here) 723 __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop); 724 __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc); 725 726 // patch throwing pc into return address (has bci & oop map) 727 __ movptr(Address(rbp, 1*BytesPerWord), exception_pc); 728 729 // compute the exception handler. 730 // the exception oop and the throwing pc are read from the fields in JavaThread 731 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); 732 oop_maps->add_gc_map(call_offset, oop_map); 733 734 // rax: handler address 735 // will be the deopt blob if nmethod was deoptimized while we looked up 736 // handler regardless of whether handler existed in the nmethod. 737 738 // only rax, is valid at this time, all other registers have been destroyed by the runtime call 739 __ invalidate_registers(false, true, true, true, true, true); 740 741 // patch the return address, this stub will directly return to the exception handler 742 __ movptr(Address(rbp, 1*BytesPerWord), rax); 743 744 switch (id) { 745 case forward_exception_id: 746 case handle_exception_nofpu_id: 747 case handle_exception_id: 748 // Restore the registers that were saved at the beginning. 749 restore_live_registers(sasm, id != handle_exception_nofpu_id); 750 break; 751 case handle_exception_from_callee_id: 752 // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP 753 // since we do a leave anyway. 754 755 // Pop the return address. 756 __ leave(); 757 __ pop(rcx); 758 __ jmp(rcx); // jump to exception handler 759 break; 760 default: ShouldNotReachHere(); 761 } 762 763 return oop_maps; 764 } 765 766 767 void Runtime1::generate_unwind_exception(StubAssembler *sasm) { 768 // incoming parameters 769 const Register exception_oop = rax; 770 // callee-saved copy of exception_oop during runtime call 771 const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14); 772 // other registers used in this stub 773 const Register exception_pc = rdx; 774 const Register handler_addr = rbx; 775 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); 776 777 // verify that only rax, is valid at this time 778 __ invalidate_registers(false, true, true, true, true, true); 779 780 #ifdef ASSERT 781 // check that fields in JavaThread for exception oop and issuing pc are empty 782 NOT_LP64(__ get_thread(thread);) 783 Label oop_empty; 784 __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0); 785 __ jcc(Assembler::equal, oop_empty); 786 __ stop("exception oop must be empty"); 787 __ bind(oop_empty); 788 789 Label pc_empty; 790 __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0); 791 __ jcc(Assembler::equal, pc_empty); 792 __ stop("exception pc must be empty"); 793 __ bind(pc_empty); 794 #endif 795 796 // clear the FPU stack in case any FPU results are left behind 797 __ empty_FPU_stack(); 798 799 // save exception_oop in callee-saved register to preserve it during runtime calls 800 __ verify_not_null_oop(exception_oop); 801 __ movptr(exception_oop_callee_saved, exception_oop); 802 803 NOT_LP64(__ get_thread(thread);) 804 // Get return address (is on top of stack after leave). 805 __ movptr(exception_pc, Address(rsp, 0)); 806 807 // search the exception handler address of the caller (using the return address) 808 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc); 809 // rax: exception handler address of the caller 810 811 // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call. 812 __ invalidate_registers(false, true, true, true, false, true); 813 814 // move result of call into correct register 815 __ movptr(handler_addr, rax); 816 817 // Restore exception oop to RAX (required convention of exception handler). 818 __ movptr(exception_oop, exception_oop_callee_saved); 819 820 // verify that there is really a valid exception in rax 821 __ verify_not_null_oop(exception_oop); 822 823 // get throwing pc (= return address). 824 // rdx has been destroyed by the call, so it must be set again 825 // the pop is also necessary to simulate the effect of a ret(0) 826 __ pop(exception_pc); 827 828 // continue at exception handler (return address removed) 829 // note: do *not* remove arguments when unwinding the 830 // activation since the caller assumes having 831 // all arguments on the stack when entering the 832 // runtime to determine the exception handler 833 // (GC happens at call site with arguments!) 834 // rax: exception oop 835 // rdx: throwing pc 836 // rbx: exception handler 837 __ jmp(handler_addr); 838 } 839 840 841 OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { 842 // use the maximum number of runtime-arguments here because it is difficult to 843 // distinguish each RT-Call. 844 // Note: This number affects also the RT-Call in generate_handle_exception because 845 // the oop-map is shared for all calls. 846 const int num_rt_args = 2; // thread + dummy 847 848 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); 849 assert(deopt_blob != NULL, "deoptimization blob must have been created"); 850 851 OopMap* oop_map = save_live_registers(sasm, num_rt_args); 852 853 #ifdef _LP64 854 const Register thread = r15_thread; 855 // No need to worry about dummy 856 __ mov(c_rarg0, thread); 857 #else 858 __ push(rax); // push dummy 859 860 const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions) 861 // push java thread (becomes first argument of C function) 862 __ get_thread(thread); 863 __ push(thread); 864 #endif // _LP64 865 __ set_last_Java_frame(thread, noreg, rbp, NULL); 866 // do the call 867 __ call(RuntimeAddress(target)); 868 OopMapSet* oop_maps = new OopMapSet(); 869 oop_maps->add_gc_map(__ offset(), oop_map); 870 // verify callee-saved register 871 #ifdef ASSERT 872 guarantee(thread != rax, "change this code"); 873 __ push(rax); 874 { Label L; 875 __ get_thread(rax); 876 __ cmpptr(thread, rax); 877 __ jcc(Assembler::equal, L); 878 __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?"); 879 __ bind(L); 880 } 881 __ pop(rax); 882 #endif 883 __ reset_last_Java_frame(thread, true); 884 #ifndef _LP64 885 __ pop(rcx); // discard thread arg 886 __ pop(rcx); // discard dummy 887 #endif // _LP64 888 889 // check for pending exceptions 890 { Label L; 891 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 892 __ jcc(Assembler::equal, L); 893 // exception pending => remove activation and forward to exception handler 894 895 __ testptr(rax, rax); // have we deoptimized? 896 __ jump_cc(Assembler::equal, 897 RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id))); 898 899 // the deopt blob expects exceptions in the special fields of 900 // JavaThread, so copy and clear pending exception. 901 902 // load and clear pending exception 903 __ movptr(rax, Address(thread, Thread::pending_exception_offset())); 904 __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); 905 906 // check that there is really a valid exception 907 __ verify_not_null_oop(rax); 908 909 // load throwing pc: this is the return address of the stub 910 __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size)); 911 912 #ifdef ASSERT 913 // check that fields in JavaThread for exception oop and issuing pc are empty 914 Label oop_empty; 915 __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD); 916 __ jcc(Assembler::equal, oop_empty); 917 __ stop("exception oop must be empty"); 918 __ bind(oop_empty); 919 920 Label pc_empty; 921 __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD); 922 __ jcc(Assembler::equal, pc_empty); 923 __ stop("exception pc must be empty"); 924 __ bind(pc_empty); 925 #endif 926 927 // store exception oop and throwing pc to JavaThread 928 __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax); 929 __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx); 930 931 restore_live_registers(sasm); 932 933 __ leave(); 934 __ addptr(rsp, BytesPerWord); // remove return address from stack 935 936 // Forward the exception directly to deopt blob. We can blow no 937 // registers and must leave throwing pc on the stack. A patch may 938 // have values live in registers so the entry point with the 939 // exception in tls. 940 __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls())); 941 942 __ bind(L); 943 } 944 945 946 // Runtime will return true if the nmethod has been deoptimized during 947 // the patching process. In that case we must do a deopt reexecute instead. 948 949 Label reexecuteEntry, cont; 950 951 __ testptr(rax, rax); // have we deoptimized? 952 __ jcc(Assembler::equal, cont); // no 953 954 // Will reexecute. Proper return address is already on the stack we just restore 955 // registers, pop all of our frame but the return address and jump to the deopt blob 956 restore_live_registers(sasm); 957 __ leave(); 958 __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); 959 960 __ bind(cont); 961 restore_live_registers(sasm); 962 __ leave(); 963 __ ret(0); 964 965 return oop_maps; 966 } 967 968 969 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { 970 971 // for better readability 972 const bool must_gc_arguments = true; 973 const bool dont_gc_arguments = false; 974 975 // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu 976 bool save_fpu_registers = true; 977 978 // stub code & info for the different stubs 979 OopMapSet* oop_maps = NULL; 980 switch (id) { 981 case forward_exception_id: 982 { 983 oop_maps = generate_handle_exception(id, sasm); 984 __ leave(); 985 __ ret(0); 986 } 987 break; 988 989 case new_instance_id: 990 case fast_new_instance_id: 991 case fast_new_instance_init_check_id: 992 { 993 Register klass = rdx; // Incoming 994 Register obj = rax; // Result 995 996 if (id == new_instance_id) { 997 __ set_info("new_instance", dont_gc_arguments); 998 } else if (id == fast_new_instance_id) { 999 __ set_info("fast new_instance", dont_gc_arguments); 1000 } else { 1001 assert(id == fast_new_instance_init_check_id, "bad StubID"); 1002 __ set_info("fast new_instance init check", dont_gc_arguments); 1003 } 1004 1005 if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && 1006 UseTLAB && FastTLABRefill) { 1007 Label slow_path; 1008 Register obj_size = rcx; 1009 Register t1 = rbx; 1010 Register t2 = rsi; 1011 assert_different_registers(klass, obj, obj_size, t1, t2); 1012 1013 __ push(rdi); 1014 __ push(rbx); 1015 1016 if (id == fast_new_instance_init_check_id) { 1017 // make sure the klass is initialized 1018 __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); 1019 __ jcc(Assembler::notEqual, slow_path); 1020 } 1021 1022 #ifdef ASSERT 1023 // assert object can be fast path allocated 1024 { 1025 Label ok, not_ok; 1026 __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); 1027 __ cmpl(obj_size, 0); // make sure it's an instance (LH > 0) 1028 __ jcc(Assembler::lessEqual, not_ok); 1029 __ testl(obj_size, Klass::_lh_instance_slow_path_bit); 1030 __ jcc(Assembler::zero, ok); 1031 __ bind(not_ok); 1032 __ stop("assert(can be fast path allocated)"); 1033 __ should_not_reach_here(); 1034 __ bind(ok); 1035 } 1036 #endif // ASSERT 1037 1038 // if we got here then the TLAB allocation failed, so try 1039 // refilling the TLAB or allocating directly from eden. 1040 Label retry_tlab, try_eden; 1041 const Register thread = 1042 __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi 1043 1044 __ bind(retry_tlab); 1045 1046 // get the instance size (size is postive so movl is fine for 64bit) 1047 __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); 1048 1049 __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path); 1050 1051 __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ true); 1052 __ verify_oop(obj); 1053 __ pop(rbx); 1054 __ pop(rdi); 1055 __ ret(0); 1056 1057 __ bind(try_eden); 1058 // get the instance size (size is postive so movl is fine for 64bit) 1059 __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); 1060 1061 __ eden_allocate(obj, obj_size, 0, t1, slow_path); 1062 __ incr_allocated_bytes(thread, obj_size, 0); 1063 1064 __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false); 1065 __ verify_oop(obj); 1066 __ pop(rbx); 1067 __ pop(rdi); 1068 __ ret(0); 1069 1070 __ bind(slow_path); 1071 __ pop(rbx); 1072 __ pop(rdi); 1073 } 1074 1075 __ enter(); 1076 OopMap* map = save_live_registers(sasm, 2); 1077 int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass); 1078 oop_maps = new OopMapSet(); 1079 oop_maps->add_gc_map(call_offset, map); 1080 restore_live_registers_except_rax(sasm); 1081 __ verify_oop(obj); 1082 __ leave(); 1083 __ ret(0); 1084 1085 // rax,: new instance 1086 } 1087 1088 break; 1089 1090 case counter_overflow_id: 1091 { 1092 Register bci = rax, method = rbx; 1093 __ enter(); 1094 OopMap* map = save_live_registers(sasm, 3); 1095 // Retrieve bci 1096 __ movl(bci, Address(rbp, 2*BytesPerWord)); 1097 // And a pointer to the Method* 1098 __ movptr(method, Address(rbp, 3*BytesPerWord)); 1099 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method); 1100 oop_maps = new OopMapSet(); 1101 oop_maps->add_gc_map(call_offset, map); 1102 restore_live_registers(sasm); 1103 __ leave(); 1104 __ ret(0); 1105 } 1106 break; 1107 1108 case new_type_array_id: 1109 case new_object_array_id: 1110 { 1111 Register length = rbx; // Incoming 1112 Register klass = rdx; // Incoming 1113 Register obj = rax; // Result 1114 1115 if (id == new_type_array_id) { 1116 __ set_info("new_type_array", dont_gc_arguments); 1117 } else { 1118 __ set_info("new_object_array", dont_gc_arguments); 1119 } 1120 1121 #ifdef ASSERT 1122 // assert object type is really an array of the proper kind 1123 { 1124 Label ok; 1125 Register t0 = obj; 1126 __ movl(t0, Address(klass, Klass::layout_helper_offset())); 1127 __ sarl(t0, Klass::_lh_array_tag_shift); 1128 int tag = ((id == new_type_array_id) 1129 ? Klass::_lh_array_tag_type_value 1130 : Klass::_lh_array_tag_obj_value); 1131 __ cmpl(t0, tag); 1132 __ jcc(Assembler::equal, ok); 1133 __ stop("assert(is an array klass)"); 1134 __ should_not_reach_here(); 1135 __ bind(ok); 1136 } 1137 #endif // ASSERT 1138 1139 if (UseTLAB && FastTLABRefill) { 1140 Register arr_size = rsi; 1141 Register t1 = rcx; // must be rcx for use as shift count 1142 Register t2 = rdi; 1143 Label slow_path; 1144 assert_different_registers(length, klass, obj, arr_size, t1, t2); 1145 1146 // check that array length is small enough for fast path. 1147 __ cmpl(length, C1_MacroAssembler::max_array_allocation_length); 1148 __ jcc(Assembler::above, slow_path); 1149 1150 // if we got here then the TLAB allocation failed, so try 1151 // refilling the TLAB or allocating directly from eden. 1152 Label retry_tlab, try_eden; 1153 const Register thread = 1154 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi 1155 1156 __ bind(retry_tlab); 1157 1158 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) 1159 // since size is positive movl does right thing on 64bit 1160 __ movl(t1, Address(klass, Klass::layout_helper_offset())); 1161 // since size is postive movl does right thing on 64bit 1162 __ movl(arr_size, length); 1163 assert(t1 == rcx, "fixed register usage"); 1164 __ shlptr(arr_size /* by t1=rcx, mod 32 */); 1165 __ shrptr(t1, Klass::_lh_header_size_shift); 1166 __ andptr(t1, Klass::_lh_header_size_mask); 1167 __ addptr(arr_size, t1); 1168 __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up 1169 __ andptr(arr_size, ~MinObjAlignmentInBytesMask); 1170 1171 __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size 1172 1173 __ initialize_header(obj, klass, length, t1, t2); 1174 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte))); 1175 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); 1176 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); 1177 __ andptr(t1, Klass::_lh_header_size_mask); 1178 __ subptr(arr_size, t1); // body length 1179 __ addptr(t1, obj); // body start 1180 if (!ZeroTLAB) { 1181 __ initialize_body(t1, arr_size, 0, t2); 1182 } 1183 __ verify_oop(obj); 1184 __ ret(0); 1185 1186 __ bind(try_eden); 1187 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) 1188 // since size is positive movl does right thing on 64bit 1189 __ movl(t1, Address(klass, Klass::layout_helper_offset())); 1190 // since size is postive movl does right thing on 64bit 1191 __ movl(arr_size, length); 1192 assert(t1 == rcx, "fixed register usage"); 1193 __ shlptr(arr_size /* by t1=rcx, mod 32 */); 1194 __ shrptr(t1, Klass::_lh_header_size_shift); 1195 __ andptr(t1, Klass::_lh_header_size_mask); 1196 __ addptr(arr_size, t1); 1197 __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up 1198 __ andptr(arr_size, ~MinObjAlignmentInBytesMask); 1199 1200 __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size 1201 __ incr_allocated_bytes(thread, arr_size, 0); 1202 1203 __ initialize_header(obj, klass, length, t1, t2); 1204 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte))); 1205 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); 1206 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); 1207 __ andptr(t1, Klass::_lh_header_size_mask); 1208 __ subptr(arr_size, t1); // body length 1209 __ addptr(t1, obj); // body start 1210 __ initialize_body(t1, arr_size, 0, t2); 1211 __ verify_oop(obj); 1212 __ ret(0); 1213 1214 __ bind(slow_path); 1215 } 1216 1217 __ enter(); 1218 OopMap* map = save_live_registers(sasm, 3); 1219 int call_offset; 1220 if (id == new_type_array_id) { 1221 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length); 1222 } else { 1223 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length); 1224 } 1225 1226 oop_maps = new OopMapSet(); 1227 oop_maps->add_gc_map(call_offset, map); 1228 restore_live_registers_except_rax(sasm); 1229 1230 __ verify_oop(obj); 1231 __ leave(); 1232 __ ret(0); 1233 1234 // rax,: new array 1235 } 1236 break; 1237 1238 case new_multi_array_id: 1239 { StubFrame f(sasm, "new_multi_array", dont_gc_arguments); 1240 // rax,: klass 1241 // rbx,: rank 1242 // rcx: address of 1st dimension 1243 OopMap* map = save_live_registers(sasm, 4); 1244 int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx); 1245 1246 oop_maps = new OopMapSet(); 1247 oop_maps->add_gc_map(call_offset, map); 1248 restore_live_registers_except_rax(sasm); 1249 1250 // rax,: new multi array 1251 __ verify_oop(rax); 1252 } 1253 break; 1254 1255 case register_finalizer_id: 1256 { 1257 __ set_info("register_finalizer", dont_gc_arguments); 1258 1259 // This is called via call_runtime so the arguments 1260 // will be place in C abi locations 1261 1262 #ifdef _LP64 1263 __ verify_oop(c_rarg0); 1264 __ mov(rax, c_rarg0); 1265 #else 1266 // The object is passed on the stack and we haven't pushed a 1267 // frame yet so it's one work away from top of stack. 1268 __ movptr(rax, Address(rsp, 1 * BytesPerWord)); 1269 __ verify_oop(rax); 1270 #endif // _LP64 1271 1272 // load the klass and check the has finalizer flag 1273 Label register_finalizer; 1274 Register t = rsi; 1275 __ load_klass(t, rax); 1276 __ movl(t, Address(t, Klass::access_flags_offset())); 1277 __ testl(t, JVM_ACC_HAS_FINALIZER); 1278 __ jcc(Assembler::notZero, register_finalizer); 1279 __ ret(0); 1280 1281 __ bind(register_finalizer); 1282 __ enter(); 1283 OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */); 1284 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax); 1285 oop_maps = new OopMapSet(); 1286 oop_maps->add_gc_map(call_offset, oop_map); 1287 1288 // Now restore all the live registers 1289 restore_live_registers(sasm); 1290 1291 __ leave(); 1292 __ ret(0); 1293 } 1294 break; 1295 1296 case throw_range_check_failed_id: 1297 { StubFrame f(sasm, "range_check_failed", dont_gc_arguments); 1298 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true); 1299 } 1300 break; 1301 1302 case throw_index_exception_id: 1303 { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments); 1304 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true); 1305 } 1306 break; 1307 1308 case throw_div0_exception_id: 1309 { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments); 1310 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false); 1311 } 1312 break; 1313 1314 case throw_null_pointer_exception_id: 1315 { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments); 1316 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); 1317 } 1318 break; 1319 1320 case handle_exception_nofpu_id: 1321 case handle_exception_id: 1322 { StubFrame f(sasm, "handle_exception", dont_gc_arguments); 1323 oop_maps = generate_handle_exception(id, sasm); 1324 } 1325 break; 1326 1327 case handle_exception_from_callee_id: 1328 { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments); 1329 oop_maps = generate_handle_exception(id, sasm); 1330 } 1331 break; 1332 1333 case unwind_exception_id: 1334 { __ set_info("unwind_exception", dont_gc_arguments); 1335 // note: no stubframe since we are about to leave the current 1336 // activation and we are calling a leaf VM function only. 1337 generate_unwind_exception(sasm); 1338 } 1339 break; 1340 1341 case throw_array_store_exception_id: 1342 { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments); 1343 // tos + 0: link 1344 // + 1: return address 1345 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true); 1346 } 1347 break; 1348 1349 case throw_class_cast_exception_id: 1350 { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments); 1351 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true); 1352 } 1353 break; 1354 1355 case throw_incompatible_class_change_error_id: 1356 { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments); 1357 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false); 1358 } 1359 break; 1360 1361 case slow_subtype_check_id: 1362 { 1363 // Typical calling sequence: 1364 // __ push(klass_RInfo); // object klass or other subclass 1365 // __ push(sup_k_RInfo); // array element klass or other superclass 1366 // __ call(slow_subtype_check); 1367 // Note that the subclass is pushed first, and is therefore deepest. 1368 // Previous versions of this code reversed the names 'sub' and 'super'. 1369 // This was operationally harmless but made the code unreadable. 1370 enum layout { 1371 rax_off, SLOT2(raxH_off) 1372 rcx_off, SLOT2(rcxH_off) 1373 rsi_off, SLOT2(rsiH_off) 1374 rdi_off, SLOT2(rdiH_off) 1375 // saved_rbp_off, SLOT2(saved_rbpH_off) 1376 return_off, SLOT2(returnH_off) 1377 sup_k_off, SLOT2(sup_kH_off) 1378 klass_off, SLOT2(superH_off) 1379 framesize, 1380 result_off = klass_off // deepest argument is also the return value 1381 }; 1382 1383 __ set_info("slow_subtype_check", dont_gc_arguments); 1384 __ push(rdi); 1385 __ push(rsi); 1386 __ push(rcx); 1387 __ push(rax); 1388 1389 // This is called by pushing args and not with C abi 1390 __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass 1391 __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass 1392 1393 Label miss; 1394 __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss); 1395 1396 // fallthrough on success: 1397 __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result 1398 __ pop(rax); 1399 __ pop(rcx); 1400 __ pop(rsi); 1401 __ pop(rdi); 1402 __ ret(0); 1403 1404 __ bind(miss); 1405 __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result 1406 __ pop(rax); 1407 __ pop(rcx); 1408 __ pop(rsi); 1409 __ pop(rdi); 1410 __ ret(0); 1411 } 1412 break; 1413 1414 case monitorenter_nofpu_id: 1415 save_fpu_registers = false; 1416 // fall through 1417 case monitorenter_id: 1418 { 1419 StubFrame f(sasm, "monitorenter", dont_gc_arguments); 1420 OopMap* map = save_live_registers(sasm, 3, save_fpu_registers); 1421 1422 // Called with store_parameter and not C abi 1423 1424 f.load_argument(1, rax); // rax,: object 1425 f.load_argument(0, rbx); // rbx,: lock address 1426 1427 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx); 1428 1429 oop_maps = new OopMapSet(); 1430 oop_maps->add_gc_map(call_offset, map); 1431 restore_live_registers(sasm, save_fpu_registers); 1432 } 1433 break; 1434 1435 case monitorexit_nofpu_id: 1436 save_fpu_registers = false; 1437 // fall through 1438 case monitorexit_id: 1439 { 1440 StubFrame f(sasm, "monitorexit", dont_gc_arguments); 1441 OopMap* map = save_live_registers(sasm, 2, save_fpu_registers); 1442 1443 // Called with store_parameter and not C abi 1444 1445 f.load_argument(0, rax); // rax,: lock address 1446 1447 // note: really a leaf routine but must setup last java sp 1448 // => use call_RT for now (speed can be improved by 1449 // doing last java sp setup manually) 1450 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax); 1451 1452 oop_maps = new OopMapSet(); 1453 oop_maps->add_gc_map(call_offset, map); 1454 restore_live_registers(sasm, save_fpu_registers); 1455 } 1456 break; 1457 1458 case deoptimize_id: 1459 { 1460 StubFrame f(sasm, "deoptimize", dont_gc_arguments); 1461 const int num_rt_args = 2; // thread, trap_request 1462 OopMap* oop_map = save_live_registers(sasm, num_rt_args); 1463 f.load_argument(0, rax); 1464 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), rax); 1465 oop_maps = new OopMapSet(); 1466 oop_maps->add_gc_map(call_offset, oop_map); 1467 restore_live_registers(sasm); 1468 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); 1469 assert(deopt_blob != NULL, "deoptimization blob must have been created"); 1470 __ leave(); 1471 __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); 1472 } 1473 break; 1474 1475 case access_field_patching_id: 1476 { StubFrame f(sasm, "access_field_patching", dont_gc_arguments); 1477 // we should set up register map 1478 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching)); 1479 } 1480 break; 1481 1482 case load_klass_patching_id: 1483 { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments); 1484 // we should set up register map 1485 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching)); 1486 } 1487 break; 1488 1489 case load_mirror_patching_id: 1490 { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments); 1491 // we should set up register map 1492 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching)); 1493 } 1494 break; 1495 1496 case load_appendix_patching_id: 1497 { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments); 1498 // we should set up register map 1499 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching)); 1500 } 1501 break; 1502 1503 case dtrace_object_alloc_id: 1504 { // rax,: object 1505 StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments); 1506 // we can't gc here so skip the oopmap but make sure that all 1507 // the live registers get saved. 1508 save_live_registers(sasm, 1); 1509 1510 __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax)); 1511 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc))); 1512 NOT_LP64(__ pop(rax)); 1513 1514 restore_live_registers(sasm); 1515 } 1516 break; 1517 1518 case fpu2long_stub_id: 1519 { 1520 // rax, and rdx are destroyed, but should be free since the result is returned there 1521 // preserve rsi,ecx 1522 __ push(rsi); 1523 __ push(rcx); 1524 LP64_ONLY(__ push(rdx);) 1525 1526 // check for NaN 1527 Label return0, do_return, return_min_jlong, do_convert; 1528 1529 Address value_high_word(rsp, wordSize + 4); 1530 Address value_low_word(rsp, wordSize); 1531 Address result_high_word(rsp, 3*wordSize + 4); 1532 Address result_low_word(rsp, 3*wordSize); 1533 1534 __ subptr(rsp, 32); // more than enough on 32bit 1535 __ fst_d(value_low_word); 1536 __ movl(rax, value_high_word); 1537 __ andl(rax, 0x7ff00000); 1538 __ cmpl(rax, 0x7ff00000); 1539 __ jcc(Assembler::notEqual, do_convert); 1540 __ movl(rax, value_high_word); 1541 __ andl(rax, 0xfffff); 1542 __ orl(rax, value_low_word); 1543 __ jcc(Assembler::notZero, return0); 1544 1545 __ bind(do_convert); 1546 __ fnstcw(Address(rsp, 0)); 1547 __ movzwl(rax, Address(rsp, 0)); 1548 __ orl(rax, 0xc00); 1549 __ movw(Address(rsp, 2), rax); 1550 __ fldcw(Address(rsp, 2)); 1551 __ fwait(); 1552 __ fistp_d(result_low_word); 1553 __ fldcw(Address(rsp, 0)); 1554 __ fwait(); 1555 // This gets the entire long in rax on 64bit 1556 __ movptr(rax, result_low_word); 1557 // testing of high bits 1558 __ movl(rdx, result_high_word); 1559 __ mov(rcx, rax); 1560 // What the heck is the point of the next instruction??? 1561 __ xorl(rcx, 0x0); 1562 __ movl(rsi, 0x80000000); 1563 __ xorl(rsi, rdx); 1564 __ orl(rcx, rsi); 1565 __ jcc(Assembler::notEqual, do_return); 1566 __ fldz(); 1567 __ fcomp_d(value_low_word); 1568 __ fnstsw_ax(); 1569 #ifdef _LP64 1570 __ testl(rax, 0x4100); // ZF & CF == 0 1571 __ jcc(Assembler::equal, return_min_jlong); 1572 #else 1573 __ sahf(); 1574 __ jcc(Assembler::above, return_min_jlong); 1575 #endif // _LP64 1576 // return max_jlong 1577 #ifndef _LP64 1578 __ movl(rdx, 0x7fffffff); 1579 __ movl(rax, 0xffffffff); 1580 #else 1581 __ mov64(rax, CONST64(0x7fffffffffffffff)); 1582 #endif // _LP64 1583 __ jmp(do_return); 1584 1585 __ bind(return_min_jlong); 1586 #ifndef _LP64 1587 __ movl(rdx, 0x80000000); 1588 __ xorl(rax, rax); 1589 #else 1590 __ mov64(rax, UCONST64(0x8000000000000000)); 1591 #endif // _LP64 1592 __ jmp(do_return); 1593 1594 __ bind(return0); 1595 __ fpop(); 1596 #ifndef _LP64 1597 __ xorptr(rdx,rdx); 1598 __ xorptr(rax,rax); 1599 #else 1600 __ xorptr(rax, rax); 1601 #endif // _LP64 1602 1603 __ bind(do_return); 1604 __ addptr(rsp, 32); 1605 LP64_ONLY(__ pop(rdx);) 1606 __ pop(rcx); 1607 __ pop(rsi); 1608 __ ret(0); 1609 } 1610 break; 1611 1612 case predicate_failed_trap_id: 1613 { 1614 StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments); 1615 1616 OopMap* map = save_live_registers(sasm, 1); 1617 1618 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap)); 1619 oop_maps = new OopMapSet(); 1620 oop_maps->add_gc_map(call_offset, map); 1621 restore_live_registers(sasm); 1622 __ leave(); 1623 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); 1624 assert(deopt_blob != NULL, "deoptimization blob must have been created"); 1625 1626 __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); 1627 } 1628 break; 1629 1630 default: 1631 { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments); 1632 __ movptr(rax, (int)id); 1633 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax); 1634 __ should_not_reach_here(); 1635 } 1636 break; 1637 } 1638 return oop_maps; 1639 } 1640 1641 #undef __ 1642 1643 const char *Runtime1::pd_name_for_address(address entry) { 1644 return "<unknown function>"; 1645 }