1 /* 2 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "c1/c1_Defs.hpp" 28 #include "c1/c1_MacroAssembler.hpp" 29 #include "c1/c1_Runtime1.hpp" 30 #include "ci/ciUtilities.hpp" 31 #include "gc/shared/cardTable.hpp" 32 #include "gc/shared/cardTableBarrierSet.hpp" 33 #include "interpreter/interpreter.hpp" 34 #include "nativeInst_x86.hpp" 35 #include "oops/compiledICHolder.hpp" 36 #include "oops/oop.inline.hpp" 37 #include "prims/jvmtiExport.hpp" 38 #include "register_x86.hpp" 39 #include "runtime/sharedRuntime.hpp" 40 #include "runtime/signature.hpp" 41 #include "runtime/vframeArray.hpp" 42 #include "utilities/macros.hpp" 43 #include "vmreg_x86.inline.hpp" 44 45 // Implementation of StubAssembler 46 47 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) { 48 // setup registers 49 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions) 50 assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different"); 51 assert(oop_result1 != thread && metadata_result != thread, "registers must be different"); 52 assert(args_size >= 0, "illegal args_size"); 53 bool align_stack = false; 54 #ifdef _LP64 55 // At a method handle call, the stack may not be properly aligned 56 // when returning with an exception. 57 align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id); 58 #endif 59 60 #ifdef _LP64 61 mov(c_rarg0, thread); 62 set_num_rt_args(0); // Nothing on stack 63 #else 64 set_num_rt_args(1 + args_size); 65 66 // push java thread (becomes first argument of C function) 67 get_thread(thread); 68 push(thread); 69 #endif // _LP64 70 71 int call_offset; 72 if (!align_stack) { 73 set_last_Java_frame(thread, noreg, rbp, NULL); 74 } else { 75 address the_pc = pc(); 76 call_offset = offset(); 77 set_last_Java_frame(thread, noreg, rbp, the_pc); 78 andptr(rsp, -(StackAlignmentInBytes)); // Align stack 79 } 80 81 // do the call 82 call(RuntimeAddress(entry)); 83 if (!align_stack) { 84 call_offset = offset(); 85 } 86 // verify callee-saved register 87 #ifdef ASSERT 88 guarantee(thread != rax, "change this code"); 89 push(rax); 90 { Label L; 91 get_thread(rax); 92 cmpptr(thread, rax); 93 jcc(Assembler::equal, L); 94 int3(); 95 stop("StubAssembler::call_RT: rdi not callee saved?"); 96 bind(L); 97 } 98 pop(rax); 99 #endif 100 reset_last_Java_frame(thread, true); 101 102 // discard thread and arguments 103 NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord)); 104 105 // check for pending exceptions 106 { Label L; 107 cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 108 jcc(Assembler::equal, L); 109 // exception pending => remove activation and forward to exception handler 110 movptr(rax, Address(thread, Thread::pending_exception_offset())); 111 // make sure that the vm_results are cleared 112 if (oop_result1->is_valid()) { 113 movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); 114 } 115 if (metadata_result->is_valid()) { 116 movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); 117 } 118 if (frame_size() == no_frame_size) { 119 leave(); 120 jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 121 } else if (_stub_id == Runtime1::forward_exception_id) { 122 should_not_reach_here(); 123 } else { 124 jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id))); 125 } 126 bind(L); 127 } 128 // get oop results if there are any and reset the values in the thread 129 if (oop_result1->is_valid()) { 130 get_vm_result(oop_result1, thread); 131 } 132 if (metadata_result->is_valid()) { 133 get_vm_result_2(metadata_result, thread); 134 } 135 return call_offset; 136 } 137 138 139 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) { 140 #ifdef _LP64 141 mov(c_rarg1, arg1); 142 #else 143 push(arg1); 144 #endif // _LP64 145 return call_RT(oop_result1, metadata_result, entry, 1); 146 } 147 148 149 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) { 150 #ifdef _LP64 151 if (c_rarg1 == arg2) { 152 if (c_rarg2 == arg1) { 153 xchgq(arg1, arg2); 154 } else { 155 mov(c_rarg2, arg2); 156 mov(c_rarg1, arg1); 157 } 158 } else { 159 mov(c_rarg1, arg1); 160 mov(c_rarg2, arg2); 161 } 162 #else 163 push(arg2); 164 push(arg1); 165 #endif // _LP64 166 return call_RT(oop_result1, metadata_result, entry, 2); 167 } 168 169 170 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) { 171 #ifdef _LP64 172 // if there is any conflict use the stack 173 if (arg1 == c_rarg2 || arg1 == c_rarg3 || 174 arg2 == c_rarg1 || arg1 == c_rarg3 || 175 arg3 == c_rarg1 || arg1 == c_rarg2) { 176 push(arg3); 177 push(arg2); 178 push(arg1); 179 pop(c_rarg1); 180 pop(c_rarg2); 181 pop(c_rarg3); 182 } else { 183 mov(c_rarg1, arg1); 184 mov(c_rarg2, arg2); 185 mov(c_rarg3, arg3); 186 } 187 #else 188 push(arg3); 189 push(arg2); 190 push(arg1); 191 #endif // _LP64 192 return call_RT(oop_result1, metadata_result, entry, 3); 193 } 194 195 196 // Implementation of StubFrame 197 198 class StubFrame: public StackObj { 199 private: 200 StubAssembler* _sasm; 201 202 public: 203 StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments); 204 void load_argument(int offset_in_words, Register reg); 205 206 ~StubFrame(); 207 }; 208 209 void StubAssembler::prologue(const char* name, bool must_gc_arguments) { 210 set_info(name, must_gc_arguments); 211 enter(); 212 } 213 214 void StubAssembler::epilogue() { 215 leave(); 216 ret(0); 217 } 218 219 #define __ _sasm-> 220 221 StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) { 222 _sasm = sasm; 223 __ prologue(name, must_gc_arguments); 224 } 225 226 // load parameters that were stored with LIR_Assembler::store_parameter 227 // Note: offsets for store_parameter and load_argument must match 228 void StubFrame::load_argument(int offset_in_words, Register reg) { 229 __ load_parameter(offset_in_words, reg); 230 } 231 232 233 StubFrame::~StubFrame() { 234 __ epilogue(); 235 } 236 237 #undef __ 238 239 240 // Implementation of Runtime1 241 242 const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2; 243 const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2; 244 245 // Stack layout for saving/restoring all the registers needed during a runtime 246 // call (this includes deoptimization) 247 // Note: note that users of this frame may well have arguments to some runtime 248 // while these values are on the stack. These positions neglect those arguments 249 // but the code in save_live_registers will take the argument count into 250 // account. 251 // 252 #ifdef _LP64 253 #define SLOT2(x) x, 254 #define SLOT_PER_WORD 2 255 #else 256 #define SLOT2(x) 257 #define SLOT_PER_WORD 1 258 #endif // _LP64 259 260 enum reg_save_layout { 261 // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that 262 // happen and will assert if the stack size we create is misaligned 263 #ifdef _LP64 264 align_dummy_0, align_dummy_1, 265 #endif // _LP64 266 #ifdef _WIN64 267 // Windows always allocates space for it's argument registers (see 268 // frame::arg_reg_save_area_bytes). 269 arg_reg_save_1, arg_reg_save_1H, // 0, 4 270 arg_reg_save_2, arg_reg_save_2H, // 8, 12 271 arg_reg_save_3, arg_reg_save_3H, // 16, 20 272 arg_reg_save_4, arg_reg_save_4H, // 24, 28 273 #endif // _WIN64 274 xmm_regs_as_doubles_off, // 32 275 float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160 276 fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224 277 // fpu_state_end_off is exclusive 278 fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352 279 marker = fpu_state_end_off, SLOT2(markerH) // 352, 356 280 extra_space_offset, // 360 281 #ifdef _LP64 282 r15_off = extra_space_offset, r15H_off, // 360, 364 283 r14_off, r14H_off, // 368, 372 284 r13_off, r13H_off, // 376, 380 285 r12_off, r12H_off, // 384, 388 286 r11_off, r11H_off, // 392, 396 287 r10_off, r10H_off, // 400, 404 288 r9_off, r9H_off, // 408, 412 289 r8_off, r8H_off, // 416, 420 290 rdi_off, rdiH_off, // 424, 428 291 #else 292 rdi_off = extra_space_offset, 293 #endif // _LP64 294 rsi_off, SLOT2(rsiH_off) // 432, 436 295 rbp_off, SLOT2(rbpH_off) // 440, 444 296 rsp_off, SLOT2(rspH_off) // 448, 452 297 rbx_off, SLOT2(rbxH_off) // 456, 460 298 rdx_off, SLOT2(rdxH_off) // 464, 468 299 rcx_off, SLOT2(rcxH_off) // 472, 476 300 rax_off, SLOT2(raxH_off) // 480, 484 301 saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492 302 return_off, SLOT2(returnH_off) // 496, 500 303 reg_save_frame_size // As noted: neglects any parameters to runtime // 504 304 }; 305 306 // Save off registers which might be killed by calls into the runtime. 307 // Tries to smart of about FP registers. In particular we separate 308 // saving and describing the FPU registers for deoptimization since we 309 // have to save the FPU registers twice if we describe them and on P4 310 // saving FPU registers which don't contain anything appears 311 // expensive. The deopt blob is the only thing which needs to 312 // describe FPU registers. In all other cases it should be sufficient 313 // to simply save their current value. 314 315 static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args, 316 bool save_fpu_registers = true) { 317 318 // In 64bit all the args are in regs so there are no additional stack slots 319 LP64_ONLY(num_rt_args = 0); 320 LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");) 321 int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread 322 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word); 323 324 // record saved value locations in an OopMap 325 // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread 326 OopMap* map = new OopMap(frame_size_in_slots, 0); 327 map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg()); 328 map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg()); 329 map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg()); 330 map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg()); 331 map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg()); 332 map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg()); 333 #ifdef _LP64 334 map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8->as_VMReg()); 335 map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9->as_VMReg()); 336 map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg()); 337 map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg()); 338 map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg()); 339 map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg()); 340 map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg()); 341 map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg()); 342 343 // This is stupid but needed. 344 map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next()); 345 map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next()); 346 map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next()); 347 map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next()); 348 map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next()); 349 map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next()); 350 351 map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args), r8->as_VMReg()->next()); 352 map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args), r9->as_VMReg()->next()); 353 map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next()); 354 map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next()); 355 map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next()); 356 map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next()); 357 map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next()); 358 map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next()); 359 #endif // _LP64 360 361 int xmm_bypass_limit = FrameMap::nof_xmm_regs; 362 #ifdef _LP64 363 if (UseAVX < 3) { 364 xmm_bypass_limit = xmm_bypass_limit / 2; 365 } 366 #endif 367 368 if (save_fpu_registers) { 369 if (UseSSE < 2) { 370 int fpu_off = float_regs_as_doubles_off; 371 for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { 372 VMReg fpu_name_0 = FrameMap::fpu_regname(n); 373 map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0); 374 // %%% This is really a waste but we'll keep things as they were for now 375 if (true) { 376 map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next()); 377 } 378 fpu_off += 2; 379 } 380 assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots"); 381 } 382 383 if (UseSSE >= 2) { 384 int xmm_off = xmm_regs_as_doubles_off; 385 for (int n = 0; n < FrameMap::nof_xmm_regs; n++) { 386 if (n < xmm_bypass_limit) { 387 VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); 388 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); 389 // %%% This is really a waste but we'll keep things as they were for now 390 if (true) { 391 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next()); 392 } 393 } 394 xmm_off += 2; 395 } 396 assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers"); 397 398 } else if (UseSSE == 1) { 399 int xmm_off = xmm_regs_as_doubles_off; 400 for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { 401 VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); 402 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); 403 xmm_off += 2; 404 } 405 assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers"); 406 } 407 } 408 409 return map; 410 } 411 412 #define __ this-> 413 414 void C1_MacroAssembler::save_live_registers_no_oop_map(bool save_fpu_registers) { 415 __ block_comment("save_live_registers"); 416 417 __ pusha(); // integer registers 418 419 // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset"); 420 // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset"); 421 422 __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); 423 424 #ifdef ASSERT 425 __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); 426 #endif 427 428 if (save_fpu_registers) { 429 if (UseSSE < 2) { 430 // save FPU stack 431 __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); 432 __ fwait(); 433 434 #ifdef ASSERT 435 Label ok; 436 __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std()); 437 __ jccb(Assembler::equal, ok); 438 __ stop("corrupted control word detected"); 439 __ bind(ok); 440 #endif 441 442 // Reset the control word to guard against exceptions being unmasked 443 // since fstp_d can cause FPU stack underflow exceptions. Write it 444 // into the on stack copy and then reload that to make sure that the 445 // current and future values are correct. 446 __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std()); 447 __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); 448 449 // Save the FPU registers in de-opt-able form 450 int offset = 0; 451 for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { 452 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); 453 offset += 8; 454 } 455 } 456 457 if (UseSSE >= 2) { 458 // save XMM registers 459 // XMM registers can contain float or double values, but this is not known here, 460 // so always save them as doubles. 461 // note that float values are _not_ converted automatically, so for float values 462 // the second word contains only garbage data. 463 int xmm_bypass_limit = FrameMap::nof_xmm_regs; 464 int offset = 0; 465 #ifdef _LP64 466 if (UseAVX < 3) { 467 xmm_bypass_limit = xmm_bypass_limit / 2; 468 } 469 #endif 470 for (int n = 0; n < xmm_bypass_limit; n++) { 471 XMMRegister xmm_name = as_XMMRegister(n); 472 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name); 473 offset += 8; 474 } 475 } else if (UseSSE == 1) { 476 // save XMM registers as float because double not supported without SSE2(num MMX == num fpu) 477 int offset = 0; 478 for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { 479 XMMRegister xmm_name = as_XMMRegister(n); 480 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name); 481 offset += 8; 482 } 483 } 484 } 485 486 // FPU stack must be empty now 487 __ verify_FPU(0, "save_live_registers"); 488 } 489 490 #undef __ 491 #define __ sasm-> 492 493 static void restore_fpu(C1_MacroAssembler* sasm, bool restore_fpu_registers) { 494 if (restore_fpu_registers) { 495 if (UseSSE >= 2) { 496 // restore XMM registers 497 int xmm_bypass_limit = FrameMap::nof_xmm_regs; 498 #ifdef _LP64 499 if (UseAVX < 3) { 500 xmm_bypass_limit = xmm_bypass_limit / 2; 501 } 502 #endif 503 int offset = 0; 504 for (int n = 0; n < xmm_bypass_limit; n++) { 505 XMMRegister xmm_name = as_XMMRegister(n); 506 __ movdbl(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); 507 offset += 8; 508 } 509 } else if (UseSSE == 1) { 510 // restore XMM registers(num MMX == num fpu) 511 int offset = 0; 512 for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { 513 XMMRegister xmm_name = as_XMMRegister(n); 514 __ movflt(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); 515 offset += 8; 516 } 517 } 518 519 if (UseSSE < 2) { 520 __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); 521 } else { 522 // check that FPU stack is really empty 523 __ verify_FPU(0, "restore_live_registers"); 524 } 525 526 } else { 527 // check that FPU stack is really empty 528 __ verify_FPU(0, "restore_live_registers"); 529 } 530 531 #ifdef ASSERT 532 { 533 Label ok; 534 __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); 535 __ jcc(Assembler::equal, ok); 536 __ stop("bad offsets in frame"); 537 __ bind(ok); 538 } 539 #endif // ASSERT 540 541 __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); 542 } 543 544 #undef __ 545 #define __ this-> 546 547 void C1_MacroAssembler::restore_live_registers(bool restore_fpu_registers) { 548 __ block_comment("restore_live_registers"); 549 550 restore_fpu(this, restore_fpu_registers); 551 __ popa(); 552 } 553 554 555 void C1_MacroAssembler::restore_live_registers_except_rax(bool restore_fpu_registers) { 556 __ block_comment("restore_live_registers_except_rax"); 557 558 restore_fpu(this, restore_fpu_registers); 559 560 #ifdef _LP64 561 __ movptr(r15, Address(rsp, 0)); 562 __ movptr(r14, Address(rsp, wordSize)); 563 __ movptr(r13, Address(rsp, 2 * wordSize)); 564 __ movptr(r12, Address(rsp, 3 * wordSize)); 565 __ movptr(r11, Address(rsp, 4 * wordSize)); 566 __ movptr(r10, Address(rsp, 5 * wordSize)); 567 __ movptr(r9, Address(rsp, 6 * wordSize)); 568 __ movptr(r8, Address(rsp, 7 * wordSize)); 569 __ movptr(rdi, Address(rsp, 8 * wordSize)); 570 __ movptr(rsi, Address(rsp, 9 * wordSize)); 571 __ movptr(rbp, Address(rsp, 10 * wordSize)); 572 // skip rsp 573 __ movptr(rbx, Address(rsp, 12 * wordSize)); 574 __ movptr(rdx, Address(rsp, 13 * wordSize)); 575 __ movptr(rcx, Address(rsp, 14 * wordSize)); 576 577 __ addptr(rsp, 16 * wordSize); 578 #else 579 580 __ pop(rdi); 581 __ pop(rsi); 582 __ pop(rbp); 583 __ pop(rbx); // skip this value 584 __ pop(rbx); 585 __ pop(rdx); 586 __ pop(rcx); 587 __ addptr(rsp, BytesPerWord); 588 #endif // _LP64 589 } 590 591 #undef __ 592 #define __ sasm-> 593 594 static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args, 595 bool save_fpu_registers = true) { 596 __ save_live_registers_no_oop_map(save_fpu_registers); 597 return generate_oop_map(sasm, num_rt_args, save_fpu_registers); 598 } 599 600 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) { 601 __ restore_live_registers(restore_fpu_registers); 602 } 603 604 static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) { 605 sasm->restore_live_registers_except_rax(restore_fpu_registers); 606 } 607 608 609 void Runtime1::initialize_pd() { 610 // nothing to do 611 } 612 613 614 // target: the entry point of the method that creates and posts the exception oop 615 // has_argument: true if the exception needs an argument (passed on stack because registers must be preserved) 616 617 OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) { 618 // preserve all registers 619 int num_rt_args = has_argument ? 2 : 1; 620 OopMap* oop_map = save_live_registers(sasm, num_rt_args); 621 622 // now all registers are saved and can be used freely 623 // verify that no old value is used accidentally 624 __ invalidate_registers(true, true, true, true, true, true); 625 626 // registers used by this stub 627 const Register temp_reg = rbx; 628 629 // load argument for exception that is passed as an argument into the stub 630 if (has_argument) { 631 #ifdef _LP64 632 __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord)); 633 #else 634 __ movptr(temp_reg, Address(rbp, 2*BytesPerWord)); 635 __ push(temp_reg); 636 #endif // _LP64 637 } 638 int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1); 639 640 OopMapSet* oop_maps = new OopMapSet(); 641 oop_maps->add_gc_map(call_offset, oop_map); 642 643 __ stop("should not reach here"); 644 645 return oop_maps; 646 } 647 648 649 OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { 650 __ block_comment("generate_handle_exception"); 651 652 // incoming parameters 653 const Register exception_oop = rax; 654 const Register exception_pc = rdx; 655 // other registers used in this stub 656 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); 657 658 // Save registers, if required. 659 OopMapSet* oop_maps = new OopMapSet(); 660 OopMap* oop_map = NULL; 661 switch (id) { 662 case forward_exception_id: 663 // We're handling an exception in the context of a compiled frame. 664 // The registers have been saved in the standard places. Perform 665 // an exception lookup in the caller and dispatch to the handler 666 // if found. Otherwise unwind and dispatch to the callers 667 // exception handler. 668 oop_map = generate_oop_map(sasm, 1 /*thread*/); 669 670 // load and clear pending exception oop into RAX 671 __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset())); 672 __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); 673 674 // load issuing PC (the return address for this stub) into rdx 675 __ movptr(exception_pc, Address(rbp, 1*BytesPerWord)); 676 677 // make sure that the vm_results are cleared (may be unnecessary) 678 __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); 679 __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); 680 break; 681 case handle_exception_nofpu_id: 682 case handle_exception_id: 683 // At this point all registers MAY be live. 684 oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id); 685 break; 686 case handle_exception_from_callee_id: { 687 // At this point all registers except exception oop (RAX) and 688 // exception pc (RDX) are dead. 689 const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord); 690 oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0); 691 sasm->set_frame_size(frame_size); 692 WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes)); 693 break; 694 } 695 default: ShouldNotReachHere(); 696 } 697 698 #ifdef TIERED 699 // C2 can leave the fpu stack dirty 700 if (UseSSE < 2) { 701 __ empty_FPU_stack(); 702 } 703 #endif // TIERED 704 705 // verify that only rax, and rdx is valid at this time 706 __ invalidate_registers(false, true, true, false, true, true); 707 // verify that rax, contains a valid exception 708 __ verify_not_null_oop(exception_oop); 709 710 // load address of JavaThread object for thread-local data 711 NOT_LP64(__ get_thread(thread);) 712 713 #ifdef ASSERT 714 // check that fields in JavaThread for exception oop and issuing pc are 715 // empty before writing to them 716 Label oop_empty; 717 __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD); 718 __ jcc(Assembler::equal, oop_empty); 719 __ stop("exception oop already set"); 720 __ bind(oop_empty); 721 722 Label pc_empty; 723 __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0); 724 __ jcc(Assembler::equal, pc_empty); 725 __ stop("exception pc already set"); 726 __ bind(pc_empty); 727 #endif 728 729 // save exception oop and issuing pc into JavaThread 730 // (exception handler will load it from here) 731 __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop); 732 __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc); 733 734 // patch throwing pc into return address (has bci & oop map) 735 __ movptr(Address(rbp, 1*BytesPerWord), exception_pc); 736 737 // compute the exception handler. 738 // the exception oop and the throwing pc are read from the fields in JavaThread 739 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); 740 oop_maps->add_gc_map(call_offset, oop_map); 741 742 // rax: handler address 743 // will be the deopt blob if nmethod was deoptimized while we looked up 744 // handler regardless of whether handler existed in the nmethod. 745 746 // only rax, is valid at this time, all other registers have been destroyed by the runtime call 747 __ invalidate_registers(false, true, true, true, true, true); 748 749 // patch the return address, this stub will directly return to the exception handler 750 __ movptr(Address(rbp, 1*BytesPerWord), rax); 751 752 switch (id) { 753 case forward_exception_id: 754 case handle_exception_nofpu_id: 755 case handle_exception_id: 756 // Restore the registers that were saved at the beginning. 757 restore_live_registers(sasm, id != handle_exception_nofpu_id); 758 break; 759 case handle_exception_from_callee_id: 760 // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP 761 // since we do a leave anyway. 762 763 // Pop the return address. 764 __ leave(); 765 __ pop(rcx); 766 __ jmp(rcx); // jump to exception handler 767 break; 768 default: ShouldNotReachHere(); 769 } 770 771 return oop_maps; 772 } 773 774 775 void Runtime1::generate_unwind_exception(StubAssembler *sasm) { 776 // incoming parameters 777 const Register exception_oop = rax; 778 // callee-saved copy of exception_oop during runtime call 779 const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14); 780 // other registers used in this stub 781 const Register exception_pc = rdx; 782 const Register handler_addr = rbx; 783 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); 784 785 // verify that only rax, is valid at this time 786 __ invalidate_registers(false, true, true, true, true, true); 787 788 #ifdef ASSERT 789 // check that fields in JavaThread for exception oop and issuing pc are empty 790 NOT_LP64(__ get_thread(thread);) 791 Label oop_empty; 792 __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0); 793 __ jcc(Assembler::equal, oop_empty); 794 __ stop("exception oop must be empty"); 795 __ bind(oop_empty); 796 797 Label pc_empty; 798 __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0); 799 __ jcc(Assembler::equal, pc_empty); 800 __ stop("exception pc must be empty"); 801 __ bind(pc_empty); 802 #endif 803 804 // clear the FPU stack in case any FPU results are left behind 805 __ empty_FPU_stack(); 806 807 // save exception_oop in callee-saved register to preserve it during runtime calls 808 __ verify_not_null_oop(exception_oop); 809 __ movptr(exception_oop_callee_saved, exception_oop); 810 811 NOT_LP64(__ get_thread(thread);) 812 // Get return address (is on top of stack after leave). 813 __ movptr(exception_pc, Address(rsp, 0)); 814 815 // search the exception handler address of the caller (using the return address) 816 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc); 817 // rax: exception handler address of the caller 818 819 // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call. 820 __ invalidate_registers(false, true, true, true, false, true); 821 822 // move result of call into correct register 823 __ movptr(handler_addr, rax); 824 825 // Restore exception oop to RAX (required convention of exception handler). 826 __ movptr(exception_oop, exception_oop_callee_saved); 827 828 // verify that there is really a valid exception in rax 829 __ verify_not_null_oop(exception_oop); 830 831 // get throwing pc (= return address). 832 // rdx has been destroyed by the call, so it must be set again 833 // the pop is also necessary to simulate the effect of a ret(0) 834 __ pop(exception_pc); 835 836 // continue at exception handler (return address removed) 837 // note: do *not* remove arguments when unwinding the 838 // activation since the caller assumes having 839 // all arguments on the stack when entering the 840 // runtime to determine the exception handler 841 // (GC happens at call site with arguments!) 842 // rax: exception oop 843 // rdx: throwing pc 844 // rbx: exception handler 845 __ jmp(handler_addr); 846 } 847 848 849 OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { 850 // use the maximum number of runtime-arguments here because it is difficult to 851 // distinguish each RT-Call. 852 // Note: This number affects also the RT-Call in generate_handle_exception because 853 // the oop-map is shared for all calls. 854 const int num_rt_args = 2; // thread + dummy 855 856 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); 857 assert(deopt_blob != NULL, "deoptimization blob must have been created"); 858 859 OopMap* oop_map = save_live_registers(sasm, num_rt_args); 860 861 #ifdef _LP64 862 const Register thread = r15_thread; 863 // No need to worry about dummy 864 __ mov(c_rarg0, thread); 865 #else 866 __ push(rax); // push dummy 867 868 const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions) 869 // push java thread (becomes first argument of C function) 870 __ get_thread(thread); 871 __ push(thread); 872 #endif // _LP64 873 __ set_last_Java_frame(thread, noreg, rbp, NULL); 874 // do the call 875 __ call(RuntimeAddress(target)); 876 OopMapSet* oop_maps = new OopMapSet(); 877 oop_maps->add_gc_map(__ offset(), oop_map); 878 // verify callee-saved register 879 #ifdef ASSERT 880 guarantee(thread != rax, "change this code"); 881 __ push(rax); 882 { Label L; 883 __ get_thread(rax); 884 __ cmpptr(thread, rax); 885 __ jcc(Assembler::equal, L); 886 __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?"); 887 __ bind(L); 888 } 889 __ pop(rax); 890 #endif 891 __ reset_last_Java_frame(thread, true); 892 #ifndef _LP64 893 __ pop(rcx); // discard thread arg 894 __ pop(rcx); // discard dummy 895 #endif // _LP64 896 897 // check for pending exceptions 898 { Label L; 899 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 900 __ jcc(Assembler::equal, L); 901 // exception pending => remove activation and forward to exception handler 902 903 __ testptr(rax, rax); // have we deoptimized? 904 __ jump_cc(Assembler::equal, 905 RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id))); 906 907 // the deopt blob expects exceptions in the special fields of 908 // JavaThread, so copy and clear pending exception. 909 910 // load and clear pending exception 911 __ movptr(rax, Address(thread, Thread::pending_exception_offset())); 912 __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); 913 914 // check that there is really a valid exception 915 __ verify_not_null_oop(rax); 916 917 // load throwing pc: this is the return address of the stub 918 __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size)); 919 920 #ifdef ASSERT 921 // check that fields in JavaThread for exception oop and issuing pc are empty 922 Label oop_empty; 923 __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD); 924 __ jcc(Assembler::equal, oop_empty); 925 __ stop("exception oop must be empty"); 926 __ bind(oop_empty); 927 928 Label pc_empty; 929 __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD); 930 __ jcc(Assembler::equal, pc_empty); 931 __ stop("exception pc must be empty"); 932 __ bind(pc_empty); 933 #endif 934 935 // store exception oop and throwing pc to JavaThread 936 __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax); 937 __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx); 938 939 restore_live_registers(sasm); 940 941 __ leave(); 942 __ addptr(rsp, BytesPerWord); // remove return address from stack 943 944 // Forward the exception directly to deopt blob. We can blow no 945 // registers and must leave throwing pc on the stack. A patch may 946 // have values live in registers so the entry point with the 947 // exception in tls. 948 __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls())); 949 950 __ bind(L); 951 } 952 953 954 // Runtime will return true if the nmethod has been deoptimized during 955 // the patching process. In that case we must do a deopt reexecute instead. 956 957 Label reexecuteEntry, cont; 958 959 __ testptr(rax, rax); // have we deoptimized? 960 __ jcc(Assembler::equal, cont); // no 961 962 // Will reexecute. Proper return address is already on the stack we just restore 963 // registers, pop all of our frame but the return address and jump to the deopt blob 964 restore_live_registers(sasm); 965 __ leave(); 966 __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); 967 968 __ bind(cont); 969 restore_live_registers(sasm); 970 __ leave(); 971 __ ret(0); 972 973 return oop_maps; 974 } 975 976 977 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { 978 979 // for better readability 980 const bool must_gc_arguments = true; 981 const bool dont_gc_arguments = false; 982 983 // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu 984 bool save_fpu_registers = true; 985 986 // stub code & info for the different stubs 987 OopMapSet* oop_maps = NULL; 988 switch (id) { 989 case forward_exception_id: 990 { 991 oop_maps = generate_handle_exception(id, sasm); 992 __ leave(); 993 __ ret(0); 994 } 995 break; 996 997 case new_instance_id: 998 case fast_new_instance_id: 999 case fast_new_instance_init_check_id: 1000 { 1001 Register klass = rdx; // Incoming 1002 Register obj = rax; // Result 1003 1004 if (id == new_instance_id) { 1005 __ set_info("new_instance", dont_gc_arguments); 1006 } else if (id == fast_new_instance_id) { 1007 __ set_info("fast new_instance", dont_gc_arguments); 1008 } else { 1009 assert(id == fast_new_instance_init_check_id, "bad StubID"); 1010 __ set_info("fast new_instance init check", dont_gc_arguments); 1011 } 1012 1013 if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && UseTLAB 1014 && Universe::heap()->supports_inline_contig_alloc()) { 1015 Label slow_path; 1016 Register obj_size = rcx; 1017 Register t1 = rbx; 1018 Register t2 = rsi; 1019 assert_different_registers(klass, obj, obj_size, t1, t2); 1020 1021 __ push(rdi); 1022 __ push(rbx); 1023 1024 if (id == fast_new_instance_init_check_id) { 1025 // make sure the klass is initialized 1026 __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); 1027 __ jcc(Assembler::notEqual, slow_path); 1028 } 1029 1030 #ifdef ASSERT 1031 // assert object can be fast path allocated 1032 { 1033 Label ok, not_ok; 1034 __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); 1035 __ cmpl(obj_size, 0); // make sure it's an instance (LH > 0) 1036 __ jcc(Assembler::lessEqual, not_ok); 1037 __ testl(obj_size, Klass::_lh_instance_slow_path_bit); 1038 __ jcc(Assembler::zero, ok); 1039 __ bind(not_ok); 1040 __ stop("assert(can be fast path allocated)"); 1041 __ should_not_reach_here(); 1042 __ bind(ok); 1043 } 1044 #endif // ASSERT 1045 1046 // if we got here then the TLAB allocation failed, so try 1047 // refilling the TLAB or allocating directly from eden. 1048 Label retry_tlab, try_eden; 1049 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); 1050 NOT_LP64(__ get_thread(thread)); 1051 1052 __ bind(try_eden); 1053 // get the instance size (size is postive so movl is fine for 64bit) 1054 __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); 1055 1056 __ eden_allocate(obj, obj_size, 0, t1, slow_path); 1057 __ incr_allocated_bytes(thread, obj_size, 0); 1058 1059 __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false); 1060 __ verify_oop(obj); 1061 __ pop(rbx); 1062 __ pop(rdi); 1063 __ ret(0); 1064 1065 __ bind(slow_path); 1066 __ pop(rbx); 1067 __ pop(rdi); 1068 } 1069 1070 __ enter(); 1071 OopMap* map = save_live_registers(sasm, 2); 1072 int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass); 1073 oop_maps = new OopMapSet(); 1074 oop_maps->add_gc_map(call_offset, map); 1075 restore_live_registers_except_rax(sasm); 1076 __ verify_oop(obj); 1077 __ leave(); 1078 __ ret(0); 1079 1080 // rax,: new instance 1081 } 1082 1083 break; 1084 1085 case counter_overflow_id: 1086 { 1087 Register bci = rax, method = rbx; 1088 __ enter(); 1089 OopMap* map = save_live_registers(sasm, 3); 1090 // Retrieve bci 1091 __ movl(bci, Address(rbp, 2*BytesPerWord)); 1092 // And a pointer to the Method* 1093 __ movptr(method, Address(rbp, 3*BytesPerWord)); 1094 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method); 1095 oop_maps = new OopMapSet(); 1096 oop_maps->add_gc_map(call_offset, map); 1097 restore_live_registers(sasm); 1098 __ leave(); 1099 __ ret(0); 1100 } 1101 break; 1102 1103 case new_type_array_id: 1104 case new_object_array_id: 1105 { 1106 Register length = rbx; // Incoming 1107 Register klass = rdx; // Incoming 1108 Register obj = rax; // Result 1109 1110 if (id == new_type_array_id) { 1111 __ set_info("new_type_array", dont_gc_arguments); 1112 } else { 1113 __ set_info("new_object_array", dont_gc_arguments); 1114 } 1115 1116 #ifdef ASSERT 1117 // assert object type is really an array of the proper kind 1118 { 1119 Label ok; 1120 Register t0 = obj; 1121 __ movl(t0, Address(klass, Klass::layout_helper_offset())); 1122 __ sarl(t0, Klass::_lh_array_tag_shift); 1123 int tag = ((id == new_type_array_id) 1124 ? Klass::_lh_array_tag_type_value 1125 : Klass::_lh_array_tag_obj_value); 1126 __ cmpl(t0, tag); 1127 __ jcc(Assembler::equal, ok); 1128 __ stop("assert(is an array klass)"); 1129 __ should_not_reach_here(); 1130 __ bind(ok); 1131 } 1132 #endif // ASSERT 1133 1134 // If we got here, the TLAB allocation failed, so try allocating from 1135 // eden if inline contiguous allocations are supported. 1136 if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) { 1137 Register arr_size = rsi; 1138 Register t1 = rcx; // must be rcx for use as shift count 1139 Register t2 = rdi; 1140 Label slow_path; 1141 1142 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) 1143 // since size is positive movl does right thing on 64bit 1144 __ movl(t1, Address(klass, Klass::layout_helper_offset())); 1145 // since size is postive movl does right thing on 64bit 1146 __ movl(arr_size, length); 1147 assert(t1 == rcx, "fixed register usage"); 1148 __ shlptr(arr_size /* by t1=rcx, mod 32 */); 1149 __ shrptr(t1, Klass::_lh_header_size_shift); 1150 __ andptr(t1, Klass::_lh_header_size_mask); 1151 __ addptr(arr_size, t1); 1152 __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up 1153 __ andptr(arr_size, ~MinObjAlignmentInBytesMask); 1154 1155 __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size 1156 1157 // Using t2 for non 64-bit. 1158 const Register thread = NOT_LP64(t2) LP64_ONLY(r15_thread); 1159 NOT_LP64(__ get_thread(thread)); 1160 __ incr_allocated_bytes(thread, arr_size, 0); 1161 1162 __ initialize_header(obj, klass, length, t1, t2); 1163 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte))); 1164 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); 1165 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); 1166 __ andptr(t1, Klass::_lh_header_size_mask); 1167 __ subptr(arr_size, t1); // body length 1168 __ addptr(t1, obj); // body start 1169 __ initialize_body(t1, arr_size, 0, t2); 1170 __ verify_oop(obj); 1171 __ ret(0); 1172 1173 __ bind(slow_path); 1174 } 1175 1176 __ enter(); 1177 OopMap* map = save_live_registers(sasm, 3); 1178 int call_offset; 1179 if (id == new_type_array_id) { 1180 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length); 1181 } else { 1182 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length); 1183 } 1184 1185 oop_maps = new OopMapSet(); 1186 oop_maps->add_gc_map(call_offset, map); 1187 restore_live_registers_except_rax(sasm); 1188 1189 __ verify_oop(obj); 1190 __ leave(); 1191 __ ret(0); 1192 1193 // rax,: new array 1194 } 1195 break; 1196 1197 case new_multi_array_id: 1198 { StubFrame f(sasm, "new_multi_array", dont_gc_arguments); 1199 // rax,: klass 1200 // rbx,: rank 1201 // rcx: address of 1st dimension 1202 OopMap* map = save_live_registers(sasm, 4); 1203 int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx); 1204 1205 oop_maps = new OopMapSet(); 1206 oop_maps->add_gc_map(call_offset, map); 1207 restore_live_registers_except_rax(sasm); 1208 1209 // rax,: new multi array 1210 __ verify_oop(rax); 1211 } 1212 break; 1213 1214 case register_finalizer_id: 1215 { 1216 __ set_info("register_finalizer", dont_gc_arguments); 1217 1218 // This is called via call_runtime so the arguments 1219 // will be place in C abi locations 1220 1221 #ifdef _LP64 1222 __ verify_oop(c_rarg0); 1223 __ mov(rax, c_rarg0); 1224 #else 1225 // The object is passed on the stack and we haven't pushed a 1226 // frame yet so it's one work away from top of stack. 1227 __ movptr(rax, Address(rsp, 1 * BytesPerWord)); 1228 __ verify_oop(rax); 1229 #endif // _LP64 1230 1231 // load the klass and check the has finalizer flag 1232 Label register_finalizer; 1233 Register t = rsi; 1234 __ load_klass(t, rax); 1235 __ movl(t, Address(t, Klass::access_flags_offset())); 1236 __ testl(t, JVM_ACC_HAS_FINALIZER); 1237 __ jcc(Assembler::notZero, register_finalizer); 1238 __ ret(0); 1239 1240 __ bind(register_finalizer); 1241 __ enter(); 1242 OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */); 1243 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax); 1244 oop_maps = new OopMapSet(); 1245 oop_maps->add_gc_map(call_offset, oop_map); 1246 1247 // Now restore all the live registers 1248 restore_live_registers(sasm); 1249 1250 __ leave(); 1251 __ ret(0); 1252 } 1253 break; 1254 1255 case throw_range_check_failed_id: 1256 { StubFrame f(sasm, "range_check_failed", dont_gc_arguments); 1257 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true); 1258 } 1259 break; 1260 1261 case throw_index_exception_id: 1262 { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments); 1263 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true); 1264 } 1265 break; 1266 1267 case throw_div0_exception_id: 1268 { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments); 1269 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false); 1270 } 1271 break; 1272 1273 case throw_null_pointer_exception_id: 1274 { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments); 1275 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); 1276 } 1277 break; 1278 1279 case handle_exception_nofpu_id: 1280 case handle_exception_id: 1281 { StubFrame f(sasm, "handle_exception", dont_gc_arguments); 1282 oop_maps = generate_handle_exception(id, sasm); 1283 } 1284 break; 1285 1286 case handle_exception_from_callee_id: 1287 { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments); 1288 oop_maps = generate_handle_exception(id, sasm); 1289 } 1290 break; 1291 1292 case unwind_exception_id: 1293 { __ set_info("unwind_exception", dont_gc_arguments); 1294 // note: no stubframe since we are about to leave the current 1295 // activation and we are calling a leaf VM function only. 1296 generate_unwind_exception(sasm); 1297 } 1298 break; 1299 1300 case throw_array_store_exception_id: 1301 { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments); 1302 // tos + 0: link 1303 // + 1: return address 1304 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true); 1305 } 1306 break; 1307 1308 case throw_class_cast_exception_id: 1309 { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments); 1310 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true); 1311 } 1312 break; 1313 1314 case throw_incompatible_class_change_error_id: 1315 { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments); 1316 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false); 1317 } 1318 break; 1319 1320 case slow_subtype_check_id: 1321 { 1322 // Typical calling sequence: 1323 // __ push(klass_RInfo); // object klass or other subclass 1324 // __ push(sup_k_RInfo); // array element klass or other superclass 1325 // __ call(slow_subtype_check); 1326 // Note that the subclass is pushed first, and is therefore deepest. 1327 // Previous versions of this code reversed the names 'sub' and 'super'. 1328 // This was operationally harmless but made the code unreadable. 1329 enum layout { 1330 rax_off, SLOT2(raxH_off) 1331 rcx_off, SLOT2(rcxH_off) 1332 rsi_off, SLOT2(rsiH_off) 1333 rdi_off, SLOT2(rdiH_off) 1334 // saved_rbp_off, SLOT2(saved_rbpH_off) 1335 return_off, SLOT2(returnH_off) 1336 sup_k_off, SLOT2(sup_kH_off) 1337 klass_off, SLOT2(superH_off) 1338 framesize, 1339 result_off = klass_off // deepest argument is also the return value 1340 }; 1341 1342 __ set_info("slow_subtype_check", dont_gc_arguments); 1343 __ push(rdi); 1344 __ push(rsi); 1345 __ push(rcx); 1346 __ push(rax); 1347 1348 // This is called by pushing args and not with C abi 1349 __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass 1350 __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass 1351 1352 Label miss; 1353 __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss); 1354 1355 // fallthrough on success: 1356 __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result 1357 __ pop(rax); 1358 __ pop(rcx); 1359 __ pop(rsi); 1360 __ pop(rdi); 1361 __ ret(0); 1362 1363 __ bind(miss); 1364 __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result 1365 __ pop(rax); 1366 __ pop(rcx); 1367 __ pop(rsi); 1368 __ pop(rdi); 1369 __ ret(0); 1370 } 1371 break; 1372 1373 case monitorenter_nofpu_id: 1374 save_fpu_registers = false; 1375 // fall through 1376 case monitorenter_id: 1377 { 1378 StubFrame f(sasm, "monitorenter", dont_gc_arguments); 1379 OopMap* map = save_live_registers(sasm, 3, save_fpu_registers); 1380 1381 // Called with store_parameter and not C abi 1382 1383 f.load_argument(1, rax); // rax,: object 1384 f.load_argument(0, rbx); // rbx,: lock address 1385 1386 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx); 1387 1388 oop_maps = new OopMapSet(); 1389 oop_maps->add_gc_map(call_offset, map); 1390 restore_live_registers(sasm, save_fpu_registers); 1391 } 1392 break; 1393 1394 case monitorexit_nofpu_id: 1395 save_fpu_registers = false; 1396 // fall through 1397 case monitorexit_id: 1398 { 1399 StubFrame f(sasm, "monitorexit", dont_gc_arguments); 1400 OopMap* map = save_live_registers(sasm, 2, save_fpu_registers); 1401 1402 // Called with store_parameter and not C abi 1403 1404 f.load_argument(0, rax); // rax,: lock address 1405 1406 // note: really a leaf routine but must setup last java sp 1407 // => use call_RT for now (speed can be improved by 1408 // doing last java sp setup manually) 1409 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax); 1410 1411 oop_maps = new OopMapSet(); 1412 oop_maps->add_gc_map(call_offset, map); 1413 restore_live_registers(sasm, save_fpu_registers); 1414 } 1415 break; 1416 1417 case deoptimize_id: 1418 { 1419 StubFrame f(sasm, "deoptimize", dont_gc_arguments); 1420 const int num_rt_args = 2; // thread, trap_request 1421 OopMap* oop_map = save_live_registers(sasm, num_rt_args); 1422 f.load_argument(0, rax); 1423 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), rax); 1424 oop_maps = new OopMapSet(); 1425 oop_maps->add_gc_map(call_offset, oop_map); 1426 restore_live_registers(sasm); 1427 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); 1428 assert(deopt_blob != NULL, "deoptimization blob must have been created"); 1429 __ leave(); 1430 __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); 1431 } 1432 break; 1433 1434 case access_field_patching_id: 1435 { StubFrame f(sasm, "access_field_patching", dont_gc_arguments); 1436 // we should set up register map 1437 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching)); 1438 } 1439 break; 1440 1441 case load_klass_patching_id: 1442 { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments); 1443 // we should set up register map 1444 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching)); 1445 } 1446 break; 1447 1448 case load_mirror_patching_id: 1449 { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments); 1450 // we should set up register map 1451 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching)); 1452 } 1453 break; 1454 1455 case load_appendix_patching_id: 1456 { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments); 1457 // we should set up register map 1458 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching)); 1459 } 1460 break; 1461 1462 case dtrace_object_alloc_id: 1463 { // rax,: object 1464 StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments); 1465 // we can't gc here so skip the oopmap but make sure that all 1466 // the live registers get saved. 1467 save_live_registers(sasm, 1); 1468 1469 __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax)); 1470 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc))); 1471 NOT_LP64(__ pop(rax)); 1472 1473 restore_live_registers(sasm); 1474 } 1475 break; 1476 1477 case fpu2long_stub_id: 1478 { 1479 // rax, and rdx are destroyed, but should be free since the result is returned there 1480 // preserve rsi,ecx 1481 __ push(rsi); 1482 __ push(rcx); 1483 LP64_ONLY(__ push(rdx);) 1484 1485 // check for NaN 1486 Label return0, do_return, return_min_jlong, do_convert; 1487 1488 Address value_high_word(rsp, wordSize + 4); 1489 Address value_low_word(rsp, wordSize); 1490 Address result_high_word(rsp, 3*wordSize + 4); 1491 Address result_low_word(rsp, 3*wordSize); 1492 1493 __ subptr(rsp, 32); // more than enough on 32bit 1494 __ fst_d(value_low_word); 1495 __ movl(rax, value_high_word); 1496 __ andl(rax, 0x7ff00000); 1497 __ cmpl(rax, 0x7ff00000); 1498 __ jcc(Assembler::notEqual, do_convert); 1499 __ movl(rax, value_high_word); 1500 __ andl(rax, 0xfffff); 1501 __ orl(rax, value_low_word); 1502 __ jcc(Assembler::notZero, return0); 1503 1504 __ bind(do_convert); 1505 __ fnstcw(Address(rsp, 0)); 1506 __ movzwl(rax, Address(rsp, 0)); 1507 __ orl(rax, 0xc00); 1508 __ movw(Address(rsp, 2), rax); 1509 __ fldcw(Address(rsp, 2)); 1510 __ fwait(); 1511 __ fistp_d(result_low_word); 1512 __ fldcw(Address(rsp, 0)); 1513 __ fwait(); 1514 // This gets the entire long in rax on 64bit 1515 __ movptr(rax, result_low_word); 1516 // testing of high bits 1517 __ movl(rdx, result_high_word); 1518 __ mov(rcx, rax); 1519 // What the heck is the point of the next instruction??? 1520 __ xorl(rcx, 0x0); 1521 __ movl(rsi, 0x80000000); 1522 __ xorl(rsi, rdx); 1523 __ orl(rcx, rsi); 1524 __ jcc(Assembler::notEqual, do_return); 1525 __ fldz(); 1526 __ fcomp_d(value_low_word); 1527 __ fnstsw_ax(); 1528 #ifdef _LP64 1529 __ testl(rax, 0x4100); // ZF & CF == 0 1530 __ jcc(Assembler::equal, return_min_jlong); 1531 #else 1532 __ sahf(); 1533 __ jcc(Assembler::above, return_min_jlong); 1534 #endif // _LP64 1535 // return max_jlong 1536 #ifndef _LP64 1537 __ movl(rdx, 0x7fffffff); 1538 __ movl(rax, 0xffffffff); 1539 #else 1540 __ mov64(rax, CONST64(0x7fffffffffffffff)); 1541 #endif // _LP64 1542 __ jmp(do_return); 1543 1544 __ bind(return_min_jlong); 1545 #ifndef _LP64 1546 __ movl(rdx, 0x80000000); 1547 __ xorl(rax, rax); 1548 #else 1549 __ mov64(rax, UCONST64(0x8000000000000000)); 1550 #endif // _LP64 1551 __ jmp(do_return); 1552 1553 __ bind(return0); 1554 __ fpop(); 1555 #ifndef _LP64 1556 __ xorptr(rdx,rdx); 1557 __ xorptr(rax,rax); 1558 #else 1559 __ xorptr(rax, rax); 1560 #endif // _LP64 1561 1562 __ bind(do_return); 1563 __ addptr(rsp, 32); 1564 LP64_ONLY(__ pop(rdx);) 1565 __ pop(rcx); 1566 __ pop(rsi); 1567 __ ret(0); 1568 } 1569 break; 1570 1571 case predicate_failed_trap_id: 1572 { 1573 StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments); 1574 1575 OopMap* map = save_live_registers(sasm, 1); 1576 1577 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap)); 1578 oop_maps = new OopMapSet(); 1579 oop_maps->add_gc_map(call_offset, map); 1580 restore_live_registers(sasm); 1581 __ leave(); 1582 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); 1583 assert(deopt_blob != NULL, "deoptimization blob must have been created"); 1584 1585 __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); 1586 } 1587 break; 1588 1589 default: 1590 { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments); 1591 __ movptr(rax, (int)id); 1592 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax); 1593 __ should_not_reach_here(); 1594 } 1595 break; 1596 } 1597 return oop_maps; 1598 } 1599 1600 #undef __ 1601 1602 const char *Runtime1::pd_name_for_address(address entry) { 1603 return "<unknown function>"; 1604 }