/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#endif


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;
#ifdef _LP64
  // At a method handle call, the stack may not be properly aligned
  // when returning with an exception.
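  // (For orientation, a hedged note: the x86-64 ABIs require rsp to be
  //  16-byte aligned at a C call site, so this one stub re-aligns rsp
  //  explicitly -- see the andptr(rsp, -StackAlignmentInBytes) below --
  //  rather than relying on the interrupted frame being well-formed.)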
  align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
#endif

#ifdef _LP64
  mov(c_rarg0, thread);
  set_num_rt_args(0); // Nothing on stack
#else
  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
  get_thread(thread);
  push(thread);
#endif // _LP64

  int call_offset;
  if (!align_stack) {
    set_last_Java_frame(thread, noreg, rbp, NULL);
  } else {
    address the_pc = pc();
    call_offset = offset();
    set_last_Java_frame(thread, noreg, rbp, the_pc);
    andptr(rsp, -(StackAlignmentInBytes));    // Align stack
  }

  // do the call
  call(RuntimeAddress(entry));
  if (!align_stack) {
    call_offset = offset();
  }
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread(rax);
    cmpptr(thread, rax);
    jcc(Assembler::equal, L);
    int3();
    stop("StubAssembler::call_RT: rdi not callee saved?");
    bind(L);
  }
  pop(rax);
#endif
  reset_last_Java_frame(thread, true);

  // discard thread and arguments
  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));

  // check for pending exceptions
  { Label L;
    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler
    movptr(rax, Address(thread, Thread::pending_exception_offset()));
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    }
    if (metadata_result->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    }
    if (frame_size() == no_frame_size) {
      leave();
      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, thread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, thread);
  }
  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
#ifdef _LP64
  mov(c_rarg1, arg1);
#else
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
#ifdef _LP64
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      xchgq(arg1, arg2);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
#else
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg3);
    push(arg2);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg2);
    pop(c_rarg3);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
#else
  push(arg3);
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rbp + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}


StubFrame::~StubFrame() {
  __ leave();
  __ ret(0);
}

#undef __


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime
// while these values are on the stack. These positions neglect those arguments,
// but the code in save_live_registers will take the argument count into
// account.
//
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64

enum reg_save_layout {
  // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
  // happen and will assert if the stack size we create is misaligned
#ifdef _LP64
  align_dummy_0, align_dummy_1,
#endif // _LP64
#ifdef _WIN64
  // Windows always allocates space for its argument registers (see
  // frame::arg_reg_save_area_bytes).
  arg_reg_save_1, arg_reg_save_1H,                                                          // 0, 4
  arg_reg_save_2, arg_reg_save_2H,                                                          // 8, 12
  arg_reg_save_3, arg_reg_save_3H,                                                          // 16, 20
  arg_reg_save_4, arg_reg_save_4H,                                                          // 24, 28
#endif // _WIN64
  xmm_regs_as_doubles_off,                                                                  // 32
  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots,  // 160
  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,          // 224
  // fpu_state_end_off is exclusive
  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),                // 352
  marker = fpu_state_end_off, SLOT2(markerH)                                                // 352, 356
  extra_space_offset,                                                                       // 360
#ifdef _LP64
  r15_off = extra_space_offset, r15H_off,                                                   // 360, 364
  r14_off, r14H_off,                                                                        // 368, 372
  r13_off, r13H_off,                                                                        // 376, 380
  r12_off, r12H_off,                                                                        // 384, 388
  r11_off, r11H_off,                                                                        // 392, 396
  r10_off, r10H_off,                                                                        // 400, 404
  r9_off, r9H_off,                                                                          // 408, 412
  r8_off, r8H_off,                                                                          // 416, 420
  rdi_off, rdiH_off,                                                                        // 424, 428
#else
  rdi_off = extra_space_offset,
#endif // _LP64
  rsi_off, SLOT2(rsiH_off)                                                                  // 432, 436
  rbp_off, SLOT2(rbpH_off)                                                                  // 440, 444
  rsp_off, SLOT2(rspH_off)                                                                  // 448, 452
  rbx_off, SLOT2(rbxH_off)                                                                  // 456, 460
  rdx_off, SLOT2(rdxH_off)                                                                  // 464, 468
  rcx_off, SLOT2(rcxH_off)                                                                  // 472, 476
  rax_off, SLOT2(raxH_off)                                                                  // 480, 484
  saved_rbp_off, SLOT2(saved_rbpH_off)                                                      // 488, 492
  return_off, SLOT2(returnH_off)                                                            // 496, 500
  reg_save_frame_size   // As noted: neglects any parameters to runtime                     // 504
};



// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them and on P4
// saving FPU registers which don't contain anything appears
// expensive.  The deopt blob is the only thing which needs to
// describe FPU registers.  In all other cases it should be sufficient
// to simply save their current value.
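//
// A rough sketch of the 64-bit frame that save_live_registers() builds from
// the slot indices above (illustrative, not normative):
//
//   high addresses | return address           (return_off)
//                  | saved rbp                (saved_rbp_off)
//                  | rax ... r15              (pusha() area)
//                  | marker, FPU/XMM save area
//   low addresses  | <- rsp after the subptr in save_live_registers()
//
// Slot indices are in VMRegImpl::stack_slot_size (4-byte) units, which is
// why every 64-bit register takes an X_off/XH_off pair of slots.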

static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true) {

  // In 64bit all the args are in regs so there are no additional stack slots
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());

  // This is stupid but needed.
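  // (An explanatory note, not a behavioral change: OopMap slots are 32 bits
  //  wide, so each 64-bit register is described as two VMRegs; the ->next()
  //  calls below name the high halves, matching the X_off/XH_off slot pairs
  //  in reg_save_layout.)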
  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());

  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
#endif // _LP64

  int xmm_bypass_limit = FrameMap::nof_xmm_regs;
#ifdef _LP64
  if (UseAVX < 3) {
    xmm_bypass_limit = xmm_bypass_limit / 2;
  }
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      int fpu_off = float_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg fpu_name_0 = FrameMap::fpu_regname(n);
        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
        }
        fpu_off += 2;
      }
      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
    }

    if (UseSSE >= 2) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        if (n < xmm_bypass_limit) {
          VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
          // %%% This is really a waste but we'll keep things as they were for now
          if (true) {
            map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
          }
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");

    } else if (UseSSE == 1) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    }
  }

  return map;
}

static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ pusha();         // integer registers

  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");

  __ subptr(rsp,
            extra_space_offset * VMRegImpl::stack_slot_size);

#ifdef ASSERT
  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      // save FPU stack
      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fwait();

#ifdef ASSERT
      Label ok;
      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ jccb(Assembler::equal, ok);
      __ stop("corrupted control word detected");
      __ bind(ok);
#endif

      // Reset the control word to guard against exceptions being unmasked
      // since fstp_d can cause FPU stack underflow exceptions.  Write it
      // into the on stack copy and then reload that to make sure that the
      // current and future values are correct.
      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));

      // Save the FPU registers in de-opt-able form
      int offset = 0;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
        offset += 8;
      }
    }

    if (UseSSE >= 2) {
      // save XMM registers
      // XMM registers can contain float or double values, but this is not known here,
      // so always save them as doubles.
      // note that float values are _not_ converted automatically, so for float values
      // the second word contains only garbage data.
      int xmm_bypass_limit = FrameMap::nof_xmm_regs;
      int offset = 0;
#ifdef _LP64
      if (UseAVX < 3) {
        xmm_bypass_limit = xmm_bypass_limit / 2;
      }
#endif
      for (int n = 0; n < xmm_bypass_limit; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name);
        offset += 8;
      }
    } else if (UseSSE == 1) {
      // save XMM registers as float because doubles are not supported without SSE2 (num MMX == num fpu)
      int offset = 0;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name);
        offset += 8;
      }
    }
  }

  // FPU stack must be empty now
  __ verify_FPU(0, "save_live_registers");

  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}


static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      // restore XMM registers
      int xmm_bypass_limit = FrameMap::nof_xmm_regs;
#ifdef _LP64
      if (UseAVX < 3) {
        xmm_bypass_limit = xmm_bypass_limit / 2;
      }
#endif
      int offset = 0;
      for (int n = 0; n < xmm_bypass_limit; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movdbl(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
        offset += 8;
      }
    } else if (UseSSE == 1) {
      // restore XMM registers (num MMX == num fpu)
      int offset = 0;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movflt(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
        offset += 8;
      }
    }

    if (UseSSE < 2) {
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      // check that FPU stack is really empty
      __ verify_FPU(0, "restore_live_registers");
    }

  } else {
    // check that FPU stack is really empty
    __ verify_FPU(0, "restore_live_registers");
  }

#ifdef ASSERT
  {
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT

  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}


static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");

  restore_fpu(sasm, restore_fpu_registers);
  __ popa();
}


static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers_except_rax");

  restore_fpu(sasm, restore_fpu_registers);

#ifdef _LP64
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));

  __ addptr(rsp, 16 * wordSize);
#else

  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);
#endif // _LP64
}


void Runtime1::initialize_pd() {
  // nothing to do
}


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // preserve all registers
  int num_rt_args = has_argument ?
                    2 : 1;
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

  // now all registers are saved and can be used freely
  // verify that no old value is used accidentally
  __ invalidate_registers(true, true, true, true, true, true);

  // registers used by this stub
  const Register temp_reg = rbx;

  // load argument for exception that is passed as an argument into the stub
  if (has_argument) {
#ifdef _LP64
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
#else
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = rax;
  const Register exception_pc  = rdx;
  // other registers used in this stub
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into RAX
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // load issuing PC (the return address for this stub) into rdx
    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (RAX) and
    // exception pc (RDX) are dead.
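    // (Back-of-envelope, for illustration only: the frame set up here is
    //  2 words on 64-bit Linux, 2 + 32/8 = 6 words on Win64 assuming
    //  frame::arg_reg_save_area_bytes == 32, and 2 + 1 = 3 words on 32-bit,
    //  where the thread argument is passed on the stack.)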
    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
    break;
  }
  default:  ShouldNotReachHere();
  }

#ifdef TIERED
  // C2 can leave the fpu stack dirty
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED

  // verify that only rax and rdx are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that rax contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);

  // patch throwing pc into return address (has bci & oop map)
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // rax: handler address
  //      will be the deopt blob if the nmethod was deoptimized while we looked up
  //      the handler, regardless of whether the handler existed in the nmethod.

  // only rax is valid at this time; all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ movptr(Address(rbp, 1*BytesPerWord), rax);

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address.
    __ leave();
    __ pop(rcx);
    __ jmp(rcx);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = rax;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
  // other registers used in this stub
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // verify that only rax is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // save exception_oop in callee-saved register to preserve it during runtime calls
  __ verify_not_null_oop(exception_oop);
  __ movptr(exception_oop_callee_saved, exception_oop);

  NOT_LP64(__ get_thread(thread);)
  // Get return address (is on top of stack after leave).
  __ movptr(exception_pc, Address(rsp, 0));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  // rax: exception handler address of the caller

  // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

  // Restore exception oop to RAX (required convention of exception handler).
  __ movptr(exception_oop, exception_oop_callee_saved);

  // verify that there is really a valid exception in rax
  __ verify_not_null_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // rax: exception oop
  // rdx: throwing pc
  // rbx: exception handler
  __ jmp(handler_addr);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number also affects the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
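  // (A sketch of how a patching stub is reached, for orientation rather than
  //  as the definitive sequence: compiled code calls e.g. the
  //  access_field_patching_id stub, which lands here; the runtime call below
  //  patches the originating site, and on return the patched code is either
  //  re-executed or the method is deoptimized.)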
  const int num_rt_args = 2;  // thread + dummy

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

#ifdef _LP64
  const Register thread = r15_thread;
  // No need to worry about dummy
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy

  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  // do the call
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64

  // check for pending exceptions
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler

    __ testptr(rax, rax);                                   // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // check that there is really a valid exception
    __ verify_not_null_oop(rax);

    // load throwing pc: this is the return address of the stub
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

    restore_live_registers(sasm);

    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack

    // Forward the exception directly to the deopt blob. We can blow no
    // registers and must leave the throwing pc on the stack. A patch may
    // have values live in registers, so use the entry point that expects
    // the exception in TLS.
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ testptr(rax, rax);                                 // have we deoptimized?
  __ jcc(Assembler::equal, cont);                       // no

  // Will reexecute. The proper return address is already on the stack,
  // so we just restore registers, pop all of our frame but the return
  // address, and jump to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);

  return oop_maps;
}


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj   = rax; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && UseTLAB
            && Universe::heap()->supports_inline_contig_alloc()) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1       = rbx;
          Register t2       = rsi;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(rdi);
          __ push(rbx);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
            __ jcc(Assembler::notEqual, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
            __ jcc(Assembler::lessEqual, not_ok);
            __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
            __ jcc(Assembler::zero, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
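          // (Illustrative note: for instance klasses the layout helper is
          //  simply the object size in bytes -- e.g. a bare java.lang.Object
          //  is typically 16 bytes, though exact sizes depend on the build's
          //  object layout -- so obj_size loaded below can feed
          //  eden_allocate() directly.)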
          Label retry_tlab, try_eden;
          const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
          NOT_LP64(__ get_thread(thread));

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(thread, obj_size, 0);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = rax, method = rbx;
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        // Retrieve bci
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ movptr(method, Address(rbp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = rbx; // Incoming
        Register klass  = rdx; // Incoming
        Register obj    = rax; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ movl(t0, Address(klass, Klass::layout_helper_offset()));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmpl(t0, tag);
          __ jcc(Assembler::equal, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        // If we got here, the TLAB allocation failed, so try allocating from
        // eden if inline contiguous allocations are supported.
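        // (Worked example of the size computation in the fast path below,
        //  with illustrative numbers: for an int[10], the layout helper's
        //  low byte holds log2(element size) = 2 and its middle bits the
        //  header size hdr, so arr_size = hdr + (10 << 2), rounded up to
        //  the object alignment.)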
        if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Register arr_size = rsi;
          Register t1       = rcx;  // must be rcx for use as shift count
          Register t2       = rdi;
          Label slow_path;

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does the right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does the right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size

          // Using t2 for non 64-bit.
          const Register thread = NOT_LP64(t2) LP64_ONLY(r15_thread);
          NOT_LP64(__ get_thread(thread));
          __ incr_allocated_bytes(thread, arr_size, 0);

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // rax: klass
        // rbx: rank
        // rcx: address of 1st dimension
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax: new multi array
        __ verify_oop(rax);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C ABI locations.

#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        // The object is passed on the stack and we haven't pushed a
        // frame yet so it's one word away from the top of the stack.
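        // (I.e. the assumed layout at this point is: [rsp + 0] the return
        //  address, [rsp + wordSize] the object argument.)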
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ load_klass(t, rax);
        __ movl(t, Address(t, Klass::access_flags_offset()));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(0);
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
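        // (The compiled method has already executed its leave() before
        //  jumping here, so only its return address remains on top of the
        //  stack -- which is why generate_unwind_exception loads the
        //  throwing pc from (rsp + 0).)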
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ call(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        // Previous versions of this code reversed the names 'sub' and 'super'.
        // This was operationally harmless but made the code unreadable.
        enum layout {
          rax_off, SLOT2(raxH_off)
          rcx_off, SLOT2(rcxH_off)
          rsi_off, SLOT2(rsiH_off)
          rdi_off, SLOT2(rdiH_off)
          // saved_rbp_off, SLOT2(saved_rbpH_off)
          return_off, SLOT2(returnH_off)
          sup_k_off, SLOT2(sup_kH_off)
          klass_off, SLOT2(superH_off)
          framesize,
          result_off = klass_off  // deepest argument is also the return value
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(rdi);
        __ push(rsi);
        __ push(rcx);
        __ push(rax);

        // This is called by pushing args and not with C abi
        __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        Label miss;
        __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);

        // fallthrough on success:
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);

        __ bind(miss);
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, rax); // rax: object
        f.load_argument(0, rbx); // rbx: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map =
          save_live_registers(sasm, 2, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, rax); // rax: lock address

        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        const int num_rt_args = 2; // thread, trap_request
        OopMap* oop_map = save_live_registers(sasm, num_rt_args);
        f.load_argument(0, rax);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // rax: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // we can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
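        // (Note: the OopMap returned by save_live_registers() is dropped on
        //  purpose here -- with no oop map registered for this call site, a
        //  GC could not scan the frame, matching the "can't gc here"
        //  constraint stated above.)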
        save_live_registers(sasm, 1);

        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there
        // preserve rsi, rcx
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // check for NaN
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32);                    // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);

        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit
        __ movptr(rax, result_low_word);
        // testing of high bits
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
#ifdef _LP64
        __ testl(rax, 0x4100);  // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
#else
        __ sahf();
        __ jcc(Assembler::above, return_min_jlong);
#endif // _LP64
        // return max_jlong
#ifndef _LP64
        __ movl(rdx, 0x7fffffff);
        __ movl(rax, 0xffffffff);
#else
        __ mov64(rax, CONST64(0x7fffffffffffffff));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return_min_jlong);
#ifndef _LP64
        __ movl(rdx, 0x80000000);
        __ xorl(rax, rax);
#else
        __ mov64(rax, UCONST64(0x8000000000000000));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
#ifndef _LP64
        __ xorptr(rdx, rdx);
        __ xorptr(rax, rax);
#else
        __ xorptr(rax, rax);
#endif // _LP64

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;

#if INCLUDE_ALL_GCS
    case g1_pre_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
        // arg0 : previous value of memory

        BarrierSet* bs = BarrierSet::barrier_set();
        if (bs->kind() != BarrierSet::G1BarrierSet) {
          __ movptr(rax, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
          __ should_not_reach_here();
          break;
        }
        __ push(rax);
        __ push(rdx);

        const Register pre_val = rax;
        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
        const Register tmp = rdx;

        NOT_LP64(__ get_thread(thread);)

        Address
          queue_active(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
        Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
        Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

        Label done;
        Label runtime;

        // Is marking still active?
        if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
          __ cmpl(queue_active, 0);
        } else {
          assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
          __ cmpb(queue_active, 0);
        }
        __ jcc(Assembler::equal, done);

        // Can we store original value in the thread's buffer?

        __ movptr(tmp, queue_index);
        __ testptr(tmp, tmp);
        __ jcc(Assembler::zero, runtime);
        __ subptr(tmp, wordSize);
        __ movptr(queue_index, tmp);
        __ addptr(tmp, buffer);

        // prev_val (rax)
        f.load_argument(0, pre_val);
        __ movptr(Address(tmp, 0), pre_val);
        __ jmp(done);

        __ bind(runtime);

        save_live_registers(sasm, 3);

        // load the pre-value
        f.load_argument(0, rcx);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);

        restore_live_registers(sasm);

        __ bind(done);

        __ pop(rdx);
        __ pop(rax);
      }
      break;

    case g1_post_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);

        BarrierSet* bs = BarrierSet::barrier_set();
        if (bs->kind() != BarrierSet::G1BarrierSet) {
          __ movptr(rax, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
          __ should_not_reach_here();
          break;
        }

        // arg0: store_address
        Address store_addr(rbp, 2*BytesPerWord);

        Label done;
        Label enqueued;
        Label runtime;

        // At this point we know new_value is non-NULL and the new_value crosses regions.
        // Must check to see if card is already dirty

        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);

        Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
        Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

        __ push(rax);
        __ push(rcx);

        const Register cardtable = rax;
        const Register card_addr = rcx;

        f.load_argument(0, card_addr);
        __ shrptr(card_addr, CardTable::card_shift);
        // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
        // a valid address and therefore is not properly handled by the relocation code.
        __ movptr(cardtable, ci_card_table_address_as<intptr_t>());
        __ addptr(card_addr, cardtable);

        NOT_LP64(__ get_thread(thread);)

        __ cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
        __ jcc(Assembler::equal, done);

        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
        __ cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
        __ jcc(Assembler::equal, done);

        // storing region crossing non-NULL, card is clean.
        // dirty card and log.
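        // (Illustrative arithmetic, assuming the usual 512-byte cards,
        //  i.e. CardTable::card_shift == 9: a store to address A maps to
        //  the card byte at byte_map_base + (A >> 9); the young-card and
        //  dirty-card checks above filter out stores whose card needs no
        //  further logging before this dirtying write.)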

        __ movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());

        const Register tmp = rdx;
        __ push(rdx);

        __ movptr(tmp, queue_index);
        __ testptr(tmp, tmp);
        __ jcc(Assembler::zero, runtime);
        __ subptr(tmp, wordSize);
        __ movptr(queue_index, tmp);
        __ addptr(tmp, buffer);
        __ movptr(Address(tmp, 0), card_addr);
        __ jmp(enqueued);

        __ bind(runtime);

        save_live_registers(sasm, 3);

        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);

        restore_live_registers(sasm);

        __ bind(enqueued);
        __ pop(rdx);

        __ bind(done);
        __ pop(rcx);
        __ pop(rax);
      }
      break;
#endif // INCLUDE_ALL_GCS

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm, 1);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ movptr(rax, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}