/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;
#ifdef _LP64
  // At a method handle call, the stack may not be properly aligned
  // when returning with an exception.
  align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
#endif

#ifdef _LP64
  mov(c_rarg0, thread);
  set_num_rt_args(0); // Nothing on stack
#else
  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
  get_thread(thread);
  push(thread);
#endif // _LP64

  int call_offset;
  if (!align_stack) {
    set_last_Java_frame(thread, noreg, rbp, NULL);
  } else {
    address the_pc = pc();
    call_offset = offset();
    set_last_Java_frame(thread, noreg, rbp, the_pc);
    andptr(rsp, -(StackAlignmentInBytes));    // Align stack
  }

  // do the call
  call(RuntimeAddress(entry));
  if (!align_stack) {
    call_offset = offset();
  }
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread(rax);
    cmpptr(thread, rax);
    jcc(Assembler::equal, L);
    int3();
    stop("StubAssembler::call_RT: rdi not callee saved?");
    bind(L);
  }
  pop(rax);
#endif
  reset_last_Java_frame(thread, true, align_stack);

  // discard thread and arguments
  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));

  // check for pending exceptions
  { Label L;
    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler
    movptr(rax, Address(thread, Thread::pending_exception_offset()));
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    }
    if (metadata_result->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    }
    if (frame_size() == no_frame_size) {
      leave();
      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, thread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, thread);
  }
  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
#ifdef _LP64
  mov(c_rarg1, arg1);
#else
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
#ifdef _LP64
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      xchgq(arg1, arg2);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
#else
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
  // if any source register conflicts with another destination register,
  // use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg3);
    push(arg2);
    push(arg1);
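    // The three pushes above and the three pops below rotate the values
    // through the stack: after push(arg3); push(arg2); push(arg1); the
    // stack holds arg1 on top, so popping into c_rarg1, c_rarg2, c_rarg3
    // in that order lands each value in the right register no matter how
    // the source and destination register sets overlap.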
    pop(c_rarg1);
    pop(c_rarg2);
    pop(c_rarg3);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
#else
  push(arg3);
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rbp + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}


StubFrame::~StubFrame() {
  __ leave();
  __ ret(0);
}

#undef __


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime call
// while these values are on the stack. These positions neglect those arguments,
// but the code in save_live_registers will take the argument count into
// account.
//
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64

enum reg_save_layout {
  // 64-bit needs to keep the stack 16-byte aligned, so we add alignment
  // dummies to make that happen, and assert if the frame size we create
  // is misaligned.
#ifdef _LP64
  align_dummy_0, align_dummy_1,
#endif // _LP64
#ifdef _WIN64
  // Windows always allocates space for its argument registers (see
  // frame::arg_reg_save_area_bytes).
  arg_reg_save_1, arg_reg_save_1H,                                                          // 0, 4
  arg_reg_save_2, arg_reg_save_2H,                                                          // 8, 12
  arg_reg_save_3, arg_reg_save_3H,                                                          // 16, 20
  arg_reg_save_4, arg_reg_save_4H,                                                          // 24, 28
#endif // _WIN64
  xmm_regs_as_doubles_off,                                                                  // 32
  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots,  // 160
  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,          // 224
  // fpu_state_end_off is exclusive
  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),                // 352
  marker = fpu_state_end_off, SLOT2(markerH)                                                // 352, 356
  extra_space_offset,                                                                      // 360
#ifdef _LP64
  r15_off = extra_space_offset, r15H_off,                                                  // 360, 364
  r14_off, r14H_off,                                                                       // 368, 372
  r13_off, r13H_off,                                                                       // 376, 380
  r12_off, r12H_off,                                                                       // 384, 388
  r11_off, r11H_off,                                                                       // 392, 396
  r10_off, r10H_off,                                                                       // 400, 404
  r9_off, r9H_off,                                                                         // 408, 412
  r8_off, r8H_off,                                                                         // 416, 420
  rdi_off, rdiH_off,                                                                       // 424, 428
#else
  rdi_off = extra_space_offset,
#endif // _LP64
  rsi_off, SLOT2(rsiH_off)                                                                 // 432, 436
  rbp_off, SLOT2(rbpH_off)                                                                 // 440, 444
  rsp_off, SLOT2(rspH_off)                                                                 // 448, 452
  rbx_off, SLOT2(rbxH_off)                                                                 // 456, 460
  rdx_off, SLOT2(rdxH_off)                                                                 // 464, 468
  rcx_off, SLOT2(rcxH_off)                                                                 // 472, 476
  rax_off, SLOT2(raxH_off)                                                                 // 480, 484
  saved_rbp_off, SLOT2(saved_rbpH_off)                                                     // 488, 492
  return_off, SLOT2(returnH_off)                                                           // 496, 500
  reg_save_frame_size   // As noted: neglects any parameters to runtime                    // 504
};



// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them, and on P4
// saving FPU registers which don't contain anything appears
// expensive.  The deopt blob is the only thing which needs to
// describe FPU registers.  In all other cases it should be sufficient
// to simply save their current value.
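//
// Each reg_save_layout constant above is a VMReg stack-slot index; the byte
// offset from rsp after save_live_registers is index * VMRegImpl::stack_slot_size
// (4 bytes on x86). For example, the saved rax is read back from
// rsp + (rax_off + num_rt_args) * VMRegImpl::stack_slot_size. Note that the
// '// n' annotations in the enum are byte offsets for one particular #ifdef
// configuration and do not hold for every combination of _LP64/_WIN64.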

static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true) {

  // In 64-bit all the args are in regs so there are no additional stack slots
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());

  // This is stupid but needed.
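  // On 64-bit each general-purpose register spans two 32-bit stack slots,
  // and an OopMap describes values one slot at a time, so the high half of
  // each register gets its own entry at the corresponding rNH_off slot via
  // VMReg::next().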
  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());

  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
#endif // _LP64

  int xmm_bypass_limit = FrameMap::nof_xmm_regs;
#ifdef _LP64
  if (UseAVX < 3) {
    xmm_bypass_limit = xmm_bypass_limit / 2;
  }
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      int fpu_off = float_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg fpu_name_0 = FrameMap::fpu_regname(n);
        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
        }
        fpu_off += 2;
      }
      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
    }

    if (UseSSE >= 2) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        if (n < xmm_bypass_limit) {
          VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
          // %%% This is really a waste but we'll keep things as they were for now
          if (true) {
            map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
          }
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");

    } else if (UseSSE == 1) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        if (n < xmm_bypass_limit) {
          VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    }
  }

  return map;
}

static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ pusha();         // integer registers

  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
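  // pusha() stored the integer registers (including a slot for rsp) at the
  // top of the layout; the subptr below extends the frame downwards by
  // extra_space_offset slots to cover the XMM/FPU save areas and the debug
  // marker described in reg_save_layout.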

  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);

#ifdef ASSERT
  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      // save FPU stack
      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fwait();

#ifdef ASSERT
      Label ok;
      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ jccb(Assembler::equal, ok);
      __ stop("corrupted control word detected");
      __ bind(ok);
#endif

      // Reset the control word to guard against exceptions being unmasked
      // since fstp_d can cause FPU stack underflow exceptions.  Write it
      // into the on-stack copy and then reload that to make sure that the
      // current and future values are correct.
      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));

      // Save the FPU registers in de-opt-able form
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE >= 2) {
      // save XMM registers
      // XMM registers can contain float or double values, but this is not known here,
      // so always save them as doubles.
      // note that float values are _not_ converted automatically, so for float values
      // the second word contains only garbage data.
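      // Each XMM register gets a full 8-byte slot whether it holds a float
      // or a double: xmm_n is stored at
      // rsp + xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + n * 8,
      // which matches the two-slots-per-register stride in generate_oop_map.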
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
#ifdef _LP64
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
      if (UseAVX > 2) {
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 128), xmm16);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 136), xmm17);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 144), xmm18);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 152), xmm19);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 160), xmm20);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 168), xmm21);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 176), xmm22);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 184), xmm23);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 192), xmm24);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 200), xmm25);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 208), xmm26);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 216), xmm27);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 224), xmm28);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 232), xmm29);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 240), xmm30);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 248), xmm31);
      }
#endif // _LP64
    } else if (UseSSE == 1) {
      // save XMM registers as float because double not supported without SSE2
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
    }
  }

  // FPU stack must be empty now
  __ verify_FPU(0, "save_live_registers");

  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}


static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      // restore XMM registers
      __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
#ifdef _LP64
      __ movdbl(xmm8, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64));
      __ movdbl(xmm9, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72));
      __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80));
      __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88));
      __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96));
      __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
      __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
      __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
      if (UseAVX > 2) {
        __ movdbl(xmm16, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 128));
        __ movdbl(xmm17, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 136));
        __ movdbl(xmm18, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 144));
        __ movdbl(xmm19, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 152));
        __ movdbl(xmm20, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 160));
        __ movdbl(xmm21, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 168));
        __ movdbl(xmm22, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 176));
        __ movdbl(xmm23, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 184));
        __ movdbl(xmm24, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 192));
        __ movdbl(xmm25, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 200));
        __ movdbl(xmm26, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 208));
        __ movdbl(xmm27, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 216));
        __ movdbl(xmm28, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 224));
        __ movdbl(xmm29, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 232));
        __ movdbl(xmm30, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 240));
        __ movdbl(xmm31, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 248));
      }
#endif // _LP64
    } else if (UseSSE == 1) {
      // restore XMM registers
      __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE < 2) {
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      // check that FPU stack is really empty
      __ verify_FPU(0, "restore_live_registers");
    }

  } else {
    // check that FPU stack is really empty
    __ verify_FPU(0, "restore_live_registers");
  }

#ifdef ASSERT
  {
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT

  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}


static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");

  restore_fpu(sasm, restore_fpu_registers);
  __ popa();
}


static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers_except_rax");

  restore_fpu(sasm, restore_fpu_registers);

#ifdef _LP64
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));

  __ addptr(rsp, 16 * wordSize);
#else

  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);
#endif // _LP64
}


void Runtime1::initialize_pd() {
  // nothing to do
}


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // preserve all registers
  int num_rt_args = has_argument ? 2 : 1;
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

  // now all registers are saved and can be used freely
  // verify that no old value is used accidentally
  __ invalidate_registers(true, true, true, true, true, true);

  // registers used by this stub
  const Register temp_reg = rbx;

  // load argument for exception that is passed as an argument into the stub
  if (has_argument) {
#ifdef _LP64
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
#else
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = rax;
  const Register exception_pc  = rdx;
  // other registers used in this stub
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into RAX
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // load issuing PC (the return address for this stub) into rdx
    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (RAX) and
    // exception pc (RDX) are dead.
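    // Only a minimal frame exists here: the caller pushed a return address
    // and this stub's prologue pushed rbp (plus, on 32-bit, a slot for the
    // thread argument, and on Win64 the argument-register shadow space).
    // The frame_size below describes exactly that, and the OopMap stays
    // empty since nothing else is saved.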
    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
    break;
  }
  default:  ShouldNotReachHere();
  }

#ifdef TIERED
  // C2 can leave the fpu stack dirty
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED

  // verify that only rax and rdx are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that rax contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);

  // patch throwing pc into return address (has bci & oop map)
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // rax: handler address
  //      will be the deopt blob if the nmethod was deoptimized while we looked up
  //      the handler, regardless of whether a handler existed in the nmethod.

  // only rax is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ movptr(Address(rbp, 1*BytesPerWord), rax);

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address.
    __ leave();
    __ pop(rcx);
    __ jmp(rcx);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = rax;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
  // other registers used in this stub
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // verify that only rax is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // save exception_oop in a callee-saved register to preserve it during runtime calls
  __ verify_not_null_oop(exception_oop);
  __ movptr(exception_oop_callee_saved, exception_oop);

  NOT_LP64(__ get_thread(thread);)
  // Get return address (is on top of stack after leave).
  __ movptr(exception_pc, Address(rsp, 0));

  // search for the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  // rax: exception handler address of the caller

  // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

  // Restore exception oop to RAX (required convention of exception handler).
  __ movptr(exception_oop, exception_oop_callee_saved);

  // verify that there is really a valid exception in rax
  __ verify_not_null_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // rax: exception oop
  // rdx: throwing pc
  // rbx: exception handler
  __ jmp(handler_addr);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number also affects the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
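  // In outline: save all registers, call the patching runtime entry with the
  // thread (plus a dummy argument so the shared oop-map matches), then take
  // one of three exits: forward a pending exception, re-execute through the
  // deopt blob if the nmethod was deoptimized while we were patching, or
  // simply return and retry the now-patched code.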
  const int num_rt_args = 2;  // thread + dummy

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

#ifdef _LP64
  const Register thread = r15_thread;
  // No need to worry about dummy
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy

  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  // do the call
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true, false);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64

  // check for pending exceptions
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler

    __ testptr(rax, rax);                                   // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // check that there is really a valid exception
    __ verify_not_null_oop(rax);

    // load throwing pc: this is the return address of the stub
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

    restore_live_registers(sasm);

    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack

    // Forward the exception directly to deopt blob. We can blow no
    // registers and must leave throwing pc on the stack.  A patch may
    // have values live in registers, so use the entry point that expects
    // the exception in tls.
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ testptr(rax, rax);                                 // have we deoptimized?
  __ jcc(Assembler::equal, cont);                       // no

  // Will reexecute. Proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump
  // to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);

  return oop_maps;
}


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj   = rax; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1       = rbx;
          Register t2       = rsi;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(rdi);
          __ push(rbx);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
            __ jcc(Assembler::notEqual, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
            __ jcc(Assembler::lessEqual, not_ok);
            __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
            __ jcc(Assembler::zero, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
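          // tlab_refill jumps to retry_tlab once the TLAB has been refilled,
          // to try_eden when allocation should go directly to eden, and to
          // slow_path when neither works; it hands back the register holding
          // the JavaThread (rdi here, per the comment below).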
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi

          __ bind(retry_tlab);

          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(thread, obj_size, 0);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = rax, method = rbx;
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        // Retrieve bci
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ movptr(method, Address(rbp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = rbx; // Incoming
        Register klass  = rdx; // Incoming
        Register obj    = rax; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ movl(t0, Address(klass, Klass::layout_helper_offset()));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmpl(t0, tag);
          __ jcc(Assembler::equal, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = rsi;
          Register t1       = rcx;  // must be rcx for use as shift count
          Register t2       = rdi;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
          __ jcc(Assembler::above, slow_path);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
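          // Worked example of the size computation below (illustrative,
          // assuming a 16-byte array header): for an int array of length 10
          // the layout_helper's low bits give an element shift of 2, so
          // arr_size = 16 + (10 << 2) = 56, which is already a multiple of
          // MinObjAlignmentInBytes and survives the align-up unchanged.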
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi

          __ bind(retry_tlab);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does the right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does the right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does the right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does the right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0);

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // rax: klass
        // rbx: rank
        // rcx: address of 1st dimension
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax: new multi array
        __ verify_oop(rax);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime, so the arguments
        // will be placed in C ABI locations.

#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        // The object is passed on the stack and we haven't pushed a
        // frame yet, so it's one word away from the top of stack.
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ load_klass(t, rax);
        __ movl(t, Address(t, Klass::access_flags_offset()));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(0);
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ call(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        // Previous versions of this code reversed the names 'sub' and 'super'.
        // This was operationally harmless but made the code unreadable.
        enum layout {
          rax_off, SLOT2(raxH_off)
          rcx_off, SLOT2(rcxH_off)
          rsi_off, SLOT2(rsiH_off)
          rdi_off, SLOT2(rdiH_off)
          // saved_rbp_off, SLOT2(saved_rbpH_off)
          return_off, SLOT2(returnH_off)
          sup_k_off, SLOT2(sup_kH_off)
          klass_off, SLOT2(superH_off)
          framesize,
          result_off = klass_off  // deepest argument is also the return value
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(rdi);
        __ push(rsi);
        __ push(rcx);
        __ push(rax);

        // This is called by pushing args and not with C abi
        __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        Label miss;
        __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);

        // fallthrough on success:
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);

        __ bind(miss);
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, rax); // rax: object
        f.load_argument(0, rbx); // rbx: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        const int num_rt_args = 2; // thread, trap_request
        OopMap* oop_map = save_live_registers(sasm, num_rt_args);
        f.load_argument(0, rax);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;
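
    // The four *_patching stubs above share one shape: generate_patching
    // traps into the runtime, which resolves the needed field offset, klass,
    // mirror or call-site appendix and then rewrites the compiled code at
    // the call site, so later executions take the fast path without
    // re-entering the stub.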

    case dtrace_object_alloc_id:
      { // rax: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // we can't GC here, so skip the oopmap, but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);

        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there
        // preserve rsi and rcx
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // check for NaN
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32);                    // more than enough on 32-bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);              // isolate the exponent bits
        __ cmpl(rax, 0x7ff00000);              // all ones => NaN or infinity
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);                 // isolate the mantissa bits
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);   // non-zero mantissa => NaN => return 0

        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);                    // set rounding mode to round-toward-zero
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));             // restore the original rounding mode
        __ fwait();
        // This gets the entire long in rax on 64-bit
        __ movptr(rax, result_low_word);
        // test the high bits: did fistp produce the "integer indefinite" value
        // min_jlong, which indicates overflow or an unordered operand?
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // Note: this xorl only re-sets the flags (and zero-extends rcx on
        // 64-bit); the following orl determines the branch.
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        // result == min_jlong: compare the operand against zero to decide
        // whether to return min_jlong or max_jlong
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
#ifdef _LP64
        __ testl(rax, 0x4100);  // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
#else
        __ sahf();
        __ jcc(Assembler::above, return_min_jlong);
#endif // _LP64
        // return max_jlong
#ifndef _LP64
        __ movl(rdx, 0x7fffffff);
        __ movl(rax, 0xffffffff);
#else
        __ mov64(rax, CONST64(0x7fffffffffffffff));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return_min_jlong);
#ifndef _LP64
        __ movl(rdx, 0x80000000);
        __ xorl(rax, rax);
#else
        __ mov64(rax, UCONST64(0x8000000000000000));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
#ifndef _LP64
        __ xorptr(rdx, rdx);
        __ xorptr(rax, rax);
#else
        __ xorptr(rax, rax);
#endif // _LP64

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;
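
    // The fpu2long stub above implements Java's truncating (long) cast for a
    // value left on the x87 stack: fistp converts using the current rounding
    // mode, so the control word is temporarily switched to round-toward-zero
    // (the 0xc00 bits), and the "integer indefinite" result min_jlong is
    // fixed up afterwards. Roughly, in C-like pseudo-code (illustrative only):
    //
    //   if (isnan(d))               return 0;         // NaN converts to 0
    //   if (d >= (double)max_jlong) return max_jlong; // positive overflow
    //   if (d <= (double)min_jlong) return min_jlong; // negative overflow
    //   return (long long)d;                          // truncate toward zero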

#if INCLUDE_ALL_GCS
    case shenandoah_write_barrier_slow_id:
      {
        StubFrame f(sasm, "shenandoah_write_barrier", dont_gc_arguments);

        save_live_registers(sasm, 1);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahBarrierSet::write_barrier_c1), r15_thread, rax);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(rax);
      }
      break;

    case g1_pre_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
        // arg0: previous value of memory

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging && bs->kind() != BarrierSet::ShenandoahBarrierSet) {
          __ movptr(rax, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
          __ should_not_reach_here();
          break;
        }
        __ push(rax);
        __ push(rdx);

        const Register pre_val = rax;
        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
        const Register tmp = rdx;

        NOT_LP64(__ get_thread(thread);)

        Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));

        Label done;
        Label runtime;

        // Can we store the original value in the thread's buffer?

        __ movptr(tmp, queue_index);
        __ testptr(tmp, tmp);
        __ jcc(Assembler::zero, runtime);  // index == 0 => buffer is full => call into the runtime
        __ subptr(tmp, wordSize);
        __ movptr(queue_index, tmp);
        __ addptr(tmp, buffer);

        // pre_val (rax)
        f.load_argument(0, pre_val);
        __ movptr(Address(tmp, 0), pre_val);
        __ jmp(done);

        __ bind(runtime);
        __ push(rcx);
#ifdef _LP64
        __ push(r8);
        __ push(r9);
        __ push(r10);
        __ push(r11);
# ifndef _WIN64
        __ push(rdi);
        __ push(rsi);
# endif
#endif
        // load the pre-value
        f.load_argument(0, rcx);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
#ifdef _LP64
# ifndef _WIN64
        __ pop(rsi);
        __ pop(rdi);
# endif
        __ pop(r11);
        __ pop(r10);
        __ pop(r9);
        __ pop(r8);
#endif
        __ pop(rcx);
        __ bind(done);

        __ pop(rdx);
        __ pop(rax);
      }
      break;

    case g1_post_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);

        // arg0: store_address
        Address store_addr(rbp, 2*BytesPerWord);

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() == BarrierSet::ShenandoahBarrierSet) {
          __ movptr(rax, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
          __ should_not_reach_here();
          break;
        }
        CardTableModRefBS* ct =
          barrier_set_cast<CardTableModRefBS>(bs);
        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

        Label done;
        Label enqueued;
        Label runtime;

        // At this point we know new_value is non-NULL and that it crosses regions.
        // Must check to see if the card is already dirty.

        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);

        Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));

        __ push(rax);
        __ push(rcx);

        const Register cardtable = rax;
        const Register card_addr = rcx;

        f.load_argument(0, card_addr);
        __ shrptr(card_addr, CardTableModRefBS::card_shift);
        // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
        // a valid address and therefore is not properly handled by the relocation code.
        __ movptr(cardtable, (intptr_t)ct->byte_map_base);
        __ addptr(card_addr, cardtable);

        NOT_LP64(__ get_thread(thread);)

        __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
        __ jcc(Assembler::equal, done);

        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
        __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
        __ jcc(Assembler::equal, done);

        // Storing a region-crossing non-NULL value and the card is clean:
        // dirty the card and log it.

        __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());

        const Register tmp = rdx;
        __ push(rdx);

        __ movptr(tmp, queue_index);
        __ testptr(tmp, tmp);
        __ jcc(Assembler::zero, runtime);  // index == 0 => buffer is full => call into the runtime
        __ subptr(tmp, wordSize);
        __ movptr(queue_index, tmp);
        __ addptr(tmp, buffer);
        __ movptr(Address(tmp, 0), card_addr);
        __ jmp(enqueued);

        __ bind(runtime);
#ifdef _LP64
        __ push(r8);
        __ push(r9);
        __ push(r10);
        __ push(r11);
# ifndef _WIN64
        __ push(rdi);
        __ push(rsi);
# endif
#endif
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
#ifdef _LP64
# ifndef _WIN64
        __ pop(rsi);
        __ pop(rdi);
# endif
        __ pop(r11);
        __ pop(r10);
        __ pop(r9);
        __ pop(r8);
#endif
        __ bind(enqueued);
        __ pop(rdx);

        __ bind(done);
        __ pop(rcx);
        __ pop(rax);
      }
      break;
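
    // Taken together, the two G1 stubs above are the slow paths of the SATB
    // write barrier: the pre-barrier logs the previous field value on the
    // thread-local SATB queue, and the post-barrier dirties the card covering
    // a region-crossing store and logs it on the dirty-card queue; the
    // runtime (g1_wb_pre / g1_wb_post) is entered only when a queue buffer is
    // full. The post-barrier slow path, roughly (illustrative only):
    //
    //   jbyte* card = byte_map_base + (store_addr >> card_shift);
    //   if (*card != g1_young_card_val && *card != dirty_card_val) {
    //     *card = dirty_card_val;           // after a StoreLoad barrier
    //     enqueue(dirty_card_queue, card);  // may call g1_wb_post
    //   }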
#endif // INCLUDE_ALL_GCS

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm, 1);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ movptr(rax, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}