/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;
#ifdef _LP64
  // At a method handle call, the stack may not be properly aligned
  // when returning with an exception.
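  // (Stubs normally inherit 16-byte alignment from their caller; the
  //  method-handle exception-return path may arrive misaligned, which is
  //  presumably why rsp is re-aligned explicitly below before the call.)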
  align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
#endif

#ifdef _LP64
  mov(c_rarg0, thread);
  set_num_rt_args(0); // Nothing on stack
#else
  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
  get_thread(thread);
  push(thread);
#endif // _LP64

  int call_offset;
  if (!align_stack) {
    set_last_Java_frame(thread, noreg, rbp, NULL);
  } else {
    address the_pc = pc();
    call_offset = offset();
    set_last_Java_frame(thread, noreg, rbp, the_pc);
    andptr(rsp, -(StackAlignmentInBytes));    // Align stack
  }

  // do the call
  call(RuntimeAddress(entry));
  if (!align_stack) {
    call_offset = offset();
  }
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread(rax);
    cmpptr(thread, rax);
    jcc(Assembler::equal, L);
    int3();
    stop("StubAssembler::call_RT: rdi not callee saved?");
    bind(L);
  }
  pop(rax);
#endif
  reset_last_Java_frame(thread, true, align_stack);

  // discard thread and arguments
  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));

  // check for pending exceptions
  { Label L;
    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler
    movptr(rax, Address(thread, Thread::pending_exception_offset()));
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    }
    if (metadata_result->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    }
    if (frame_size() == no_frame_size) {
      leave();
      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, thread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, thread);
  }
  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
#ifdef _LP64
  mov(c_rarg1, arg1);
#else
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
#ifdef _LP64
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      xchgq(arg1, arg2);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
#else
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg3);
    push(arg2);
    push(arg1);
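    // Popping back into c_rarg1..c_rarg3 in this order reverses the pushes,
    // routing arg1 -> c_rarg1, arg2 -> c_rarg2, arg3 -> c_rarg3 through memory
    // so an arbitrary permutation cannot clobber a source before it is read.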
    pop(c_rarg1);
    pop(c_rarg2);
    pop(c_rarg3);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
#else
  push(arg3);
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rbp + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}


StubFrame::~StubFrame() {
  __ leave();
  __ ret(0);
}

#undef __


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime call
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
//
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64

enum reg_save_layout {
  // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
  // happen and will assert if the stack size we create is misaligned
#ifdef _LP64
  align_dummy_0, align_dummy_1,
#endif // _LP64
#ifdef _WIN64
  // Windows always allocates space for its argument registers (see
  // frame::arg_reg_save_area_bytes).
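  // (Four register args * 8 bytes = 32 bytes of shadow space; each 64-bit
  //  word occupies two 4-byte stack slots, hence the paired *H entries.)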
  arg_reg_save_1, arg_reg_save_1H,                                                          // 0, 4
  arg_reg_save_2, arg_reg_save_2H,                                                          // 8, 12
  arg_reg_save_3, arg_reg_save_3H,                                                          // 16, 20
  arg_reg_save_4, arg_reg_save_4H,                                                          // 24, 28
#endif // _WIN64
  xmm_regs_as_doubles_off,                                                                  // 32
  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots,  // 160
  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,          // 224
  // fpu_state_end_off is exclusive
  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),                 // 352
  marker = fpu_state_end_off, SLOT2(markerH)                                                // 352, 356
  extra_space_offset,                                                                       // 360
#ifdef _LP64
  r15_off = extra_space_offset, r15H_off,                                                   // 360, 364
  r14_off, r14H_off,                                                                        // 368, 372
  r13_off, r13H_off,                                                                        // 376, 380
  r12_off, r12H_off,                                                                        // 384, 388
  r11_off, r11H_off,                                                                        // 392, 396
  r10_off, r10H_off,                                                                        // 400, 404
  r9_off, r9H_off,                                                                          // 408, 412
  r8_off, r8H_off,                                                                          // 416, 420
  rdi_off, rdiH_off,                                                                        // 424, 428
#else
  rdi_off = extra_space_offset,
#endif // _LP64
  rsi_off, SLOT2(rsiH_off)                                                                  // 432, 436
  rbp_off, SLOT2(rbpH_off)                                                                  // 440, 444
  rsp_off, SLOT2(rspH_off)                                                                  // 448, 452
  rbx_off, SLOT2(rbxH_off)                                                                  // 456, 460
  rdx_off, SLOT2(rdxH_off)                                                                  // 464, 468
  rcx_off, SLOT2(rcxH_off)                                                                  // 472, 476
  rax_off, SLOT2(raxH_off)                                                                  // 480, 484
  saved_rbp_off, SLOT2(saved_rbpH_off)                                                      // 488, 492
  return_off, SLOT2(returnH_off)                                                            // 496, 500
  reg_save_frame_size  // As noted: neglects any parameters to runtime                      // 504
};



// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them and on P4
// saving FPU registers which don't contain anything appears
// expensive.  The deopt blob is the only thing which needs to
// describe FPU registers.  In all other cases it should be sufficient
// to simply save their current value.
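// For example, with VMRegImpl::stack_slot_size == 4, the enum index rax_off
// corresponds to byte offset rax_off * 4 from rsp once the save area is set
// up, which is how the byte offsets noted in reg_save_layout were derived.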

static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true) {

  // In 64bit all the args are in regs so there are no additional stack slots
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());

  // This is stupid but needed.
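  // An OopMap describes 32-bit stack slots, so on LP64 the high half of each
  // 64-bit register has to be recorded separately via as_VMReg()->next().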
  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());

  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
#endif // _LP64

  int xmm_bypass_limit = FrameMap::nof_xmm_regs;
#ifdef _LP64
  if (UseAVX < 3) {
    xmm_bypass_limit = xmm_bypass_limit / 2;
  }
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      int fpu_off = float_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg fpu_name_0 = FrameMap::fpu_regname(n);
        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
        }
        fpu_off += 2;
      }
      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
    }

    if (UseSSE >= 2) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        if (n < xmm_bypass_limit) {
          VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
          // %%% This is really a waste but we'll keep things as they were for now
          if (true) {
            map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
          }
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");

    } else if (UseSSE == 1) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        if (n < xmm_bypass_limit) {
          VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    }
  }

  return map;
}

static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ pusha();         // integer registers

  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
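  // pusha() stored the general-purpose registers at the top of the layout;
  // now reserve the extra_space_offset slots below them for the XMM/FPU save
  // area and the debug marker.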

  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);

#ifdef ASSERT
  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      // save FPU stack
      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fwait();

#ifdef ASSERT
      Label ok;
      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ jccb(Assembler::equal, ok);
      __ stop("corrupted control word detected");
      __ bind(ok);
#endif

      // Reset the control word to guard against exceptions being unmasked
      // since fstp_d can cause FPU stack underflow exceptions.  Write it
      // into the on stack copy and then reload that to make sure that the
      // current and future values are correct.
      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));

      // Save the FPU registers in de-opt-able form
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE >= 2) {
      // save XMM registers
      // XMM registers can contain float or double values, but this is not known here,
      // so always save them as doubles.
      // note that float values are _not_ converted automatically, so for float values
      // the second word contains only garbage data.
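      // (movdbl stores the raw low 64 bits of each XMM register, so a float
      //  value round-trips unchanged even though it occupies a double slot.)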
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0),  xmm0);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8),  xmm1);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
#ifdef _LP64
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64),  xmm8);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72),  xmm9);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80),  xmm10);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88),  xmm11);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96),  xmm12);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
      if (UseAVX > 2) {
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 128), xmm16);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 136), xmm17);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 144), xmm18);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 152), xmm19);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 160), xmm20);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 168), xmm21);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 176), xmm22);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 184), xmm23);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 192), xmm24);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 200), xmm25);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 208), xmm26);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 216), xmm27);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 224), xmm28);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 232), xmm29);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 240), xmm30);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 248), xmm31);
      }
#endif // _LP64
    } else if (UseSSE == 1) {
      // save XMM registers as float because double not supported without SSE2
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0),  xmm0);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8),  xmm1);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
    }
  }

  // FPU stack must be empty now
  __ verify_FPU(0, "save_live_registers");

  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}


static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      // restore XMM registers
      __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
      __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
      __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
#ifdef _LP64
      __ movdbl(xmm8,  Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64));
      __ movdbl(xmm9,  Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72));
      __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80));
      __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88));
      __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96));
      __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
      __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
      __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
      if (UseAVX > 2) {
        __ movdbl(xmm16, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 128));
        __ movdbl(xmm17, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 136));
        __ movdbl(xmm18, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 144));
        __ movdbl(xmm19, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 152));
        __ movdbl(xmm20, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 160));
        __ movdbl(xmm21, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 168));
        __ movdbl(xmm22, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 176));
        __ movdbl(xmm23, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 184));
        __ movdbl(xmm24, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 192));
        __ movdbl(xmm25, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 200));
        __ movdbl(xmm26, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 208));
        __ movdbl(xmm27, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 216));
        __ movdbl(xmm28, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 224));
        __ movdbl(xmm29, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 232));
        __ movdbl(xmm30, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 240));
        __ movdbl(xmm31, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 248));
      }
#endif // _LP64
    } else if (UseSSE == 1) {
      // restore XMM registers
      __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
      __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
      __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE < 2) {
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      // check that FPU stack is really empty
      __ verify_FPU(0, "restore_live_registers");
    }

  } else {
    // check that FPU stack is really empty
    __ verify_FPU(0, "restore_live_registers");
  }

#ifdef ASSERT
  {
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT

  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}


static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");

  restore_fpu(sasm, restore_fpu_registers);
  __ popa();
}


static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers_except_rax");

  restore_fpu(sasm, restore_fpu_registers);

#ifdef _LP64
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));

  __ addptr(rsp, 16 * wordSize);
#else

  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);
#endif // _LP64
}


void Runtime1::initialize_pd() {
  // nothing to do
}


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // preserve all registers
  int num_rt_args = has_argument ? 2 : 1;
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

  // now all registers are saved and can be used freely
  // verify that no old value is used accidentally
  __ invalidate_registers(true, true, true, true, true, true);

  // registers used by this stub
  const Register temp_reg = rbx;

  // load the argument for the exception that is passed as an argument into the stub
  if (has_argument) {
#ifdef _LP64
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
#else
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = rax;
  const Register exception_pc  = rdx;
  // other registers used in this stub
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into RAX
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // load issuing PC (the return address for this stub) into rdx
    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (RAX) and
    // exception pc (RDX) are dead.
    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
    break;
  }
  default:  ShouldNotReachHere();
  }

#ifdef TIERED
  // C2 can leave the fpu stack dirty
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED

  // verify that only rax and rdx are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that rax contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);

  // patch throwing pc into return address (has bci & oop map)
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // rax: handler address
  //      will be the deopt blob if the nmethod was deoptimized while we
  //      looked up the handler, regardless of whether a handler existed
  //      in the nmethod.

  // only rax is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ movptr(Address(rbp, 1*BytesPerWord), rax);

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address since we are possibly changing SP (restoring from BP).
    __ leave();
    __ pop(rcx);

    // Restore SP from BP if the exception PC is a method handle call site.
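    // (is_method_handle_return is a per-thread flag set during the handler
    //  lookup above; rbp_mh_SP_save names the register in which method
    //  handle call sites preserve the caller's SP.)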
    NOT_LP64(__ get_thread(thread);)
    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
    __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
    __ jmp(rcx);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = rax;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
  // other registers used in this stub
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // verify that only rax is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // save exception_oop in callee-saved register to preserve it during runtime calls
  __ verify_not_null_oop(exception_oop);
  __ movptr(exception_oop_callee_saved, exception_oop);

  NOT_LP64(__ get_thread(thread);)
  // Get return address (is on top of stack after leave).
  __ movptr(exception_pc, Address(rsp, 0));

  // search for the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  // rax: exception handler address of the caller

  // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

  // Restore exception oop to RAX (required convention of exception handler).
  __ movptr(exception_oop, exception_oop_callee_saved);

  // verify that there is really a valid exception in rax
  __ verify_not_null_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

  // Restore SP from BP if the exception PC is a method handle call site.
  NOT_LP64(__ get_thread(thread);)
  __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // rax: exception oop
  // rdx: throwing pc
  // rbx: exception handler
  __ jmp(handler_addr);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number also affects the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  const int num_rt_args = 2;  // thread + dummy

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

#ifdef _LP64
  const Register thread = r15_thread;
  // No need to worry about dummy
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy

  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  // do the call
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true, false);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64

  // check for pending exceptions
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler

    __ testptr(rax, rax);                                   // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // check that there is really a valid exception
    __ verify_not_null_oop(rax);

    // load throwing pc: this is the return address of the stub
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

    restore_live_registers(sasm);

    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack

    // Forward the exception directly to deopt blob. We can blow no
    // registers and must leave throwing pc on the stack.  A patch may
    // have values live in registers, so we use the entry point that
    // expects the exception in TLS.
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ testptr(rax, rax);                                 // have we deoptimized?
  __ jcc(Assembler::equal, cont);                       // no

  // Will reexecute.  The proper return address is already on the stack; we just restore
  // registers, pop all of our frame but the return address, and jump to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);

  return oop_maps;
}


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj   = rax; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1       = rbx;
          Register t2       = rsi;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(rdi);
          __ push(rbx);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
            __ jcc(Assembler::notEqual, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
            __ jcc(Assembler::lessEqual, not_ok);
            __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
            __ jcc(Assembler::zero, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
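          // Roughly: tlab_refill branches to retry_tlab after installing a
          // fresh TLAB, to try_eden when refilling is not worthwhile, and
          // to slow_path when neither fast path applies.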
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi

          __ bind(retry_tlab);

          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(thread, obj_size, 0);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = rax, method = rbx;
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        // Retrieve bci
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ movptr(method, Address(rbp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = rbx; // Incoming
        Register klass  = rdx; // Incoming
        Register obj    = rax; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ movl(t0, Address(klass, Klass::layout_helper_offset()));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmpl(t0, tag);
          __ jcc(Assembler::equal, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = rsi;
          Register t1       = rcx;  // must be rcx for use as shift count
          Register t2       = rdi;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
          __ jcc(Assembler::above, slow_path);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
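          // Klass::layout_helper packs the array tag, header size, and
          // log2(element size) into one word; its low bits are used directly
          // below as the shift count that converts length to a byte size.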
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi

          __ bind(retry_tlab);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0);

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // rax: klass
        // rbx: rank
        // rcx: address of 1st dimension
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax: new multi array
        __ verify_oop(rax);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C ABI locations

#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        // The object is passed on the stack and we haven't pushed a
        // frame yet so it's one word away from the top of stack.
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ load_klass(t, rax);
        __ movl(t, Address(t, Klass::access_flags_offset()));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(0);
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ call(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        // Previous versions of this code reversed the names 'sub' and 'super'.
        // This was operationally harmless but made the code unreadable.
        enum layout {
          rax_off, SLOT2(raxH_off)
          rcx_off, SLOT2(rcxH_off)
          rsi_off, SLOT2(rsiH_off)
          rdi_off, SLOT2(rdiH_off)
          // saved_rbp_off, SLOT2(saved_rbpH_off)
          return_off, SLOT2(returnH_off)
          sup_k_off, SLOT2(sup_kH_off)
          klass_off, SLOT2(superH_off)
          framesize,
          result_off = klass_off  // deepest argument is also the return value
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(rdi);
        __ push(rsi);
        __ push(rcx);
        __ push(rax);

        // This is called by pushing args and not with C abi
        __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        Label miss;
        __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);

        // fallthrough on success:
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);

        __ bind(miss);
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, rax); // rax: object
        f.load_argument(0, rbx); // rbx: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);

        // Called with store_parameter and not the C ABI.
        f.load_argument(0, rax); // rax: lock address

        // note: really a leaf routine, but it must set up the last Java sp,
        // so use call_RT for now (speed could be improved by doing the last
        // Java sp setup manually).
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        const int num_rt_args = 2; // thread, trap_request
        OopMap* oop_map = save_live_registers(sasm, num_rt_args);
        f.load_argument(0, rax);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // rax: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // We can't GC here, so skip the oopmap, but make sure that all
        // the live registers get saved.
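        // SharedRuntime::dtrace_object_alloc is a C entry point taking the
        // newly allocated object as its only argument, which is why rax is
        // moved into c_rarg0 on LP64 (or pushed on 32-bit) below. A hedged
        // sketch of the C side (probe body elided):
        //
        //   int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
        //     // fire the DTrace object-alloc probe for o
        //   }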
        save_live_registers(sasm, 1);

        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there.
        // preserve rsi and rcx
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // check for NaN
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32);  // more than enough on 32-bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);

        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64-bit.
        __ movptr(rax, result_low_word);
        // test the high bits
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // The xor with zero is a no-op that leaves rcx unchanged; the flags
        // actually tested below are set by the orl that follows.
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
#ifdef _LP64
        __ testl(rax, 0x4100);  // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
#else
        __ sahf();
        __ jcc(Assembler::above, return_min_jlong);
#endif // _LP64
        // return max_jlong
#ifndef _LP64
        __ movl(rdx, 0x7fffffff);
        __ movl(rax, 0xffffffff);
#else
        __ mov64(rax, CONST64(0x7fffffffffffffff));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return_min_jlong);
#ifndef _LP64
        __ movl(rdx, 0x80000000);
        __ xorl(rax, rax);
#else
        __ mov64(rax, UCONST64(0x8000000000000000));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
#ifndef _LP64
        __ xorptr(rdx, rdx);
        __ xorptr(rax, rax);
#else
        __ xorptr(rax, rax);
#endif // _LP64

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;

#if INCLUDE_ALL_GCS
    case g1_pre_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
        // arg0 : previous value of memory

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ movptr(rax, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
          __ should_not_reach_here();
          break;
        }
        __ push(rax);
        __ push(rdx);

        const Register pre_val = rax;
        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
        const Register tmp = rdx;

        NOT_LP64(__ get_thread(thread);)
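        // The fast and slow paths below implement, in assembly, roughly the
        // following logic (a hedged pseudo-C sketch; the queue fields
        // correspond to the PtrQueue offsets used just below):
        //
        //   if (queue.index == 0) {                       // SATB buffer full
        //     SharedRuntime::g1_wb_pre(pre_val, thread);  // runtime slow path
        //   } else {
        //     queue.index -= wordSize;
        //     queue.buf[queue.index] = pre_val;           // enqueue original value
        //   }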
        Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_active()));
        Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));

        Label done;
        Label runtime;

        // Can we store the original value in the thread's buffer?
#ifdef _LP64
        __ movslq(tmp, queue_index);
        __ cmpq(tmp, 0);
#else
        __ cmpl(queue_index, 0);
#endif
        __ jcc(Assembler::equal, runtime);
#ifdef _LP64
        __ subq(tmp, wordSize);
        __ movl(queue_index, tmp);
        __ addq(tmp, buffer);
#else
        __ subl(queue_index, wordSize);
        __ movl(tmp, buffer);
        __ addl(tmp, queue_index);
#endif

        // pre_val (rax)
        f.load_argument(0, pre_val);
        __ movptr(Address(tmp, 0), pre_val);
        __ jmp(done);

        __ bind(runtime);
        __ push(rcx);
#ifdef _LP64
        __ push(r8);
        __ push(r9);
        __ push(r10);
        __ push(r11);
# ifndef _WIN64
        __ push(rdi);
        __ push(rsi);
# endif
#endif
        // load the pre-value
        f.load_argument(0, rcx);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
#ifdef _LP64
# ifndef _WIN64
        __ pop(rsi);
        __ pop(rdi);
# endif
        __ pop(r11);
        __ pop(r10);
        __ pop(r9);
        __ pop(r8);
#endif
        __ pop(rcx);
        __ bind(done);

        __ pop(rdx);
        __ pop(rax);
      }
      break;

    case g1_post_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);

        // arg0: store_address
        Address store_addr(rbp, 2*BytesPerWord);

        CardTableModRefBS* ct =
          barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

        Label done;
        Label runtime;

        // At this point we know new_value is non-NULL and that the store
        // crosses regions; we must check whether the card is already dirty.
        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);

        Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));

        __ push(rax);
        __ push(rcx);

        const Register cardtable = rax;
        const Register card_addr = rcx;

        f.load_argument(0, card_addr);
        __ shrptr(card_addr, CardTableModRefBS::card_shift);
        // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
        // a valid address and therefore is not properly handled by the relocation code.
        __ movptr(cardtable, (intptr_t)ct->byte_map_base);
        __ addptr(card_addr, cardtable);

        NOT_LP64(__ get_thread(thread);)

        __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
        __ jcc(Assembler::equal, done);

        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
        __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
        __ jcc(Assembler::equal, done);

        // Storing a region-crossing non-NULL value while the card is clean:
        // dirty the card and log it.
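        // In pseudo-C, the remainder of the stub does roughly the following
        // (a hedged sketch; the queue fields match the offsets above):
        //
        //   *card_addr = dirty_card_val();
        //   if (queue.index == 0) {                          // dirty-card buffer full
        //     SharedRuntime::g1_wb_post(card_addr, thread);  // runtime slow path
        //   } else {
        //     queue.index -= wordSize;
        //     queue.buf[queue.index] = card_addr;            // log the card
        //   }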
        __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());

        __ cmpl(queue_index, 0);
        __ jcc(Assembler::equal, runtime);
        __ subl(queue_index, wordSize);

        const Register buffer_addr = rbx;
        __ push(rbx);

        __ movptr(buffer_addr, buffer);

#ifdef _LP64
        __ movslq(rscratch1, queue_index);
        __ addptr(buffer_addr, rscratch1);
#else
        __ addptr(buffer_addr, queue_index);
#endif
        __ movptr(Address(buffer_addr, 0), card_addr);

        __ pop(rbx);
        __ jmp(done);

        __ bind(runtime);
        __ push(rdx);
#ifdef _LP64
        __ push(r8);
        __ push(r9);
        __ push(r10);
        __ push(r11);
# ifndef _WIN64
        __ push(rdi);
        __ push(rsi);
# endif
#endif
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
#ifdef _LP64
# ifndef _WIN64
        __ pop(rsi);
        __ pop(rdi);
# endif
        __ pop(r11);
        __ pop(r10);
        __ pop(r9);
        __ pop(r8);
#endif
        __ pop(rdx);
        __ bind(done);

        __ pop(rcx);
        __ pop(rax);
      }
      break;
#endif // INCLUDE_ALL_GCS

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm, 1);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ movptr(rax, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}

#undef __

const char* Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}