/*
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolderOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) {
  // setup registers
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different");
  assert(oop_result1 != thread && oop_result2 != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");

#ifdef _LP64
  mov(c_rarg0, thread);
  set_num_rt_args(0); // Nothing on stack
#else
  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
  get_thread(thread);
  push(thread);
#endif // _LP64

  set_last_Java_frame(thread, noreg, rbp, NULL);

  // do the call
  call(RuntimeAddress(entry));
  int call_offset = offset();
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread(rax);
    cmpptr(thread, rax);
    jcc(Assembler::equal, L);
    int3();
    stop("StubAssembler::call_RT: rdi not callee saved?");
    bind(L);
  }
  pop(rax);
#endif
  reset_last_Java_frame(thread, true, false);

  // discard thread and arguments
  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));

  // check for pending exceptions
  { Label L;
    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler
    movptr(rax, Address(thread, Thread::pending_exception_offset()));
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    }
    if (oop_result2->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    }
    if (frame_size() == no_frame_size) {
      leave();
      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    movptr(oop_result1, Address(thread, JavaThread::vm_result_offset()));
    movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    verify_oop(oop_result1);
  }
  if (oop_result2->is_valid()) {
    movptr(oop_result2, Address(thread, JavaThread::vm_result_2_offset()));
    movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    verify_oop(oop_result2);
  }
  return call_offset;
}
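
// Illustrative usage (this pattern recurs throughout the stubs below): the
// returned offset marks the call site so that an oop map can be attached to
// it, e.g.
//   int call_offset = __ call_RT(obj, noreg,
//                                CAST_FROM_FN_PTR(address, new_instance), klass);
//   oop_maps->add_gc_map(call_offset, map);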


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
#ifdef _LP64
  mov(c_rarg1, arg1);
#else
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, oop_result2, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
#ifdef _LP64
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      xchgq(arg1, arg2);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
#else
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, oop_result2, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg3);
    push(arg2);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg2);
    pop(c_rarg3);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
#else
  push(arg3);
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, oop_result2, entry, 3);
}


// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}
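
// Illustrative usage (matching the stub cases below, e.g. monitorenter):
// StubFrame is a scoped helper, so a typical stub body looks like
//   { StubFrame f(sasm, "some_stub", dont_gc_arguments);
//     f.load_argument(0, rax);   // read outgoing parameter 0 into rax
//     ...                        // stub-specific code
//   }                            // ~StubFrame emits leave(); ret(0);
// "some_stub" is a placeholder; the real stubs pass their own info strings.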

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rbp + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}


StubFrame::~StubFrame() {
  __ leave();
  __ ret(0);
}

#undef __


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization). Note that users of this frame may well
// have arguments to some runtime call while these values are on the stack.
// These positions neglect those arguments, but the code in save_live_registers
// takes the argument count into account.
//
#ifdef _LP64
#define SLOT2(x) x,
#define SLOT_PER_WORD 2
#else
#define SLOT2(x)
#define SLOT_PER_WORD 1
#endif // _LP64

enum reg_save_layout {
  // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
  // happen and will assert if the stack size we create is misaligned
#ifdef _LP64
  align_dummy_0, align_dummy_1,
#endif // _LP64
#ifdef _WIN64
  // Windows always allocates space for its argument registers (see
  // frame::arg_reg_save_area_bytes).
  arg_reg_save_1, arg_reg_save_1H,                                                        // 0, 4
  arg_reg_save_2, arg_reg_save_2H,                                                        // 8, 12
  arg_reg_save_3, arg_reg_save_3H,                                                        // 16, 20
  arg_reg_save_4, arg_reg_save_4H,                                                        // 24, 28
#endif // _WIN64
  xmm_regs_as_doubles_off,                                                                // 32
  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160
  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,        // 224
  // fpu_state_end_off is exclusive
  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),              // 352
  marker = fpu_state_end_off, SLOT2(markerH)                                              // 352, 356
  extra_space_offset,                                                                     // 360
#ifdef _LP64
  r15_off = extra_space_offset, r15H_off,                                                 // 360, 364
  r14_off, r14H_off,                                                                      // 368, 372
  r13_off, r13H_off,                                                                      // 376, 380
  r12_off, r12H_off,                                                                      // 384, 388
  r11_off, r11H_off,                                                                      // 392, 396
  r10_off, r10H_off,                                                                      // 400, 404
  r9_off, r9H_off,                                                                        // 408, 412
  r8_off, r8H_off,                                                                        // 416, 420
  rdi_off, rdiH_off,                                                                      // 424, 428
#else
  rdi_off = extra_space_offset,
#endif // _LP64
  rsi_off, SLOT2(rsiH_off)                                                                // 432, 436
  rbp_off, SLOT2(rbpH_off)                                                                // 440, 444
  rsp_off, SLOT2(rspH_off)                                                                // 448, 452
  rbx_off, SLOT2(rbxH_off)                                                                // 456, 460
  rdx_off, SLOT2(rdxH_off)                                                                // 464, 468
  rcx_off, SLOT2(rcxH_off)                                                                // 472, 476
  rax_off, SLOT2(raxH_off)                                                                // 480, 484
  saved_rbp_off, SLOT2(saved_rbpH_off)                                                    // 488, 492
  return_off, SLOT2(returnH_off)                                                          // 496, 500
  reg_save_frame_size   // As noted: neglects any parameters to runtime                   // 504
};
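
// Worked example of the slot arithmetic above (illustrative): a stack slot is
// VMRegImpl::stack_slot_size (4) bytes, so one 64-bit word covers two slots,
// hence SLOT_PER_WORD == 2 on LP64 and the SLOT2 high-half dummies. An entry
// such as r15_off names the slot at byte offset
// r15_off * VMRegImpl::stack_slot_size from rsp once save_live_registers has
// pushed the integer registers and reserved extra_space_offset slots below
// them.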


// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them and on P4
// saving FPU registers which don't contain anything appears
// expensive.  The deopt blob is the only thing which needs to
// describe FPU registers.  In all other cases it should be sufficient
// to simply save their current value.

static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true) {

  // In 64bit all the args are in regs so there are no additional stack slots
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(r8_off  + num_rt_args), r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r9_off  + num_rt_args), r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());

  // This is stupid but needed: the high halves of the 64-bit registers must
  // be described as well.
  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());

  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off  + num_rt_args), r8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off  + num_rt_args), r9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
#endif // _LP64

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      int fpu_off = float_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg fpu_name_0 = FrameMap::fpu_regname(n);
        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
        }
        fpu_off += 2;
      }
      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
    }

    if (UseSSE >= 2) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");

    } else if (UseSSE == 1) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    }
  }

  return map;
}
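
// Illustrative reading of the map built above: an entry such as
//   map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
// tells the GC that the saved value of rax lives (rax_off + num_rt_args)
// stack slots above sp during the runtime call, so any oop stored there can
// be found and updated.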

static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ pusha();         // integer registers

  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");

  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);

#ifdef ASSERT
  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      // save FPU stack
      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fwait();

#ifdef ASSERT
      Label ok;
      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ jccb(Assembler::equal, ok);
      __ stop("corrupted control word detected");
      __ bind(ok);
#endif

      // Reset the control word to guard against exceptions being unmasked
      // since fstp_d can cause FPU stack underflow exceptions.  Write it
      // into the on stack copy and then reload that to make sure that the
      // current and future values are correct.
      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));

      // Save the FPU registers in de-opt-able form
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE >= 2) {
      // save XMM registers
      // XMM registers can contain float or double values, but this is not known here,
      // so always save them as doubles.
      // note that float values are _not_ converted automatically, so for float values
      // the second word contains only garbage data.
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
#ifdef _LP64
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
#endif // _LP64
    } else if (UseSSE == 1) {
      // save XMM registers as float because double not supported without SSE2
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
    }
  }

  // FPU stack must be empty now
  __ verify_FPU(0, "save_live_registers");

  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}
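
// Typical bracketing pattern (illustrative, matching the stub cases below):
//   OopMap* map = save_live_registers(sasm, num_rt_args);
//   int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, some_entry));
//   oop_maps->add_gc_map(call_offset, map);
//   restore_live_registers(sasm);
// where some_entry stands for one of the Runtime1 entry points used below.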


static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      // restore XMM registers
      __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
#ifdef _LP64
      __ movdbl(xmm8,  Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64));
      __ movdbl(xmm9,  Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72));
      __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80));
      __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88));
      __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96));
      __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
      __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
      __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
#endif // _LP64
    } else if (UseSSE == 1) {
      // restore XMM registers
      __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE < 2) {
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      // check that FPU stack is really empty
      __ verify_FPU(0, "restore_live_registers");
    }

  } else {
    // check that FPU stack is really empty
    __ verify_FPU(0, "restore_live_registers");
  }

#ifdef ASSERT
  {
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT

  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}


static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");

  restore_fpu(sasm, restore_fpu_registers);
  __ popa();
}


static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers_except_rax");

  restore_fpu(sasm, restore_fpu_registers);

#ifdef _LP64
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));

  __ addptr(rsp, 16 * wordSize);
#else

  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);
#endif // _LP64
}


void Runtime1::initialize_pd() {
  // nothing to do
}


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // preserve all registers
  int num_rt_args = has_argument ? 2 : 1;
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

  // now all registers are saved and can be used freely
  // verify that no old value is used accidentally
  __ invalidate_registers(true, true, true, true, true, true);

  // registers used by this stub
  const Register temp_reg = rbx;

  // load argument for exception that is passed as an argument into the stub
  if (has_argument) {
#ifdef _LP64
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
#else
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}
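
// Illustrative usage (see the throw_* stub cases below): each throw stub is
// generated with a single call such as
//   oop_maps = generate_exception_throw(sasm,
//       CAST_FROM_FN_PTR(address, throw_div0_exception), false);
// where the runtime entry posts the exception and never returns normally.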


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = rax;
  const Register exception_pc  = rdx;
  // other registers used in this stub
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into RAX
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // load issuing PC (the return address for this stub) into rdx
    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (RAX) and
    // exception pc (RDX) are dead.
    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
    break;
  }
  default:  ShouldNotReachHere();
  }

#ifdef TIERED
  // C2 can leave the fpu stack dirty
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED

  // verify that only rax and rdx are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that rax contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);

  // patch throwing pc into return address (has bci & oop map)
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // rax: handler address
  //      will be the deopt blob if nmethod was deoptimized while we looked up
  //      handler regardless of whether handler existed in the nmethod.

  // only rax is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ movptr(Address(rbp, 1*BytesPerWord), rax);

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address since we are possibly changing SP (restoring from BP).
    __ leave();
    __ pop(rcx);

    // Restore SP from BP if the exception PC is a method handle call site.
    NOT_LP64(__ get_thread(thread);)
    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
    __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
    __ jmp(rcx);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = rax;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
  // other registers used in this stub
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // verify that only rax is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // save exception_oop in callee-saved register to preserve it during runtime calls
  __ verify_not_null_oop(exception_oop);
  __ movptr(exception_oop_callee_saved, exception_oop);

  NOT_LP64(__ get_thread(thread);)
  // Get return address (is on top of stack after leave).
  __ movptr(exception_pc, Address(rsp, 0));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  // rax: exception handler address of the caller

  // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

  // Restore exception oop to RAX (required convention of exception handler).
  __ movptr(exception_oop, exception_oop_callee_saved);

  // verify that there is really a valid exception in rax
  __ verify_not_null_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

  // Restore SP from BP if the exception PC is a method handle call site.
  NOT_LP64(__ get_thread(thread);)
  __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // rax: exception oop
  // rdx: throwing pc
  // rbx: exception handler
  __ jmp(handler_addr);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number affects also the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  const int num_rt_args = 2;  // thread + dummy

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

#ifdef _LP64
  const Register thread = r15_thread;
  // No need to worry about dummy
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy

  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  // do the call
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true, false);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64

  // check for pending exceptions
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler

    __ testptr(rax, rax);                                   // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // check that there is really a valid exception
    __ verify_not_null_oop(rax);

    // load throwing pc: this is the return address of the stub
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

    restore_live_registers(sasm);

    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack

    // Forward the exception directly to the deopt blob. We can blow no
    // registers and must leave the throwing pc on the stack. A patch may
    // have values live in registers, so enter the deopt blob at the entry
    // point that expects the exception in TLS.
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ testptr(rax, rax);                                 // have we deoptimized?
  __ jcc(Assembler::equal, cont);                       // no

  // Will reexecute. The proper return address is already on the stack; we
  // just restore the registers, pop all of our frame but the return address,
  // and jump to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);

  return oop_maps;
}
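
// Illustrative usage (see the patching stub cases below): both field-access
// and class-load patching funnel through this helper, e.g.
//   oop_maps = generate_patching(sasm,
//       CAST_FROM_FN_PTR(address, access_field_patching));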


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj   = rax; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1       = rbx;
          Register t2       = rsi;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(rdi);
          __ push(rbx);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes()), instanceKlass::fully_initialized);
            __ jcc(Assembler::notEqual, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes()));
            __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
            __ jcc(Assembler::lessEqual, not_ok);
            __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
            __ jcc(Assembler::zero, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi

          __ bind(retry_tlab);

          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(thread, obj_size, 0);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new instance
      }

      break;
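
      // Note on the fast path above (illustrative sketch of tlab_refill's
      // contract): it branches back to retry_tlab once the thread has a fresh
      // TLAB, branches to try_eden when the current TLAB should be kept and
      // eden used directly, and bails out to slow_path otherwise, which lands
      // in the call_RT-based allocation above.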

    case counter_overflow_id:
      {
        Register bci = rax, method = rbx;
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        // Retrieve bci
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        // And a pointer to the methodOop
        __ movptr(method, Address(rbp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;
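
      // Note (illustrative): the two loads above mirror StubFrame::load_argument's
      // layout, where Address(rbp, 2*BytesPerWord) is the caller's outgoing
      // parameter with offset 0 and Address(rbp, 3*BytesPerWord) the one with
      // offset 1 (see the comment on load_argument near the top of this file).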

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = rbx; // Incoming
        Register klass  = rdx; // Incoming
        Register obj    = rax; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ movl(t0, Address(klass, Klass::layout_helper_offset_in_bytes()));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmpl(t0, tag);
          __ jcc(Assembler::equal, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = rsi;
          Register t1       = rcx;  // must be rcx for use as shift count
          Register t2       = rdi;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
          __ jcc(Assembler::above, slow_path);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi

          __ bind(retry_tlab);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset_in_bytes()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset_in_bytes()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0);

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new array
      }
      break;
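
      // Worked example of the arr_size computation above (illustrative
      // numbers, assuming a 64-bit VM where an int[] has a 16-byte header
      // and log2_element_size == 2 in the layout helper): for length == 9,
      //   arr_size = 16 + (9 << 2) = 52,
      // which the align-up step rounds to 56 (MinObjAlignmentInBytes == 8 is
      // assumed here as well).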

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // rax: klass
        // rbx: rank
        // rcx: address of 1st dimension
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax: new multi array
        __ verify_oop(rax);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime, so the arguments
        // will be placed in C ABI locations.

#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        // The object is passed on the stack and we haven't pushed a
        // frame yet, so it's one word away from the top of stack.
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ load_klass(t, rax);
        __ movl(t, Address(t, Klass::access_flags_offset_in_bytes()));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(0);
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ call(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        // Previous versions of this code reversed the names 'sub' and 'super'.
        // This was operationally harmless but made the code unreadable.
        enum layout {
          rax_off, SLOT2(raxH_off)
          rcx_off, SLOT2(rcxH_off)
          rsi_off, SLOT2(rsiH_off)
          rdi_off, SLOT2(rdiH_off)
          // saved_rbp_off, SLOT2(saved_rbpH_off)
          return_off, SLOT2(returnH_off)
          sup_k_off, SLOT2(sup_kH_off)
          klass_off, SLOT2(superH_off)
          framesize,
          result_off = klass_off  // deepest argument is also the return value
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(rdi);
        __ push(rsi);
        __ push(rcx);
        __ push(rax);

        // This is called by pushing args and not with C abi
        __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        Label miss;
        __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);

        // fallthrough on success:
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);

        __ bind(miss);
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, rax); // rax: object
        f.load_argument(0, rbx); // rbx: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;
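
      // Note (illustrative): the load_argument calls above are the read side
      // of LIR_Assembler::store_parameter; the compiled caller stores the
      // object and the lock address as outgoing parameters before calling
      // the stub, and the StubFrame reads them back relative to rbp.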

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, rax); // rax: lock address

        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        const int num_rt_args = 1;  // thread
        OopMap* oop_map = save_live_registers(sasm, num_rt_args);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;
    case fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there
        // preserve rsi and rcx
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32);                    // more than enough on 32bit
        __ fst_d(value_low_word);
        // check for NaN: exponent bits all ones and a non-zero mantissa
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);   // NaN -> return 0

        __ bind(do_convert);
        // set the x87 rounding control to truncate (RC = 11) as required by
        // the Java narrowing conversion, convert, then restore the original
        // control word
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit
        __ movptr(rax, result_low_word);
        // check whether the result is the "integer indefinite" value
        // min_jlong (rax == 0, rdx == 0x80000000), which fistp produces for
        // any out-of-range operand
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // note: the xorl below is a no-op; the flags it sets are
        // overwritten by the orl that follows
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        // out of range: compare the operand against zero to pick min or max
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
#ifdef _LP64
        __ testl(rax, 0x4100);  // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
#else
        __ sahf();
        __ jcc(Assembler::above, return_min_jlong);
#endif // _LP64
        // return max_jlong
#ifndef _LP64
        __ movl(rdx, 0x7fffffff);
        __ movl(rax, 0xffffffff);
#else
        __ mov64(rax, CONST64(0x7fffffffffffffff));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return_min_jlong);
#ifndef _LP64
        __ movl(rdx, 0x80000000);
        __ xorl(rax, rax);
#else
        __ mov64(rax, CONST64(0x8000000000000000));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
#ifndef _LP64
        __ xorptr(rdx, rdx);
        __ xorptr(rax, rax);
#else
        __ xorptr(rax, rax);
#endif // _LP64

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;

#ifndef SERIALGC
    case g1_pre_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
        // arg0 : previous value of memory

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ movptr(rax, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
          __ should_not_reach_here();
          break;
        }
        __ push(rax);
        __ push(rdx);

        const Register pre_val = rax;
        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
        const Register tmp = rdx;

        NOT_LP64(__ get_thread(thread);)
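
        // Each Java thread carries a SATB queue: 'buffer' points at the
        // queue's backing array and 'index' is a byte offset that counts
        // down from the buffer size toward zero.  An index of zero means
        // the buffer is full, in which case the stub falls back to the
        // runtime; otherwise it decrements the index by one word and
        // stores the pre-value at buffer + index.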
        Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_active()));

        Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));

        Label done;
        Label runtime;

        // Can we store the original value in the thread's buffer?

#ifdef _LP64
        __ movslq(tmp, queue_index);
        __ cmpq(tmp, 0);
#else
        __ cmpl(queue_index, 0);
#endif
        __ jcc(Assembler::equal, runtime);
#ifdef _LP64
        __ subq(tmp, wordSize);
        __ movl(queue_index, tmp);
        __ addq(tmp, buffer);
#else
        __ subl(queue_index, wordSize);
        __ movl(tmp, buffer);
        __ addl(tmp, queue_index);
#endif

        // store the pre-value (rax) into the buffer slot
        f.load_argument(0, pre_val);
        __ movptr(Address(tmp, 0), pre_val);
        __ jmp(done);

        __ bind(runtime);
        // the leaf call clobbers the C argument and scratch registers, so
        // save everything that is live across it
        __ push(rcx);
#ifdef _LP64
        __ push(r8);
        __ push(r9);
        __ push(r10);
        __ push(r11);
#  ifndef _WIN64
        __ push(rdi);
        __ push(rsi);
#  endif
#endif
        // load the pre-value
        f.load_argument(0, rcx);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
#ifdef _LP64
#  ifndef _WIN64
        __ pop(rsi);
        __ pop(rdi);
#  endif
        __ pop(r11);
        __ pop(r10);
        __ pop(r9);
        __ pop(r8);
#endif
        __ pop(rcx);
        __ bind(done);

        __ pop(rdx);
        __ pop(rax);
      }
      break;

    case g1_post_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);

        // arg0: store_address
        Address store_addr(rbp, 2*BytesPerWord);

        BarrierSet* bs = Universe::heap()->barrier_set();
        CardTableModRefBS* ct = (CardTableModRefBS*)bs;
        Label done;
        Label runtime;

        // At this point we know new_value is non-NULL and the store crosses
        // a region boundary, so we must check whether the card is already dirty.

        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);

        Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));

        __ push(rax);
        __ push(rcx);

        NOT_LP64(__ get_thread(thread);)
        ExternalAddress cardtable((address)ct->byte_map_base);
        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

        // card address = byte_map_base + (store address >> card_shift)
        const Register card_addr = rcx;
#ifdef _LP64
        const Register tmp = rscratch1;
        f.load_argument(0, card_addr);
        __ shrq(card_addr, CardTableModRefBS::card_shift);
        __ lea(tmp, cardtable);
        // get the address of the card
        __ addq(card_addr, tmp);
#else
        const Register card_index = rcx;
        f.load_argument(0, card_index);
        __ shrl(card_index, CardTableModRefBS::card_shift);

        Address index(noreg, card_index, Address::times_1);
        __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
#endif

        __ cmpb(Address(card_addr, 0), 0);
        __ jcc(Assembler::equal, done);

        // storing a region-crossing non-NULL oop and the card is clean:
        // dirty the card and log it.
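        // The dirty card queue uses the same counting-down index protocol
        // as the SATB queue above; in C-like pseudocode (illustrative only):
        //
        //   *card_addr = dirty;                       // dirty_card_val() == 0
        //   if (index == 0) call_runtime(card_addr);  // buffer full
        //   else { index -= wordSize; *(buffer + index) = card_addr; }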
        __ movb(Address(card_addr, 0), 0); // 0 == dirty_card_val()

        __ cmpl(queue_index, 0);
        __ jcc(Assembler::equal, runtime);
        __ subl(queue_index, wordSize);

        const Register buffer_addr = rbx;
        __ push(rbx);

        __ movptr(buffer_addr, buffer);

#ifdef _LP64
        __ movslq(rscratch1, queue_index);
        __ addptr(buffer_addr, rscratch1);
#else
        __ addptr(buffer_addr, queue_index);
#endif
        __ movptr(Address(buffer_addr, 0), card_addr);

        __ pop(rbx);
        __ jmp(done);

        __ bind(runtime);
        __ push(rdx);
#ifdef _LP64
        __ push(r8);
        __ push(r9);
        __ push(r10);
        __ push(r11);
#  ifndef _WIN64
        __ push(rdi);
        __ push(rsi);
#  endif
#endif
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
#ifdef _LP64
#  ifndef _WIN64
        __ pop(rsi);
        __ pop(rdi);
#  endif
        __ pop(r11);
        __ pop(r10);
        __ pop(r9);
        __ pop(r8);
#endif
        __ pop(rdx);
        __ bind(done);

        __ pop(rcx);
        __ pop(rax);
      }
      break;
#endif // !SERIALGC

    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ movptr(rax, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}