/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#endif


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;
#ifdef _LP64
  // At a method handle call, the stack may not be properly aligned
  // when returning with an exception.
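  // In that case the align_stack path below re-establishes the ABI stack
  // alignment with andptr(rsp, -StackAlignmentInBytes) before the call.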
  align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
#endif

#ifdef _LP64
  mov(c_rarg0, thread);
  set_num_rt_args(0); // Nothing on stack
#else
  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
  get_thread(thread);
  push(thread);
#endif // _LP64

  int call_offset;
  if (!align_stack) {
    set_last_Java_frame(thread, noreg, rbp, NULL);
  } else {
    address the_pc = pc();
    call_offset = offset();
    set_last_Java_frame(thread, noreg, rbp, the_pc);
    andptr(rsp, -(StackAlignmentInBytes)); // Align stack
  }

  // do the call
  call(RuntimeAddress(entry));
  if (!align_stack) {
    call_offset = offset();
  }
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread(rax);
    cmpptr(thread, rax);
    jcc(Assembler::equal, L);
    int3();
    stop("StubAssembler::call_RT: rdi not callee saved?");
    bind(L);
  }
  pop(rax);
#endif
  reset_last_Java_frame(thread, true);

  // discard thread and arguments
  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));

  // check for pending exceptions
  { Label L;
    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler
    movptr(rax, Address(thread, Thread::pending_exception_offset()));
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    }
    if (metadata_result->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    }
    if (frame_size() == no_frame_size) {
      leave();
      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, thread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, thread);
  }
  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
#ifdef _LP64
  mov(c_rarg1, arg1);
#else
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
#ifdef _LP64
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      xchgq(arg1, arg2);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
#else
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg3);
    push(arg2);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg2);
    pop(c_rarg3);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
#else
  push(arg3);
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rbp + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}


StubFrame::~StubFrame() {
  __ leave();
  __ ret(0);
}

#undef __


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note: users of this frame may well have arguments to some runtime
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
//
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64

enum reg_save_layout {
  // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
  // happen and will assert if the stack size we create is misaligned
#ifdef _LP64
  align_dummy_0, align_dummy_1,
#endif // _LP64
#ifdef _WIN64
  // Windows always allocates space for its argument registers (see
  // frame::arg_reg_save_area_bytes).
  arg_reg_save_1, arg_reg_save_1H,                                                          // 0, 4
  arg_reg_save_2, arg_reg_save_2H,                                                          // 8, 12
  arg_reg_save_3, arg_reg_save_3H,                                                          // 16, 20
  arg_reg_save_4, arg_reg_save_4H,                                                          // 24, 28
#endif // _WIN64
  xmm_regs_as_doubles_off,                                                                  // 32
  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots,  // 160
  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,          // 224
  // fpu_state_end_off is exclusive
  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),                // 352
  marker = fpu_state_end_off, SLOT2(markerH)                                                // 352, 356
  extra_space_offset,                                                                       // 360
#ifdef _LP64
  r15_off = extra_space_offset, r15H_off,                                                   // 360, 364
  r14_off, r14H_off,                                                                        // 368, 372
  r13_off, r13H_off,                                                                        // 376, 380
  r12_off, r12H_off,                                                                        // 384, 388
  r11_off, r11H_off,                                                                        // 392, 396
  r10_off, r10H_off,                                                                        // 400, 404
  r9_off, r9H_off,                                                                          // 408, 412
  r8_off, r8H_off,                                                                          // 416, 420
  rdi_off, rdiH_off,                                                                        // 424, 428
#else
  rdi_off = extra_space_offset,
#endif // _LP64
  rsi_off, SLOT2(rsiH_off)                                                                  // 432, 436
  rbp_off, SLOT2(rbpH_off)                                                                  // 440, 444
  rsp_off, SLOT2(rspH_off)                                                                  // 448, 452
  rbx_off, SLOT2(rbxH_off)                                                                  // 456, 460
  rdx_off, SLOT2(rdxH_off)                                                                  // 464, 468
  rcx_off, SLOT2(rcxH_off)                                                                  // 472, 476
  rax_off, SLOT2(raxH_off)                                                                  // 480, 484
  saved_rbp_off, SLOT2(saved_rbpH_off)                                                      // 488, 492
  return_off, SLOT2(returnH_off)                                                            // 496, 500
  reg_save_frame_size  // As noted: neglects any parameters to runtime                      // 504
};



// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them and on P4
// saving FPU registers which don't contain anything appears
// expensive.  The deopt blob is the only thing which needs to
// describe FPU registers.  In all other cases it should be sufficient
// to simply save their current value.
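// Note on the layout above: a "slot" is one VMRegImpl::stack_slot_size
// (4-byte) unit, so on 64-bit every saved register occupies two consecutive
// slots (low and high half); the byte offsets shown in the enum comments are
// simply slot_index * 4.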

static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true) {

  // In 64bit all the args are in regs so there are no additional stack slots
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());

  // This is stupid but needed.
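  // Each 64-bit register spans two 32-bit stack slots, so the OopMap must
  // also describe the high half of every register: as_VMReg()->next() names
  // the upper 32 bits, saved at the corresponding *H_off slot.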
  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());

  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
#endif // _LP64

  int xmm_bypass_limit = FrameMap::nof_xmm_regs;
#ifdef _LP64
  if (UseAVX < 3) {
    xmm_bypass_limit = xmm_bypass_limit / 2;
  }
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      int fpu_off = float_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg fpu_name_0 = FrameMap::fpu_regname(n);
        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
        }
        fpu_off += 2;
      }
      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
    }

    if (UseSSE >= 2) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        if (n < xmm_bypass_limit) {
          VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
          // %%% This is really a waste but we'll keep things as they were for now
          if (true) {
            map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
          }
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");

    } else if (UseSSE == 1) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    }
  }

  return map;
}

static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ pusha();         // integer registers

  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");

  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);

#ifdef ASSERT
  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      // save FPU stack
      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fwait();

#ifdef ASSERT
      Label ok;
      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ jccb(Assembler::equal, ok);
      __ stop("corrupted control word detected");
      __ bind(ok);
#endif

      // Reset the control word to guard against exceptions being unmasked
      // since fstp_d can cause FPU stack underflow exceptions.  Write it
      // into the on stack copy and then reload that to make sure that the
      // current and future values are correct.
      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));

      // Save the FPU registers in de-opt-able form
      int offset = 0;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
        offset += 8;
      }
    }

    if (UseSSE >= 2) {
      // save XMM registers
      // XMM registers can contain float or double values, but this is not known here,
      // so always save them as doubles.
      // note that float values are _not_ converted automatically, so for float values
      // the second word contains only garbage data.
      int xmm_bypass_limit = FrameMap::nof_xmm_regs;
      int offset = 0;
#ifdef _LP64
      if (UseAVX < 3) {
        xmm_bypass_limit = xmm_bypass_limit / 2;
      }
#endif
      for (int n = 0; n < xmm_bypass_limit; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name);
        offset += 8;
      }
    } else if (UseSSE == 1) {
      // save XMM registers as float because double not supported without SSE2 (num MMX == num fpu)
      int offset = 0;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name);
        offset += 8;
      }
    }
  }

  // FPU stack must be empty now
  __ verify_FPU(0, "save_live_registers");

  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}


static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      // restore XMM registers
      int xmm_bypass_limit = FrameMap::nof_xmm_regs;
#ifdef _LP64
      if (UseAVX < 3) {
        xmm_bypass_limit = xmm_bypass_limit / 2;
      }
#endif
      int offset = 0;
      for (int n = 0; n < xmm_bypass_limit; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movdbl(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
        offset += 8;
      }
    } else if (UseSSE == 1) {
      // restore XMM registers (num MMX == num fpu)
      int offset = 0;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movflt(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
        offset += 8;
      }
    }

    if (UseSSE < 2) {
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      // check that FPU stack is really empty
      __ verify_FPU(0, "restore_live_registers");
    }

  } else {
    // check that FPU stack is really empty
    __ verify_FPU(0, "restore_live_registers");
  }

#ifdef ASSERT
  {
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT

  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}


static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");

  restore_fpu(sasm, restore_fpu_registers);
  __ popa();
}


static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers_except_rax");

  restore_fpu(sasm, restore_fpu_registers);

#ifdef _LP64
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));

  __ addptr(rsp, 16 * wordSize);
#else

  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);
#endif // _LP64
}


void Runtime1::initialize_pd() {
  // nothing to do
}


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // preserve all registers
  int num_rt_args = has_argument ? 2 : 1;
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

  // now all registers are saved and can be used freely
  // verify that no old value is used accidentally
  __ invalidate_registers(true, true, true, true, true, true);

  // registers used by this stub
  const Register temp_reg = rbx;

  // load argument for exception that is passed as an argument into the stub
  if (has_argument) {
#ifdef _LP64
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
#else
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = rax;
  const Register exception_pc  = rdx;
  // other registers used in this stub
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into RAX
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // load issuing PC (the return address for this stub) into rdx
    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (RAX) and
    // exception pc (RDX) are dead.
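    // Only a minimal frame is described: saved rbp plus the return address
    // (2 words), one extra word for the thread argument on 32-bit, and the
    // Win64 argument-register shadow space where applicable.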
    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
    break;
  }
  default:  ShouldNotReachHere();
  }

#ifdef TIERED
  // C2 can leave the fpu stack dirty
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED

  // verify that only rax and rdx are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that rax contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);

  // patch throwing pc into return address (has bci & oop map)
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // rax: handler address
  //      will be the deopt blob if the nmethod was deoptimized while we looked up
  //      the handler, regardless of whether a handler existed in the nmethod.

  // only rax is valid at this time; all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address; this stub will directly return to the exception handler
  __ movptr(Address(rbp, 1*BytesPerWord), rax);

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address.
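    // leave() pops the frame; the word then on top of the stack is the
    // "return address" slot patched above with the handler address, so the
    // pop-and-jump below transfers control straight to the handler.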
    __ leave();
    __ pop(rcx);
    __ jmp(rcx);  // jump to exception handler
    break;
  default: ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = rax;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
  // other registers used in this stub
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // verify that only rax is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // save exception_oop in callee-saved register to preserve it during runtime calls
  __ verify_not_null_oop(exception_oop);
  __ movptr(exception_oop_callee_saved, exception_oop);

  NOT_LP64(__ get_thread(thread);)
  // Get return address (is on top of stack after leave).
  __ movptr(exception_pc, Address(rsp, 0));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  // rax: exception handler address of the caller

  // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

  // Restore exception oop to RAX (required convention of exception handler).
  __ movptr(exception_oop, exception_oop_callee_saved);

  // verify that there is really a valid exception in rax
  __ verify_not_null_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // rax: exception oop
  // rdx: throwing pc
  // rbx: exception handler
  __ jmp(handler_addr);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number also affects the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
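  // Reserving a fixed two argument slots (thread + one dummy) keeps the
  // register save area at identical offsets for every patching call, so a
  // single oop map layout describes all of them.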
  const int num_rt_args = 2;  // thread + dummy

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

#ifdef _LP64
  const Register thread = r15_thread;
  // No need to worry about dummy
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy

  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  // do the call
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64

  // check for pending exceptions
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler

    __ testptr(rax, rax);                                   // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // check that there is really a valid exception
    __ verify_not_null_oop(rax);

    // load throwing pc: this is the return address of the stub
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

    restore_live_registers(sasm);

    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack

    // Forward the exception directly to deopt blob. We can blow no
    // registers and must leave throwing pc on the stack. A patch may
    // have values live in registers, so use the entry point that takes
    // the exception in TLS.
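    // unpack_with_exception_in_tls reads the exception oop and pc back out
    // of the JavaThread fields stored just above, so nothing needs to
    // survive in registers across the jump.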
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ testptr(rax, rax);                                 // have we deoptimized?
  __ jcc(Assembler::equal, cont);                       // no

  // Will reexecute. The proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);

  return oop_maps;
}


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj   = rax; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && UseTLAB
            && Universe::heap()->supports_inline_contig_alloc()) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1       = rbx;
          Register t2       = rsi;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(rdi);
          __ push(rbx);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
            __ jcc(Assembler::notEqual, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
            __ jcc(Assembler::lessEqual, not_ok);
            __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
            __ jcc(Assembler::zero, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
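          // eden_allocate below attempts a lock-free bump of the shared
          // eden top pointer and branches to slow_path if eden is exhausted.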
          Label retry_tlab, try_eden;
          const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
          NOT_LP64(__ get_thread(thread));

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(thread, obj_size, 0);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = rax, method = rbx;
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        // Retrieve bci
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ movptr(method, Address(rbp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = rbx; // Incoming
        Register klass  = rdx; // Incoming
        Register obj    = rax; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ movl(t0, Address(klass, Klass::layout_helper_offset()));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmpl(t0, tag);
          __ jcc(Assembler::equal, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        // If we got here, the TLAB allocation failed, so try allocating from
        // eden if inline contiguous allocations are supported.
        if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Register arr_size = rsi;
          Register t1       = rcx;  // must be rcx for use as shift count
          Register t2       = rdi;
          Label slow_path;

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size

          // Using t2 for non 64-bit.
          const Register thread = NOT_LP64(t2) LP64_ONLY(r15_thread);
          NOT_LP64(__ get_thread(thread));
          __ incr_allocated_bytes(thread, arr_size, 0);

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // rax: klass
        // rbx: rank
        // rcx: address of 1st dimension
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax: new multi array
        __ verify_oop(rax);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C ABI locations.

#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        // The object is passed on the stack and we haven't pushed a
        // frame yet so it's one word away from top of stack.
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ load_klass(t, rax);
        __ movl(t, Address(t, Klass::access_flags_offset()));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(0);
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ call(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        // Previous versions of this code reversed the names 'sub' and 'super'.
        // This was operationally harmless but made the code unreadable.
        enum layout {
          rax_off, SLOT2(raxH_off)
          rcx_off, SLOT2(rcxH_off)
          rsi_off, SLOT2(rsiH_off)
          rdi_off, SLOT2(rdiH_off)
          // saved_rbp_off, SLOT2(saved_rbpH_off)
          return_off, SLOT2(returnH_off)
          sup_k_off, SLOT2(sup_kH_off)
          klass_off, SLOT2(superH_off)
          framesize,
          result_off = klass_off  // deepest argument is also the return value
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(rdi);
        __ push(rsi);
        __ push(rcx);
        __ push(rax);

        // This is called by pushing args and not with C abi
        __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        Label miss;
        __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);

        // fallthrough on success:
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);

        __ bind(miss);
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, rax); // rax: object
        f.load_argument(0, rbx); // rbx: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, rax); // rax: lock address

        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        const int num_rt_args = 2;  // thread, trap_request
        OopMap* oop_map = save_live_registers(sasm, num_rt_args);
        f.load_argument(0, rax);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // rax: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // we can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);

        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there
        // preserve rsi, rcx
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // check for NaN
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32);                    // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);

        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit
        __ movptr(rax, result_low_word);
        // testing of high bits
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
#ifdef _LP64
        __ testl(rax, 0x4100);  // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
#else
        __ sahf();
        __ jcc(Assembler::above, return_min_jlong);
#endif // _LP64
        // return max_jlong
#ifndef _LP64
        __ movl(rdx, 0x7fffffff);
        __ movl(rax, 0xffffffff);
#else
        __ mov64(rax, CONST64(0x7fffffffffffffff));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return_min_jlong);
#ifndef _LP64
        __ movl(rdx, 0x80000000);
        __ xorl(rax, rax);
#else
        __ mov64(rax, UCONST64(0x8000000000000000));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
#ifndef _LP64
        __ xorptr(rdx, rdx);
        __ xorptr(rax, rax);
#else
        __ xorptr(rax, rax);
#endif // _LP64

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;

#if INCLUDE_ALL_GCS
    case g1_pre_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
        // arg0 : previous value of memory

        BarrierSet* bs = BarrierSet::barrier_set();
        if (bs->kind() != BarrierSet::G1BarrierSet && bs->kind() != BarrierSet::Shenandoah) {
          __ movptr(rax, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
          __ should_not_reach_here();
          break;
        }

        if (bs->kind() == BarrierSet::Shenandoah && !(ShenandoahSATBBarrier || ShenandoahStoreValEnqueueBarrier)) {
          break;
        }

        __ push(rax);
        __ push(rdx);

        const Register pre_val = rax;
        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
        const Register tmp = rdx;

        NOT_LP64(__ get_thread(thread);)

        Address queue_active(thread, in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_active_offset()
                                                      : ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
        Address queue_index(thread, in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_index_offset()
                                                     : ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
        Address buffer(thread, in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_buffer_offset()
                                                : ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

        Label done;
        Label runtime;

        if (UseShenandoahGC) {
          Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
          __ testb(gc_state, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL);
          __ jcc(Assembler::zero, done);
        } else {
          assert(UseG1GC, "Should be");
          // Is marking still active?
          if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
            __ cmpl(queue_active, 0);
          } else {
            assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
            __ cmpb(queue_active, 0);
          }
          __ jcc(Assembler::equal, done);
        }

        // Can we store original value in the thread's buffer?

        __ movptr(tmp, queue_index);
        __ testptr(tmp, tmp);
        __ jcc(Assembler::zero, runtime);
        __ subptr(tmp, wordSize);
        __ movptr(queue_index, tmp);
        __ addptr(tmp, buffer);

        // prev_val (rax)
        f.load_argument(0, pre_val);
        __ movptr(Address(tmp, 0), pre_val);
        __ jmp(done);

        __ bind(runtime);

        save_live_registers(sasm, 3);

        // load the pre-value
        f.load_argument(0, rcx);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);

        restore_live_registers(sasm);

        __ bind(done);

        __ pop(rdx);
        __ pop(rax);
      }
      break;

    case g1_post_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);

        BarrierSet* bs = BarrierSet::barrier_set();
        if (bs->kind() != BarrierSet::G1BarrierSet) {
          __ movptr(rax, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
          __ should_not_reach_here();
          break;
        }

        // arg0: store_address
        Address store_addr(rbp, 2*BytesPerWord);

        Label done;
        Label enqueued;
        Label runtime;

        // At this point we know new_value is non-NULL and the new_value crosses regions.
        // Must check to see if card is already dirty

        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);

        Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
        Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

        __ push(rax);
        __ push(rcx);

        const Register cardtable = rax;
        const Register card_addr = rcx;

        f.load_argument(0, card_addr);
        __ shrptr(card_addr, CardTable::card_shift);
        // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
        // a valid address and therefore is not properly handled by the relocation code.
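        // Card address computation: card_addr already holds
        // store_addr >> card_shift, so adding the raw byte_map_base below
        // yields &byte_map[store_addr >> card_shift], the card byte to test
        // and possibly dirty.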
        __ movptr(cardtable, ci_card_table_address_as<intptr_t>());
        __ addptr(card_addr, cardtable);

        NOT_LP64(__ get_thread(thread);)

        __ cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
        __ jcc(Assembler::equal, done);

        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
        __ cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
        __ jcc(Assembler::equal, done);

        // storing region crossing non-NULL, card is clean.
        // dirty card and log.

        __ movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());

        const Register tmp = rdx;
        __ push(rdx);

        __ movptr(tmp, queue_index);
        __ testptr(tmp, tmp);
        __ jcc(Assembler::zero, runtime);
        __ subptr(tmp, wordSize);
        __ movptr(queue_index, tmp);
        __ addptr(tmp, buffer);
        __ movptr(Address(tmp, 0), card_addr);
        __ jmp(enqueued);

        __ bind(runtime);

        save_live_registers(sasm, 3);

        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);

        restore_live_registers(sasm);

        __ bind(enqueued);
        __ pop(rdx);

        __ bind(done);
        __ pop(rcx);
        __ pop(rax);
      }
      break;
#endif // INCLUDE_ALL_GCS

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm, 1);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ movptr(rax, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}