/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#endif

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
  // for sparc changing the number of arguments doesn't change
  // anything about the frame size so we'll always lie and claim that
  // we are only passing 1 argument.
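  //
  // Note on the call_RT protocol (summarizing the code below and the
  // argument-loading overloads further down): G2_thread is passed to the
  // runtime entry in O0, usually from the delay slot of the call; explicit
  // arguments go in O1..O3; and the returned call_offset (the offset of the
  // return address within the stub) is what callers hand to
  // OopMapSet::add_gc_map.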
  set_num_rt_args(1);

  assert_not_delayed();
  // bang stack before going to runtime
  set(-os::vm_page_size() + STACK_BIAS, G3_scratch);
  st(G0, SP, G3_scratch);

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  set_last_Java_frame(SP, noreg);
  if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(L7_thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread) {
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  } else {
    delayed()->nop();               // (thread already passed)
  }
  int call_offset = offset();  // offset of return address
  restore_thread(L7_thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    ld_ptr(exception_addr, Gtemp);
    br_null_short(Gtemp, pt, L);
    Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
    st_ptr(G0, vm_result_addr);
    Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
    st_ptr(G0, vm_result_addr_2);

    if (frame_size() == no_frame_size) {
      // we use O7 linkage so that forward_exception_entry has the issuing PC
      call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
      delayed()->restore();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));
      jump_to(exc, G4);
      delayed()->nop();
    }
    bind(L);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
    st_ptr(G0, vm_result_addr);
  }

  // get second result if there is one and reset the value in the thread
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
    st_ptr(G0, vm_result_addr_2);
  }

  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  mov(arg2, O2); assert(arg2 != O1, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  mov(arg2, O2); assert(arg2 != O1, "smashed argument");
  mov(arg3, O3); assert(arg3 != O1 && arg3 != O2, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;
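
// Note: these save offsets are word offsets from (biased) SP, computed once
// by Runtime1::initialize_pd() below.  frame_size_in_bytes stays -1 until
// initialize_pd() has run, which is why generate_oop_map() and
// save_live_registers() assert that it matches total_frame_size_in_bytes().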

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         "mismatch in calculation");
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }
  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         "mismatch in calculation");
  __ save_frame_c1(frame_size_in_bytes);

  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint))

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ st_ptr(r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ stf(FloatRegisterImpl::S, r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
    }
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      __ ld_ptr(SP, (cpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
    }
  }

  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      __ ldf(FloatRegisterImpl::S, SP, (fpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
    }
  }
}
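
// Only the volatile G registers (G1, G3, G4, G5) and the single-precision
// float registers are saved and restored explicitly by the helpers above;
// the windowed I/L/O registers are preserved by the SPARC register windows
// themselves, as described in the comment inside save_live_registers().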


void Runtime1::initialize_pd() {
  // compute word offsets from SP at which live (non-windowed) registers are captured by stub routines
  //
  // A stub routine will have a frame that is at least large enough to hold
  // a register window save area (obviously) and the volatile g registers
  // and floating registers.  A user of save_live_registers can have a frame
  // that has more scratch area in it (although typically they will use L-regs).
  // in that case the frame will look like this (stack growing down)
  //
  //   FP ->  |             |
  //          | scratch mem |
  //          |   "   "     |
  //          ---------------
  //          | float regs  |
  //          |   "   "     |
  //          ---------------
  //          | G regs      |
  //          |   "   "     |
  //          ---------------
  //          | abi reg.    |
  //          | window save |
  //          | area        |
  //   SP ->  ---------------
  //
  int i;
  int sp_offset = align_up((int)frame::register_save_words, 2); // start doubleword aligned

  // only G int registers are saved explicitly; others are found in register windows
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      cpu_reg_save_offsets[i] = sp_offset;
      sp_offset++;
    }
  }

  // all float registers are saved explicitly
  assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");
  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset++;
  }
  reg_save_size_in_words = sp_offset - frame::memory_parameter_word_sp_offset;
  // this should match assembler::total_frame_size_in_bytes, which
  // isn't callable from this context.  It's checked by an assert when
  // it's used though.
  frame_size_in_bytes = align_up(sp_offset * wordSize, 8);
}


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, G4);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,
                                        Register arg1, Register arg2, Register arg3) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (arg1 == noreg) {
    call_offset = __ call_RT(result, noreg, target);
  } else if (arg2 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1);
  } else if (arg3 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2);
  } else {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);
  restore_live_registers(sasm);

  __ ret();
  __ delayed()->restore();

  return oop_maps;
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);

  // call the runtime patching routine, returns non-zero if nmethod got deopted.
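  // (The routine's return value lands in O0, which is tested below: non-zero
  // means the nmethod was deoptimized while we were in the runtime.)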
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // re-execute the patched instruction or, if the nmethod was deoptimized,
  // return to the deoptimization handler entry that will cause re-execution
  // of the current bytecode
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  Label no_deopt;
  __ br_null_short(O0, Assembler::pt, no_deopt);

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned, we'd deopt as if any call we patched had just
  // returned.

  restore_live_registers(sasm);

  AddressLiteral dest(deopt_blob->unpack_with_reexecution());
  __ jump_to(dest, O0);
  __ delayed()->restore();

  __ bind(no_deopt);
  restore_live_registers(sasm);
  __ ret();
  __ delayed()->restore();

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  OopMapSet* oop_maps = NULL;
  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // stub code & info for the different stubs
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register G5_klass = G5; // Incoming
        Register O0_obj   = O0; // Outgoing

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Label slow_path;
          Register G1_obj_size = G1;
          Register G3_t1       = G3;
          Register G4_t2       = G4;
          assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);

          // Push a frame since we may do dtrace notification for the
          // allocation which requires calling out and we don't want
          // to stomp the real return address.
          __ save_frame(0);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldub(G5_klass, in_bytes(InstanceKlass::init_state_offset()), G3_t1);
            __ cmp(G3_t1, InstanceKlass::fully_initialized);
            __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
            __ delayed()->nop();
          }
#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
            // make sure it's an instance (LH > 0)
            __ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);
            __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
            __ br(Assembler::zero, false, Assembler::pn, ok);
            __ delayed()->nop();
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

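          // Note: eden_allocate() attempts to allocate directly from the
          // shared eden space (this path is guarded by
          // supports_inline_contig_alloc() above) and branches to slow_path
          // when eden cannot satisfy the request.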
          // If we got here then the TLAB allocation failed, so try allocating directly from eden.
          // get the instance size
          __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
          __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
          __ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);

          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2, /* is_tlab_allocated */ false);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(slow_path);

          // pop this frame so generate_stub_call can push its own
          __ restore();
        }

        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);
        // I0 -> O0: new instance
      }
      break;

    case counter_overflow_id:
      // G4 contains bci, G5 contains method
      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4, G5);
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register G5_klass  = G5; // Incoming
        Register G4_length = G4; // Incoming
        Register O0_obj    = O0; // Outgoing

        Address klass_lh(G5_klass, Klass::layout_helper_offset());
        assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
        assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
        // Use this offset to pick out an individual byte of the layout_helper:
        const int klass_lh_header_size_offset = ((BytesPerInt - 1)  // 3 - 2 selects byte {0,1,0,0}
                                                 - Klass::_lh_header_size_shift / BitsPerByte);

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register G3_t1 = G3;
          __ ld(klass_lh, G3_t1);
          __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
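          // The array tag extracted from the layout helper distinguishes
          // typeArrays (_lh_array_tag_type_value) from objArrays
          // (_lh_array_tag_obj_value); anything else is not an array klass.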
          __ cmp_and_brx_short(G3_t1, tag, Assembler::equal, Assembler::pt, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);
        } else {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length);
        }
        // I0 -> O0: new array
      }
      break;

    case new_multi_array_id:
      { // O0: klass
        // O1: rank
        // O2: address of 1st dimension
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);
        // I0 -> O0: new multi array
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = O1;
        __ load_klass(O0, t);
        __ ld(t, in_bytes(Klass::access_flags_offset()), t);
        __ set(JVM_ACC_HAS_FINALIZER, G3);
        __ andcc(G3, t, G0);
        __ br(Assembler::notZero, false, Assembler::pt, register_finalizer);
        __ delayed()->nop();

        // do a leaf return
        __ retl();
        __ delayed()->nop();

        __ bind(register_finalizer);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        // O0: exception
        // I7: address of call to this method

        __ set_info("unwind_exception", dont_gc_arguments);
        __ mov(Oexception, Oexception->after_save());
        __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());
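        // exception_handler_for_return_address returns the handler address in
        // O0; Oexception and Oissuing_pc survive the call in their after-save
        // (I-register) incarnations.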
        __ verify_not_null_oop(Oexception->after_save());

        // Restore SP from L7 if the exception PC is a method handle call site.
        __ mov(O0, G5);  // Save the target address.
        __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
        __ tst(L0);  // Condition codes are preserved over the restore.
        __ restore();

        __ jmp(G5, 0);
        __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        // G4: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
        // Arguments :
        //
        //      ret  : G3
        //      sub  : G3, argument, destroyed
        //      super: G1, argument, not changed
        //      raddr: O7, blown by call
        Label miss;

        __ save_frame(0);               // Blow no registers!

        __ check_klass_subtype_slow_path(G3, G1, L0, L1, L2, L4, NULL, &miss);

        __ mov(1, G3);
        __ ret();                       // Result in G3 is 'true'
        __ delayed()->restore();        // free copy or add can go here

        __ bind(miss);
        __ mov(0, G3);
        __ ret();                       // Result in G3 is 'false'
        __ delayed()->restore();        // free copy or add can go here
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // G4: object
        // G5: lock address
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), G4, G5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // G4: lock address
        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), G4);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case deoptimize_id:
      {
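        // G4: deoptimization trap request (forwarded unchanged as the
        // argument to Runtime1::deoptimize)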
        __ set_info("deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), G4);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ jump_to(dest, O0);
        __ delayed()->restore();
      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object
        __ set_info("dtrace_object_alloc", dont_gc_arguments);
        // we can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm);

        __ save_thread(L7_thread_cache);
        __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
                relocInfo::runtime_call_type);
        __ delayed()->mov(I0, O0);
        __ restore_thread(L7_thread_cache);

        restore_live_registers(sasm);
        __ ret();
        __ delayed()->restore();
      }
      break;

#if INCLUDE_ALL_GCS
    case g1_pre_barrier_slow_id:
      { // G4: previous value of memory
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1BarrierSet) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        Register pre_val = G4;
        Register tmp  = G1_scratch;
        Register tmp2 = G3_scratch;

        Label refill, restart;
        int satb_q_active_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_active());
        int satb_q_index_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_index());
        int satb_q_buf_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_buf());

        // Is marking still active?
        if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
          __ ld(G2_thread, satb_q_active_byte_offset, tmp);
        } else {
          assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
          __ ldsb(G2_thread, satb_q_active_byte_offset, tmp);
        }
        __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, restart);
        __ retl();
        __ delayed()->nop();

        __ bind(restart);
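        // SATB enqueue: the queue index counts down, so a zero index means
        // the thread-local buffer is full and must be handed to the VM (the
        // refill path below) before the pre-value can be recorded.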
        // Load the index into the SATB buffer.  SATBMarkQueue::_index is a
        // size_t so ld_ptr is appropriate.
        __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);

        // index == 0?
        __ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pn, refill);

        __ ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
        __ sub(tmp, oopSize, tmp);

        __ st_ptr(pre_val, tmp2, tmp);  // [_buf + index] := pre_val
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);

        __ bind(refill);

        save_live_registers(sasm);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         SATBMarkQueueSet::handle_zero_index_for_thread),
                        G2_thread);

        restore_live_registers(sasm);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;

    case g1_post_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1BarrierSet) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        Register addr      = G4;
        Register cardtable = G5;
        Register tmp  = G1_scratch;
        Register tmp2 = G3_scratch;
        jbyte* byte_map_base = ci_card_table_address();

        Label not_already_dirty, restart, refill, young_card;

        __ srlx(addr, CardTable::card_shift, addr);

        AddressLiteral rs(byte_map_base);
        __ set(rs, cardtable);          // cardtable := <card table base>
        __ ldub(addr, cardtable, tmp);  // tmp := [addr + cardtable]

        __ cmp_and_br_short(tmp, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);

        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
        __ ldub(addr, cardtable, tmp);  // tmp := [addr + cardtable]

        assert(CardTable::dirty_card_val() == 0, "otherwise check this code");
        __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);

        __ bind(young_card);
        // We didn't take the branch, so we're already dirty: return.
        // Use return-from-leaf
        __ retl();
        __ delayed()->nop();

        // Not dirty.
        __ bind(not_already_dirty);

        // Get cardtable + tmp into a reg by itself
        __ add(addr, cardtable, tmp2);

        // First, dirty it.
        __ stb(G0, tmp2, 0);  // [cardPtr] := 0 (i.e., dirty).

        Register tmp3 = cardtable;
        Register tmp4 = tmp;

        // these registers are now dead
        addr = cardtable = tmp = noreg;

        int dirty_card_q_index_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   DirtyCardQueue::byte_offset_of_index());
        int dirty_card_q_buf_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   DirtyCardQueue::byte_offset_of_buf());

        __ bind(restart);
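        // This mirrors the SATB enqueue above, but for the dirty card queue:
        // the card address is recorded so the card can be rescanned later.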
        // Get the index into the update buffer.  DirtyCardQueue::_index is
        // a size_t so ld_ptr is appropriate here.
        __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);

        // index == 0?
        __ cmp_and_brx_short(tmp3, G0, Assembler::equal, Assembler::pn, refill);

        __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
        __ sub(tmp3, oopSize, tmp3);

        __ st_ptr(tmp2, tmp4, tmp3);  // [_buf + index] := <address_of_card>
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);

        __ bind(refill);

        save_live_registers(sasm);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         DirtyCardQueueSet::handle_zero_index_for_thread),
                        G2_thread);

        restore_live_registers(sasm);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;
#endif // INCLUDE_ALL_GCS

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        restore_live_registers(sasm);

        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ jump_to(dest, O0);
        __ delayed()->restore();
      }
      break;

    default:
      { __ set_info("unimplemented entry", dont_gc_arguments);
        __ save_frame(0);
        __ set((int)id, O1);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), O1);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
    case forward_exception_id:
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places.  Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found.  Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm, true);

      // transfer the pending exception to the exception_oop
      __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception);
      __ ld_ptr(Oexception, 0, G0);
      __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset()));
      __ add(I7, frame::pc_return_offset, Oissuing_pc);
      break;
    case handle_exception_id:
      // At this point all registers MAY be live.
      oop_map = save_live_registers(sasm);
      __ mov(Oexception->after_save(),  Oexception);
      __ mov(Oissuing_pc->after_save(), Oissuing_pc);
      break;
    case handle_exception_from_callee_id:
      // At this point all registers except exception oop (Oexception)
      // and exception pc (Oissuing_pc) are dead.
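      // An empty OopMap of the standard frame size suffices here, since no
      // live registers need to be described for GC.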
      oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
      sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
      __ save_frame_c1(frame_size_in_bytes);
      __ mov(Oexception->after_save(),  Oexception);
      __ mov(Oissuing_pc->after_save(), Oissuing_pc);
      break;
    default:  ShouldNotReachHere();
  }

  __ verify_not_null_oop(Oexception);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  Register scratch = I7;  // We can use I7 here because it's overwritten later anyway.
  __ ld_ptr(Address(G2_thread, JavaThread::exception_oop_offset()), scratch);
  __ br_null(scratch, false, Assembler::pt, oop_empty);
  __ delayed()->nop();
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ld_ptr(Address(G2_thread, JavaThread::exception_pc_offset()), scratch);
  __ br_null(scratch, false, Assembler::pt, pc_empty);
  __ delayed()->nop();
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save the exception and issuing pc in the thread
  __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  // use the throwing pc as the return address to lookup (has bci & oop map)
  __ mov(Oissuing_pc, I7);
  __ sub(I7, frame::pc_return_offset, I7);
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Note: if nmethod has been deoptimized then regardless of
  // whether it had a handler or not we will deoptimize
  // by entering the deopt blob with a pending exception.

  // Restore the registers that were saved at the beginning, remove
  // the frame and jump to the exception handler.
  switch (id) {
    case forward_exception_id:
    case handle_exception_id:
      restore_live_registers(sasm);
      __ jmp(O0, 0);
      __ delayed()->restore();
      break;
    case handle_exception_from_callee_id:
      // Restore SP from L7 if the exception PC is a method handle call site.
      __ mov(O0, G5);  // Save the target address.
      __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
      __ tst(L0);  // Condition codes are preserved over the restore.
      __ restore();

      __ jmp(G5, 0);  // jump to the exception handler
      __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
      break;
    default: ShouldNotReachHere();
  }

  return oop_maps;
}


#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}