/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#endif

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
  // for sparc changing the number of arguments doesn't change
  // anything about the frame size so we'll always lie and claim that
  // we are only passing 1 argument.
  set_num_rt_args(1);

  assert_not_delayed();
  // bang stack before going to runtime
  set(-os::vm_page_size() + STACK_BIAS, G3_scratch);
  st(G0, SP, G3_scratch);

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  set_last_Java_frame(SP, noreg);
  if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(L7_thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread) {
    delayed()->mov(G2_thread, O0); // pass thread as first argument
  } else {
    delayed()->nop();              // (thread already passed)
  }
  int call_offset = offset(); // offset of return address
  restore_thread(L7_thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    ld_ptr(exception_addr, Gtemp);
    br_null_short(Gtemp, pt, L);
    Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
    st_ptr(G0, vm_result_addr);
    Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
    st_ptr(G0, vm_result_addr_2);

    if (frame_size() == no_frame_size) {
      // we use O7 linkage so that forward_exception_entry has the issuing PC
      call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
      delayed()->restore();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));
      jump_to(exc, G4);
      delayed()->nop();
    }
    bind(L);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
    st_ptr(G0, vm_result_addr);
  }

  // get second result if there is one and reset the value in the thread
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
    st_ptr(G0, vm_result_addr_2);
  }

  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  mov(arg2, O2); assert(arg2 != O1, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  mov(arg2, O2); assert(arg2 != O1, "smashed argument");
  mov(arg3, O3); assert(arg3 != O1 && arg3 != O2, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 3);
}
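
// A typical use of call_RT from a stub looks like the following sketch
// (illustrative only; the concrete entry point and argument registers vary
// per stub below, and some_entry is a placeholder, not a real symbol):
//
//   OopMap* oop_map = save_live_registers(sasm);
//   int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, some_entry), G4);
//   oop_maps = new OopMapSet();
//   oop_maps->add_gc_map(call_offset, oop_map);
//   restore_live_registers(sasm);
//
// The returned offset is the offset of the return address, which is what the
// GC map must be keyed on.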


// Implementation of Runtime1

#define __ sasm->

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         "mismatch in calculation");
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }
  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         "mismatch in calculation");
  __ save_frame_c1(frame_size_in_bytes);

  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint))

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ st_ptr(r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ stf(FloatRegisterImpl::S, r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
    }
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      __ ld_ptr(SP, (cpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
    }
  }

  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      __ ldf(FloatRegisterImpl::S, SP, (fpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
    }
  }
}


void Runtime1::initialize_pd() {
  // compute word offsets from SP at which live (non-windowed) registers are captured by stub routines
  //
  // A stub routine will have a frame that is at least large enough to hold
  // a register window save area (obviously) and the volatile g registers
  // and floating registers.  A user of save_live_registers can have a frame
  // that has more scratch area in it (although typically they will use L-regs).
  // In that case the frame will look like this (stack growing down)
  //
  //               FP -> |             |
  //                     | scratch mem |
  //                     |   "   "     |
  //                      ---------------
  //                     | float regs  |
  //                     |   "   "     |
  //                      ---------------
  //                     | G regs      |
  //                     |   "   "     |
  //                      ---------------
  //                     | abi reg.    |
  //                     | window save |
  //                     | area        |
  //               SP -> ---------------
  //
  int i;
  int sp_offset = align_up((int)frame::register_save_words, 2); // start doubleword aligned

  // only G int registers are saved explicitly; others are found in register windows
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      cpu_reg_save_offsets[i] = sp_offset;
      sp_offset++;
    }
  }

  // all float registers are saved explicitly
  assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");
  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset++;
  }
  reg_save_size_in_words = sp_offset - frame::memory_parameter_word_sp_offset;
  // this should match assembler::total_frame_size_in_bytes, which
  // isn't callable from this context.  It's checked by an assert when
  // it's used though.
  frame_size_in_bytes = align_up(sp_offset * wordSize, 8);
}
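
// Worked example of the layout computed above (illustrative; it assumes
// frame::register_save_words is 16, i.e. the 8 local and 8 in registers of
// the window save area): the four saved G registers would then occupy word
// offsets 16..19 and the 32 single-precision float registers offsets 20..51,
// so sp_offset reaches 52 words before the final 8-byte alignment.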


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, G4);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,
                                        Register arg1, Register arg2, Register arg3) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (arg1 == noreg) {
    call_offset = __ call_RT(result, noreg, target);
  } else if (arg2 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1);
  } else if (arg3 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2);
  } else {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);
  restore_live_registers(sasm);

  __ ret();
  __ delayed()->restore();

  return oop_maps;
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);

  // call the runtime patching routine; returns non-zero if the nmethod got deoptimized
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // re-execute the patched instruction or, if the nmethod was deoptimized, return to the
  // deoptimization handler entry that will cause re-execution of the current bytecode
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  Label no_deopt;
  __ br_null_short(O0, Assembler::pt, no_deopt);

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned, we'd deopt as if any call we patched had just
  // returned.

  restore_live_registers(sasm);

  AddressLiteral dest(deopt_blob->unpack_with_reexecution());
  __ jump_to(dest, O0);
  __ delayed()->restore();

  __ bind(no_deopt);
  restore_live_registers(sasm);
  __ ret();
  __ delayed()->restore();

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  OopMapSet* oop_maps = NULL;
  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // stub code & info for the different stubs
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register G5_klass = G5; // Incoming
        Register O0_obj   = O0; // Outgoing

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Label slow_path;
          Register G1_obj_size = G1;
          Register G3_t1       = G3;
          Register G4_t2       = G4;
          assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);

          // Push a frame since we may do dtrace notification for the
          // allocation which requires calling out and we don't want
          // to stomp the real return address.
          __ save_frame(0);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldub(G5_klass, in_bytes(InstanceKlass::init_state_offset()), G3_t1);
            __ cmp(G3_t1, InstanceKlass::fully_initialized);
            __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
            __ delayed()->nop();
          }
#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
            // make sure it's an instance (LH > 0)
            __ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);
            __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
            __ br(Assembler::zero, false, Assembler::pn, ok);
            __ delayed()->nop();
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // If we got here then the TLAB allocation failed, so try allocating directly from eden.
          // get the instance size
          __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
          __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
          __ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);

          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2, /* is_tlab_allocated */ false);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(slow_path);

          // pop this frame so generate_stub_call can push its own
          __ restore();
        }

        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);
        // I0->O0: new instance
      }

      break;

    case counter_overflow_id:
      // G4 contains bci, G5 contains method
      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4, G5);
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register G5_klass  = G5; // Incoming
        Register G4_length = G4; // Incoming
        Register O0_obj    = O0; // Outgoing

        Address klass_lh(G5_klass, Klass::layout_helper_offset());
        assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
        assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
        // Use this offset to pick out an individual byte of the layout_helper:
        const int klass_lh_header_size_offset = ((BytesPerInt - 1)  // 3 - 2 selects byte {0,1,0,0}
                                                 - Klass::_lh_header_size_shift / BitsPerByte);
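
        // (Byte-selection sketch: the layout_helper is a 32-bit word whose
        // header-size field sits _lh_header_size_shift bits up from the low
        // end.  On a big-endian machine like SPARC, the byte holding that
        // field lies (BytesPerInt - 1) - shift/BitsPerByte bytes from the
        // word's base address, which is what the offset above computes.)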

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register G3_t1 = G3;
          __ ld(klass_lh, G3_t1);
          __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmp_and_brx_short(G3_t1, tag, Assembler::equal, Assembler::pt, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);
        } else {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length);
        }
        // I0 -> O0: new array
      }
      break;

    case new_multi_array_id:
      { // O0: klass
        // O1: rank
        // O2: address of 1st dimension
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);
        // I0 -> O0: new multi array
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = O1;
        __ load_klass(O0, t);
        __ ld(t, in_bytes(Klass::access_flags_offset()), t);
        __ set(JVM_ACC_HAS_FINALIZER, G3);
        __ andcc(G3, t, G0);
        __ br(Assembler::notZero, false, Assembler::pt, register_finalizer);
        __ delayed()->nop();

        // do a leaf return
        __ retl();
        __ delayed()->nop();

        __ bind(register_finalizer);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        // O0: exception
        // I7: address of call to this method

        __ set_info("unwind_exception", dont_gc_arguments);
        __ mov(Oexception, Oexception->after_save());
        __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());

        __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());
        __ verify_not_null_oop(Oexception->after_save());

        // Restore SP from L7 if the exception PC is a method handle call site.
        __ mov(O0, G5);  // Save the target address.
        __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
        __ tst(L0);  // Condition codes are preserved over the restore.
        __ restore();

        __ jmp(G5, 0);
        __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        // G4: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
        // Arguments :
        //
        //      ret  : G3
        //      sub  : G3, argument, destroyed
        //      super: G1, argument, not changed
        //      raddr: O7, blown by call
        Label miss;

        __ save_frame(0);               // Blow no registers!

        __ check_klass_subtype_slow_path(G3, G1, L0, L1, L2, L4, NULL, &miss);

        __ mov(1, G3);
        __ ret();                       // Result in G3 is 'true'
        __ delayed()->restore();        // free copy or add can go here

        __ bind(miss);
        __ mov(0, G3);
        __ ret();                       // Result in G3 is 'false'
        __ delayed()->restore();        // free copy or add can go here
      }

    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // G4: object
        // G5: lock address
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), G4, G5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // G4: lock address
        // note: really a leaf routine but must set up last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), G4);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case deoptimize_id:
      {
705 __ set_info("deoptimize", dont_gc_arguments); 706 OopMap* oop_map = save_live_registers(sasm); 707 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), G4); 708 oop_maps = new OopMapSet(); 709 oop_maps->add_gc_map(call_offset, oop_map); 710 restore_live_registers(sasm); 711 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); 712 assert(deopt_blob != NULL, "deoptimization blob must have been created"); 713 AddressLiteral dest(deopt_blob->unpack_with_reexecution()); 714 __ jump_to(dest, O0); 715 __ delayed()->restore(); 716 } 717 break; 718 719 case access_field_patching_id: 720 { __ set_info("access_field_patching", dont_gc_arguments); 721 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching)); 722 } 723 break; 724 725 case load_klass_patching_id: 726 { __ set_info("load_klass_patching", dont_gc_arguments); 727 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching)); 728 } 729 break; 730 731 case load_mirror_patching_id: 732 { __ set_info("load_mirror_patching", dont_gc_arguments); 733 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching)); 734 } 735 break; 736 737 case load_appendix_patching_id: 738 { __ set_info("load_appendix_patching", dont_gc_arguments); 739 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching)); 740 } 741 break; 742 743 case dtrace_object_alloc_id: 744 { // O0: object 745 __ set_info("dtrace_object_alloc", dont_gc_arguments); 746 // we can't gc here so skip the oopmap but make sure that all 747 // the live registers get saved. 748 save_live_registers(sasm); 749 750 __ save_thread(L7_thread_cache); 751 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), 752 relocInfo::runtime_call_type); 753 __ delayed()->mov(I0, O0); 754 __ restore_thread(L7_thread_cache); 755 756 restore_live_registers(sasm); 757 __ ret(); 758 __ delayed()->restore(); 759 } 760 break; 761 762 #if INCLUDE_ALL_GCS 763 case g1_pre_barrier_slow_id: 764 { // G4: previous value of memory 765 BarrierSet* bs = BarrierSet::barrier_set(); 766 if (bs->kind() != BarrierSet::G1BarrierSet) { 767 __ save_frame(0); 768 __ set((int)id, O1); 769 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0); 770 __ should_not_reach_here(); 771 break; 772 } 773 774 __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments); 775 776 Register pre_val = G4; 777 Register tmp = G1_scratch; 778 Register tmp2 = G3_scratch; 779 780 Label refill, restart; 781 int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()); 782 int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()); 783 int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()); 784 785 // Is marking still active? 786 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { 787 __ ld(G2_thread, satb_q_active_byte_offset, tmp); 788 } else { 789 assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); 790 __ ldsb(G2_thread, satb_q_active_byte_offset, tmp); 791 } 792 __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, restart); 793 __ retl(); 794 __ delayed()->nop(); 795 796 __ bind(restart); 797 // Load the index into the SATB buffer. SATBMarkQueue::_index is a 798 // size_t so ld_ptr is appropriate 799 __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp); 800 801 // index == 0? 

        __ bind(restart);
        // Load the index into the SATB buffer. SATBMarkQueue::_index is a
        // size_t so ld_ptr is appropriate.
        __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);

        // index == 0?
        __ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pn, refill);

        __ ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
        __ sub(tmp, oopSize, tmp);

        __ st_ptr(pre_val, tmp2, tmp);  // [_buf + index] := pre_val
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);

        __ bind(refill);

        save_live_registers(sasm);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         SATBMarkQueueSet::handle_zero_index_for_thread),
                        G2_thread);

        restore_live_registers(sasm);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;

    case g1_post_barrier_slow_id:
      {
        BarrierSet* bs = BarrierSet::barrier_set();
        if (bs->kind() != BarrierSet::G1BarrierSet) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        Register addr      = G4;
        Register cardtable = G5;
        Register tmp  = G1_scratch;
        Register tmp2 = G3_scratch;
        jbyte* byte_map_base = ci_card_table_address();
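
        // Card-marking arithmetic, in sketch form: with the usual 512-byte
        // cards (CardTable::card_shift == 9), a store to address A maps to
        // the card byte at byte_map_base + (A >> 9).  The code below first
        // filters out young-region cards and cards that are already dirty,
        // and only enqueues the card address for refinement otherwise.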

        Label not_already_dirty, restart, refill, young_card;

        __ srlx(addr, CardTable::card_shift, addr);

        AddressLiteral rs(byte_map_base);
        __ set(rs, cardtable);          // cardtable := <card table base>
        __ ldub(addr, cardtable, tmp);  // tmp := [addr + cardtable]

        __ cmp_and_br_short(tmp, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);

        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
        __ ldub(addr, cardtable, tmp);  // tmp := [addr + cardtable]

        assert(CardTable::dirty_card_val() == 0, "otherwise check this code");
        __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);

        __ bind(young_card);
        // We didn't take the branch, so we're already dirty: return.
        // Use return-from-leaf
        __ retl();
        __ delayed()->nop();

        // Not dirty.
        __ bind(not_already_dirty);

        // Get cardtable + tmp into a reg by itself
        __ add(addr, cardtable, tmp2);

        // First, dirty it.
        __ stb(G0, tmp2, 0);  // [cardPtr] := 0 (i.e., dirty).

        Register tmp3 = cardtable;
        Register tmp4 = tmp;

        // these registers are now dead
        addr = cardtable = tmp = noreg;

        int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
        int dirty_card_q_buf_byte_offset   = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());

        __ bind(restart);

        // Get the index into the update buffer. DirtyCardQueue::_index is
        // a size_t so ld_ptr is appropriate here.
        __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);

        // index == 0?
        __ cmp_and_brx_short(tmp3, G0, Assembler::equal, Assembler::pn, refill);

        __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
        __ sub(tmp3, oopSize, tmp3);

        __ st_ptr(tmp2, tmp4, tmp3);  // [_buf + index] := <address_of_card>
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);

        __ bind(refill);

        save_live_registers(sasm);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         DirtyCardQueueSet::handle_zero_index_for_thread),
                        G2_thread);

        restore_live_registers(sasm);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;
#endif // INCLUDE_ALL_GCS

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        restore_live_registers(sasm);

        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ jump_to(dest, O0);
        __ delayed()->restore();
      }
      break;

    default:
      { __ set_info("unimplemented entry", dont_gc_arguments);
        __ save_frame(0);
        __ set((int)id, O1);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), O1);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
    case forward_exception_id:
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places.  Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found.  Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm, true);

      // transfer the pending exception to the exception_oop
      __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception);
      __ ld_ptr(Oexception, 0, G0);
      __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset()));
      __ add(I7, frame::pc_return_offset, Oissuing_pc);
      break;
    case handle_exception_id:
      // At this point all registers MAY be live.
      oop_map = save_live_registers(sasm);
      __ mov(Oexception->after_save(),  Oexception);
      __ mov(Oissuing_pc->after_save(), Oissuing_pc);
      break;
    case handle_exception_from_callee_id:
      // At this point all registers except exception oop (Oexception)
      // and exception pc (Oissuing_pc) are dead.
      oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
      sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
      __ save_frame_c1(frame_size_in_bytes);
      __ mov(Oexception->after_save(),  Oexception);
      __ mov(Oissuing_pc->after_save(), Oissuing_pc);
      break;
    default: ShouldNotReachHere();
  }

  __ verify_not_null_oop(Oexception);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  Register scratch = I7;  // We can use I7 here because it's overwritten later anyway.
  __ ld_ptr(Address(G2_thread, JavaThread::exception_oop_offset()), scratch);
  __ br_null(scratch, false, Assembler::pt, oop_empty);
  __ delayed()->nop();
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ld_ptr(Address(G2_thread, JavaThread::exception_pc_offset()), scratch);
  __ br_null(scratch, false, Assembler::pt, pc_empty);
  __ delayed()->nop();
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save the exception and issuing pc in the thread
  __ st_ptr(Oexception,  G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  // use the throwing pc as the return address to lookup (has bci & oop map)
  __ mov(Oissuing_pc, I7);
  __ sub(I7, frame::pc_return_offset, I7);
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Note: if nmethod has been deoptimized then regardless of
  // whether it had a handler or not we will deoptimize
  // by entering the deopt blob with a pending exception.

  // Restore the registers that were saved at the beginning, remove
  // the frame and jump to the exception handler.
  switch (id) {
    case forward_exception_id:
    case handle_exception_id:
      restore_live_registers(sasm);
      __ jmp(O0, 0);
      __ delayed()->restore();
      break;
    case handle_exception_from_callee_id:
      // Restore SP from L7 if the exception PC is a method handle call site.
      __ mov(O0, G5);  // Save the target address.
      __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
      __ tst(L0);  // Condition codes are preserved over the restore.
      __ restore();

      __ jmp(G5, 0);  // jump to the exception handler
      __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
      break;
    default: ShouldNotReachHere();
  }

  return oop_maps;
}


#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}