/*
 * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_c1_Runtime1_sparc.cpp.incl"

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry_point, int number_of_arguments) {
  // On SPARC, changing the number of arguments doesn't change
  // anything about the frame size, so we'll always lie and claim that
  // we are only passing 1 argument.
  set_num_rt_args(1);

  assert_not_delayed();
  // bang stack before going to runtime
  set(-os::vm_page_size() + STACK_BIAS, G3_scratch);
  st(G0, SP, G3_scratch);

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  set_last_Java_frame(SP, noreg);
  if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(L7_thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread) {
    delayed()->mov(G2_thread, O0); // pass thread as first argument
  } else {
    delayed()->nop();              // (thread already passed)
  }
  int call_offset = offset(); // offset of return address
  restore_thread(L7_thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    ld_ptr(exception_addr, Gtemp);
    br_null(Gtemp, false, pt, L);
    delayed()->nop();
    Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
    st_ptr(G0, vm_result_addr);
    Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
    st_ptr(G0, vm_result_addr_2);

    if (frame_size() == no_frame_size) {
      // we use O7 linkage so that forward_exception_entry has the issuing PC
      call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
      delayed()->restore();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));
      jump_to(exc, G4);
      delayed()->nop();
    }
    bind(L);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
    st_ptr(G0, vm_result_addr);
  }
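
  // Oop results come back through the thread instead of a register because
  // a GC during the runtime call may move them; get_vm_result and
  // get_vm_result_2 read the field and then clear it so a later GC cannot
  // see a stale oop.  The second result slot below follows the same protocol.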
  if (oop_result2->is_valid()) {
    get_vm_result_2(oop_result2);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
    st_ptr(G0, vm_result_addr_2);
  }

  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  return call_RT(oop_result1, oop_result2, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  mov(arg2, O2); assert(arg2 != O1, "smashed argument");
  return call_RT(oop_result1, oop_result2, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  mov(arg2, O2); assert(arg2 != O1, "smashed argument");
  mov(arg3, O3); assert(arg3 != O1 && arg3 != O2, "smashed argument");
  return call_RT(oop_result1, oop_result2, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         "mismatch in calculation");
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }
  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         "mismatch in calculation");
  __ save_frame_c1(frame_size_in_bytes);
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);

  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
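  // (A SPARC `save` shifts the register window so the caller's O registers
  // become this frame's I registers; that is why only the non-windowed
  // G registers and the float registers need explicit stores below.)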
  // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint))

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ st_ptr(r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ stf(FloatRegisterImpl::S, r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
    }
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      __ ld_ptr(SP, (cpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
    }
  }

  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      __ ldf(FloatRegisterImpl::S, SP, (fpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
    }
  }
}


void Runtime1::initialize_pd() {
  // compute word offsets from SP at which live (non-windowed) registers are captured by stub routines
  //
  // A stub routine will have a frame that is at least large enough to hold
  // a register window save area (obviously) and the volatile G registers
  // and floating registers.  A user of save_live_registers can have a frame
  // that has more scratch area in it (although typically they will use L-regs).
  // In that case the frame will look like this (stack growing down)
  //
  //   FP -> |             |
  //         | scratch mem |
  //         |   "   "     |
  //         ---------------
  //         | float regs  |
  //         |   "   "     |
  //         ---------------
  //         | G regs      |
  //         |   "   "     |
  //         ---------------
  //         | abi reg.    |
  //         | window save |
  //         | area        |
  //   SP -> ---------------
  //
  int i;
  int sp_offset = round_to(frame::register_save_words, 2); // start doubleword aligned

  // only G int registers are saved explicitly; others are found in register windows
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      cpu_reg_save_offsets[i] = sp_offset;
      sp_offset++;
    }
  }

  // all float registers are saved explicitly
  assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");
  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset++;
  }
  reg_save_size_in_words = sp_offset - frame::memory_parameter_word_sp_offset;
  // This should match assembler::total_frame_size_in_bytes, which
  // isn't callable from this context.  It's checked by an assert when
  // it's used though.
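  // round the frame size up to a doubleword boundary, presumably so
  // double-word loads and stores into the save area stay aligned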
  frame_size_in_bytes = align_size_up(sp_offset * wordSize, 8);
}


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, G4);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,
                                        Register arg1, Register arg2, Register arg3) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (arg1 == noreg) {
    call_offset = __ call_RT(result, noreg, target);
  } else if (arg2 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1);
  } else if (arg3 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2);
  } else {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);
  restore_live_registers(sasm);

  __ ret();
  __ delayed()->restore();

  return oop_maps;
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);

  // call the runtime patching routine; it returns non-zero if the nmethod got deoptimized.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // re-execute the patched instruction or, if the nmethod was deoptimized, return to the
  // deoptimization handler entry that will cause re-execution of the current bytecode
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  Label no_deopt;
  __ tst(O0);
  __ brx(Assembler::equal, false, Assembler::pt, no_deopt);
  __ delayed()->nop();

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned, we'd deopt as if any call we had patched had just
  // returned.

  restore_live_registers(sasm);
  __ restore();
  __ br(Assembler::always, false, Assembler::pt, deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
  __ delayed()->nop();

  __ bind(no_deopt);
  restore_live_registers(sasm);
  __ ret();
  __ delayed()->restore();

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  OopMapSet* oop_maps = NULL;
  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // stub code & info for the different stubs
  switch (id) {
    case forward_exception_id:
      {
        // We're handling an exception in the context of a compiled
        // frame.  The registers have been saved in the standard
        // places.  Perform an exception lookup in the caller and
        // dispatch to the handler if found.  Otherwise unwind and
        // dispatch to the caller's exception handler.
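
        // (Oexception and Oissuing_pc are C1's dedicated SPARC exception
        //  registers: the exception oop and the pc at which it was raised.
        //  generate_handle_exception expects both to be set up.)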

        oop_maps = new OopMapSet();
        OopMap* oop_map = generate_oop_map(sasm, true);

        // transfer the pending exception to the exception_oop
        __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception);
        __ ld_ptr(Oexception, 0, G0); // load through the oop: faults here if the pending exception is NULL
        __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset()));
        __ add(I7, frame::pc_return_offset, Oissuing_pc);

        generate_handle_exception(sasm, oop_maps, oop_map);
        __ should_not_reach_here();
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register G5_klass = G5; // Incoming
        Register O0_obj   = O0; // Outgoing

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register G1_obj_size = G1;
          Register G3_t1       = G3;
          Register G4_t2       = G4;
          assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);

          // Push a frame since we may do dtrace notification for the
          // allocation which requires calling out and we don't want
          // to stomp the real return address.
          __ save_frame(0);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1);
            __ cmp(G3_t1, instanceKlass::fully_initialized);
            __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
            __ delayed()->nop();
          }
#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size);
            __ cmp(G1_obj_size, 0); // make sure it's an instance (LH > 0)
            __ br(Assembler::lessEqual, false, Assembler::pn, not_ok);
            __ delayed()->nop();
            __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
            __ br(Assembler::zero, false, Assembler::pn, ok);
            __ delayed()->nop();
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT
          // If we got here then the inline TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
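          // tlab_refill branches to retry_tlab once a fresh TLAB has been
          // allocated, to try_eden when the object should be allocated
          // directly from eden, and to slow_path when neither is possible.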
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass

          __ bind(retry_tlab);

          // get the instance size
          __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
          __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);
          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(try_eden);
          // get the instance size
          __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
          __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(slow_path);

          // pop this frame so generate_stub_call can push its own
          __ restore();
        }

        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);
        // I0 -> O0: new instance
      }
      break;

#ifdef TIERED
    case counter_overflow_id:
      // G4 contains bci
      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4);
      break;
#endif // TIERED

    case new_type_array_id:
    case new_object_array_id:
      {
        Register G5_klass  = G5; // Incoming
        Register G4_length = G4; // Incoming
        Register O0_obj    = O0; // Outgoing

        Address klass_lh(G5_klass, ((klassOopDesc::header_size() * HeapWordSize)
                                    + Klass::layout_helper_offset_in_bytes()));
        assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
        assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
        // Use this offset to pick out an individual byte of the layout_helper:
        const int klass_lh_header_size_offset = ((BytesPerInt - 1) // 3 - 2 selects byte {0,1,0,0}
                                                 - Klass::_lh_header_size_shift / BitsPerByte);

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register G3_t1 = G3;
          __ ld(klass_lh, G3_t1);
          __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmp(G3_t1, tag);
          __ brx(Assembler::equal, false, Assembler::pt, ok);
          __ delayed()->nop();
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register G1_arr_size = G1;
          Register G3_t1       = G3;
          Register O1_t2       = O1;
          assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);

          // check that array length is small enough for fast path
          __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
          __ cmp(G4_length, G3_t1);
          __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path);
          __ delayed()->nop();

          // If we got here then the inline TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
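          // The size computation below relies on SPARC's sll using only the
          // low 5 bits of its shift count, so shifting by the raw
          // layout_helper applies just its log2(element size) field -- the
          // "& 0x1F" in the comments happens in hardware.  E.g. for an int[]
          // (log2 esz == 2) of length n: size = (n << 2) + header size,
          // rounded up to the object alignment.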
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass

          __ bind(retry_tlab);

          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ ld(klass_lh, G3_t1);
          __ sll(G4_length, G3_t1, G1_arr_size);
          __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
          __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
          __ add(G1_arr_size, G3_t1, G1_arr_size);
          __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size); // align up
          __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);

          __ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path); // preserves G1_arr_size

          __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
          __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
          __ sub(G1_arr_size, G3_t1, O1_t2); // body length
          __ add(O0_obj, G3_t1, G3_t1);      // body start
          __ initialize_body(G3_t1, O1_t2);
          __ verify_oop(O0_obj);
          __ retl();
          __ delayed()->nop();

          __ bind(try_eden);
          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ ld(klass_lh, G3_t1);
          __ sll(G4_length, G3_t1, G1_arr_size);
          __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
          __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
          __ add(G1_arr_size, G3_t1, G1_arr_size);
          __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size); // align up
          __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);

          __ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path); // preserves G1_arr_size

          __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
          __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
          __ sub(G1_arr_size, G3_t1, O1_t2); // body length
          __ add(O0_obj, G3_t1, G3_t1);      // body start
          __ initialize_body(G3_t1, O1_t2);
          __ verify_oop(O0_obj);
          __ retl();
          __ delayed()->nop();

          __ bind(slow_path);
        }

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);
        } else {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length);
        }
        // I0 -> O0: new array
      }
      break;

    case new_multi_array_id:
      { // O0: klass
        // O1: rank
        // O2: address of 1st dimension
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);
        // I0 -> O0: new multi array
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = O1;
        __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), t);
        __ ld(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), t);
        __ set(JVM_ACC_HAS_FINALIZER, G3);
        __ andcc(G3, t, G0);
        __ br(Assembler::notZero, false, Assembler::pt, register_finalizer);
        __ delayed()->nop();

        // do a leaf return
        __ retl();
        __ delayed()->nop();

        __ bind(register_finalizer);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);
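
        // ret() expands to `jmpl %i7+8, %g0`; the restore in its delay slot
        // pops the register window on the way out.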
        __ ret();
        __ delayed()->restore();
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        // make a frame and preserve the caller's caller-save registers

        oop_maps = new OopMapSet();
        OopMap* oop_map = save_live_registers(sasm);
        __ mov(Oexception->after_save(),  Oexception);
        __ mov(Oissuing_pc->after_save(), Oissuing_pc);
        generate_handle_exception(sasm, oop_maps, oop_map);
      }
      break;

    case unwind_exception_id:
      {
        // O0: exception
        // I7: address of call to this method

        __ set_info("unwind_exception", dont_gc_arguments);
        __ mov(Oexception, Oexception->after_save());
        __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());

        __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());
        __ verify_not_null_oop(Oexception->after_save());

        // Restore SP from L7 if the exception PC is a MethodHandle call site.
        __ mov(O0, G5); // Save the target address.
        __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
        __ tst(L0);     // Condition codes are preserved over the restore.
        __ restore();

        __ jmp(G5, 0);
        __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7, SP); // Restore SP if required.
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), false);
      }
      break;

    case throw_class_cast_exception_id:
      {
        // G4: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
        // Arguments :
        //
        //      ret  : G3
        //      sub  : G3, argument, destroyed
        //      super: G1, argument, not changed
        //      raddr: O7, blown by call
        Label miss;

        __ save_frame(0); // Blow no registers!
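
        // check_klass_subtype_slow_path scans the sub klass's secondary-supers
        // array; with a NULL success label it falls through on a hit and
        // branches to miss otherwise (L0..L4 are scratch, recovered by the
        // restores below).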
        __ check_klass_subtype_slow_path(G3, G1, L0, L1, L2, L4, NULL, &miss);

        __ mov(1, G3);
        __ ret();                // Result in G3 is 'true'
        __ delayed()->restore(); // free copy or add can go here

        __ bind(miss);
        __ mov(0, G3);
        __ ret();                // Result in G3 is 'false'
        __ delayed()->restore(); // free copy or add can go here
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // G4: object
        // G5: lock address
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), G4, G5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // G4: lock address
        // note: really a leaf routine but must set up the last Java sp
        //       => use call_RT for now (speed can be improved by
        //       doing the last Java sp setup manually)
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), G4);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case jvmti_exception_throw_id:
      { // Oexception: exception
        __ set_info("jvmti_exception_throw", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), I0);
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object
        __ set_info("dtrace_object_alloc", dont_gc_arguments);
        // we can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
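        // (G2_thread is not preserved across the C call, so it is parked in
        //  L7_thread_cache by save_thread and restored afterwards.)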
        save_live_registers(sasm);

        __ save_thread(L7_thread_cache);
        __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
                relocInfo::runtime_call_type);
        __ delayed()->mov(I0, O0);
        __ restore_thread(L7_thread_cache);

        restore_live_registers(sasm);
        __ ret();
        __ delayed()->restore();
      }
      break;

#ifndef SERIALGC
    case g1_pre_barrier_slow_id:
      { // G4: previous value of memory
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        Register pre_val = G4;
        Register tmp     = G1_scratch;
        Register tmp2    = G3_scratch;

        Label refill, restart;
        bool with_frame = false; // I don't know if we can do with-frame.
        int satb_q_index_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   PtrQueue::byte_offset_of_index());
        int satb_q_buf_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   PtrQueue::byte_offset_of_buf());
        __ bind(restart);
        __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);

        __ br_on_reg_cond(Assembler::rc_z, /*annul*/false,
                          Assembler::pn, tmp, refill);

        // If the branch is taken, no harm in executing this in the delay slot.
        __ delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
        __ sub(tmp, oopSize, tmp);

        __ st_ptr(pre_val, tmp2, tmp); // [_buf + index] := pre_val (the previous value)
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);

        __ bind(refill);
        __ save_frame(0);

        __ mov(pre_val, L0);
        __ mov(tmp,     L1);
        __ mov(tmp2,    L2);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         SATBMarkQueueSet::handle_zero_index_for_thread),
                        G2_thread);

        __ mov(L0, pre_val);
        __ mov(L1, tmp);
        __ mov(L2, tmp2);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;

    case g1_post_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        Register addr      = G4;
        Register cardtable = G5;
        Register tmp       = G1_scratch;
        Register tmp2      = G3_scratch;
        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;

        Label not_already_dirty, restart, refill;

#ifdef _LP64
        __ srlx(addr, CardTableModRefBS::card_shift, addr);
#else
        __ srl(addr, CardTableModRefBS::card_shift, addr);
#endif

        AddressLiteral rs(byte_map_base);
        __ set(rs, cardtable);         // cardtable := <card table base>
        __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]

        __ br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
                          tmp, not_already_dirty);
        // Get cardtable + tmp into a reg by itself -- useful in the take-the-branch
        // case, harmless if not.
        __ delayed()->add(addr, cardtable, tmp2);

        // We didn't take the branch, so we're already dirty: return.
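        // (Filtering on the card value keeps duplicate entries out of the
        //  dirty-card queue: a card that is already dirty has been logged.)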
        // Use return-from-leaf
        __ retl();
        __ delayed()->nop();

        // Not dirty.
        __ bind(not_already_dirty);
        // First, dirty it.
        __ stb(G0, tmp2, 0); // [cardPtr] := 0 (i.e., dirty).

        Register tmp3 = cardtable;
        Register tmp4 = tmp;

        // these registers are now dead
        addr = cardtable = tmp = noreg;

        int dirty_card_q_index_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   PtrQueue::byte_offset_of_index());
        int dirty_card_q_buf_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   PtrQueue::byte_offset_of_buf());
        __ bind(restart);
        __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);

        __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn,
                          tmp3, refill);
        // If the branch is taken, no harm in executing this in the delay slot.
        __ delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
        __ sub(tmp3, oopSize, tmp3);

        __ st_ptr(tmp2, tmp4, tmp3); // [_buf + index] := <address_of_card>
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);

        __ bind(refill);
        __ save_frame(0);

        __ mov(tmp2, L0);
        __ mov(tmp3, L1);
        __ mov(tmp4, L2);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         DirtyCardQueueSet::handle_zero_index_for_thread),
                        G2_thread);

        __ mov(L0, tmp2);
        __ mov(L1, tmp3);
        __ mov(L2, tmp4);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;
#endif // !SERIALGC

    default:
      { __ set_info("unimplemented entry", dont_gc_arguments);
        __ save_frame(0);
        __ set((int)id, O1);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), O1);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}


void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) {
  Label no_deopt;

  __ verify_not_null_oop(Oexception);

  // save the exception and issuing pc in the thread
  __ st_ptr(Oexception,  G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  // save the real return address and use the throwing pc as the return address to lookup (has bci & oop map)
  __ mov(I7, L0);
  __ mov(Oissuing_pc, I7);
  __ sub(I7, frame::pc_return_offset, I7);
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));

  // Note: if the nmethod has been deoptimized then regardless of
  // whether it had a handler or not we will deoptimize
  // by entering the deopt blob with a pending exception.

#ifdef ASSERT
  Label done;
  __ tst(O0);
  __ br(Assembler::notZero, false, Assembler::pn, done);
  __ delayed()->nop();
  __ stop("should have found address");
  __ bind(done);
#endif

  // restore the registers that were saved at the beginning and jump to the exception handler.
  restore_live_registers(sasm);

  __ jmp(O0, 0);
  __ delayed()->restore();

  oop_maps->add_gc_map(call_offset, oop_map);
}


#undef __

#define __ masm->