/*
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/compiledICHolderOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_sparc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry_point, int number_of_arguments) {
  // for sparc changing the number of arguments doesn't change
  // anything about the frame size so we'll always lie and claim that
  // we are only passing 1 argument.
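  // (The reason this is safe: the SPARC calling convention passes the first
  //  six outgoing integer arguments in %o0-%o5, so for the small argument
  //  counts used here no stack space for arguments is ever required.)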
  set_num_rt_args(1);

  assert_not_delayed();
  // bang stack before going to runtime
  set(-os::vm_page_size() + STACK_BIAS, G3_scratch);
  st(G0, SP, G3_scratch);

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  set_last_Java_frame(SP, noreg);
  if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(L7_thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread) {
    delayed()->mov(G2_thread, O0); // pass thread as first argument
  } else {
    delayed()->nop();              // (thread already passed)
  }
  int call_offset = offset(); // offset of return address
  restore_thread(L7_thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    ld_ptr(exception_addr, Gtemp);
    br_null(Gtemp, false, pt, L);
    delayed()->nop();
    Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
    st_ptr(G0, vm_result_addr);
    Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
    st_ptr(G0, vm_result_addr_2);

    if (frame_size() == no_frame_size) {
      // we use O7 linkage so that forward_exception_entry has the issuing PC
      call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
      delayed()->restore();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));
      jump_to(exc, G4);
      delayed()->nop();
    }
    bind(L);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
    st_ptr(G0, vm_result_addr);
  }

  if (oop_result2->is_valid()) {
    get_vm_result_2(oop_result2);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
    st_ptr(G0, vm_result_addr_2);
  }

  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  return call_RT(oop_result1, oop_result2, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  mov(arg2, O2); assert(arg2 != O1, "smashed argument");
  return call_RT(oop_result1, oop_result2, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  mov(arg2, O2); assert(arg2 != O1,               "smashed argument");
  mov(arg3, O3); assert(arg3 != O1 && arg3 != O2, "smashed argument");
  return call_RT(oop_result1, oop_result2, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;

static OopMap*
generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         "mismatch in calculation");
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }
  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         "mismatch in calculation");
  __ save_frame_c1(frame_size_in_bytes);
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);

  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint))

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ st_ptr(r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ stf(FloatRegisterImpl::S, r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
    }
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      __ ld_ptr(SP, (cpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
    }
  }

  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      __ ldf(FloatRegisterImpl::S, SP, (fpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
    }
  }
}


void Runtime1::initialize_pd() {
  // compute word offsets from SP at which live (non-windowed) registers are captured by stub routines
  //
  // A stub routine will have a frame that is at least large enough to hold
  // a register window save area (obviously) and the volatile g registers
  // and floating registers.  A user of save_live_registers can have a frame
  // that has more scratch area in it (although typically they will use L-regs).
  // in that case the frame will look like this (stack growing down)
  //
  //   FP ->  |               |
  //          | scratch mem   |
  //          |   "      "    |
  //           ---------------
  //          | float regs    |
  //          |   "    "      |
  //           ---------------
  //          | G regs        |
  //          |   "   "       |
  //           ---------------
  //          | abi reg.      |
  //          | window save   |
  //          | area          |
  //   SP ->   ---------------
  //
  int i;
  int sp_offset = round_to(frame::register_save_words, 2); // start doubleword aligned

  // only G int registers are saved explicitly; others are found in register windows
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      cpu_reg_save_offsets[i] = sp_offset;
      sp_offset++;
    }
  }

  // all float registers are saved explicitly
  assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");
  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset++;
  }
  reg_save_size_in_words = sp_offset - frame::memory_parameter_word_sp_offset;
  // this should match assembler::total_frame_size_in_bytes, which
  // isn't callable from this context.  It's checked by an assert when
  // it's used though.
  frame_size_in_bytes = align_size_up(sp_offset * wordSize, 8);
}


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, G4);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,
                                        Register arg1, Register arg2, Register arg3) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (arg1 == noreg) {
    call_offset = __ call_RT(result, noreg, target);
  } else if (arg2 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1);
  } else if (arg3 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2);
  } else {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);
  restore_live_registers(sasm);

  __ ret();
  __ delayed()->restore();

  return oop_maps;
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);

  // call the runtime patching routine, returns non-zero if nmethod got deopted.
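  // The flag comes back in O0: zero means the nmethod is still valid and the
  // patched site can simply be re-executed; non-zero means the nmethod was
  // deoptimized (this is what the branch below tests).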
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // re-execute the patched instruction or, if the nmethod was deoptimized, return to the
  // deoptimization handler entry that will cause re-execution of the current bytecode
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  Label no_deopt;
  __ tst(O0);
  __ brx(Assembler::equal, false, Assembler::pt, no_deopt);
  __ delayed()->nop();

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned, we'd deopt as if any call we patched had just
  // returned.

  restore_live_registers(sasm);
  __ restore();
  __ br(Assembler::always, false, Assembler::pt, deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
  __ delayed()->nop();

  __ bind(no_deopt);
  restore_live_registers(sasm);
  __ ret();
  __ delayed()->restore();

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  OopMapSet* oop_maps = NULL;
  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // stub code & info for the different stubs
  switch (id) {
    case forward_exception_id:
      {
        // we're handling an exception in the context of a compiled
        // frame.  The registers have been saved in the standard
        // places.  Perform an exception lookup in the caller and
        // dispatch to the handler if found.  Otherwise unwind and
        // dispatch to the caller's exception handler.

        oop_maps = new OopMapSet();
        OopMap* oop_map = generate_oop_map(sasm, true);

        // transfer the pending exception to the exception_oop
        __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception);
        __ ld_ptr(Oexception, 0, G0);
        __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset()));
        __ add(I7, frame::pc_return_offset, Oissuing_pc);

        generate_handle_exception(sasm, oop_maps, oop_map);
        __ should_not_reach_here();
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register G5_klass = G5; // Incoming
        Register O0_obj   = O0; // Outgoing

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register G1_obj_size = G1;
          Register G3_t1       = G3;
          Register G4_t2       = G4;
          assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);

          // Push a frame since we may do dtrace notification for the
          // allocation which requires calling out and we don't want
          // to stomp the real return address.
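          // (save_frame(0) opens a fresh register window, so the caller's
          //  return address in O7 is preserved as this frame's I7 across the
          //  call-outs below.)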
          __ save_frame(0);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1);
            __ cmp(G3_t1, instanceKlass::fully_initialized);
            __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
            __ delayed()->nop();
          }
#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size);
            __ cmp(G1_obj_size, 0);  // make sure it's an instance (LH > 0)
            __ br(Assembler::lessEqual, false, Assembler::pn, not_ok);
            __ delayed()->nop();
            __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
            __ br(Assembler::zero, false, Assembler::pn, ok);
            __ delayed()->nop();
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT
          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass

          __ bind(retry_tlab);

          // get the instance size
          __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);

          __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);

          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(try_eden);
          // get the instance size
          __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
          __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
          __ incr_allocated_bytes(G1_obj_size, 0, G3_t1);

          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(slow_path);

          // pop this frame so generate_stub_call can push its own
          __ restore();
        }

        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);
        // I0 -> O0: new instance
      }
      break;

    case counter_overflow_id:
      // G4 contains bci, G5 contains method
      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4, G5);
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register G5_klass  = G5; // Incoming
        Register G4_length = G4; // Incoming
        Register O0_obj    = O0; // Outgoing

        Address klass_lh(G5_klass, ((klassOopDesc::header_size() * HeapWordSize)
                                    + Klass::layout_helper_offset_in_bytes()));
        assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
        assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
        // Use this offset to pick out an individual byte of the layout_helper:
        const int klass_lh_header_size_offset = ((BytesPerInt - 1)  // 3 - 2 selects byte {0,1,0,0}
                                                 - Klass::_lh_header_size_shift / BitsPerByte);

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register G3_t1 = G3;
          __ ld(klass_lh, G3_t1);
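          // The array tag sits in the topmost bits of the layout_helper;
          // shift it down so it can be compared against the expected tag.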
          __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmp(G3_t1, tag);
          __ brx(Assembler::equal, false, Assembler::pt, ok);
          __ delayed()->nop();
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register G1_arr_size = G1;
          Register G3_t1       = G3;
          Register O1_t2       = O1;
          assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);

          // check that array length is small enough for fast path
          __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
          __ cmp(G4_length, G3_t1);
          __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path);
          __ delayed()->nop();

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass

          __ bind(retry_tlab);

          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ ld(klass_lh, G3_t1);
          __ sll(G4_length, G3_t1, G1_arr_size);
          __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
          __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
          __ add(G1_arr_size, G3_t1, G1_arr_size);
          __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);  // align up
          __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);

          __ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path);  // preserves G1_arr_size

          __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
          __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
          __ sub(G1_arr_size, G3_t1, O1_t2);  // body length
          __ add(O0_obj, G3_t1, G3_t1);       // body start
          __ initialize_body(G3_t1, O1_t2);
          __ verify_oop(O0_obj);
          __ retl();
          __ delayed()->nop();

          __ bind(try_eden);
          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ ld(klass_lh, G3_t1);
          __ sll(G4_length, G3_t1, G1_arr_size);
          __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
          __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
          __ add(G1_arr_size, G3_t1, G1_arr_size);
          __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);
          __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);

          __ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path);  // preserves G1_arr_size
          __ incr_allocated_bytes(G1_arr_size, 0, G3_t1);

          __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
          __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
          __ sub(G1_arr_size, G3_t1, O1_t2);  // body length
          __ add(O0_obj, G3_t1, G3_t1);       // body start
          __ initialize_body(G3_t1, O1_t2);
          __ verify_oop(O0_obj);
          __ retl();
          __ delayed()->nop();

          __ bind(slow_path);
        }

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);
        } else {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length);
        }
        // I0 -> O0: new array
      }
      break;

    case new_multi_array_id:
      { // O0: klass
        // O1: rank
        // O2: address of 1st dimension
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps =
          generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);
        // I0 -> O0: new multi array
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = O1;
        __ load_klass(O0, t);
        __ ld(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), t);
        __ set(JVM_ACC_HAS_FINALIZER, G3);
        __ andcc(G3, t, G0);
        __ br(Assembler::notZero, false, Assembler::pt, register_finalizer);
        __ delayed()->nop();

        // do a leaf return
        __ retl();
        __ delayed()->nop();

        __ bind(register_finalizer);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        // make a frame and preserve the caller's caller-save registers

        oop_maps = new OopMapSet();
        OopMap* oop_map = save_live_registers(sasm);
        __ mov(Oexception->after_save(),  Oexception);
        __ mov(Oissuing_pc->after_save(), Oissuing_pc);
        generate_handle_exception(sasm, oop_maps, oop_map);
      }
      break;

    case unwind_exception_id:
      {
        // O0: exception
        // I7: address of call to this method

        __ set_info("unwind_exception", dont_gc_arguments);
        __ mov(Oexception, Oexception->after_save());
        __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());

        __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());
        __ verify_not_null_oop(Oexception->after_save());

        // Restore SP from L7 if the exception PC is a MethodHandle call site.
        __ mov(O0, G5);  // Save the target address.
        __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
        __ tst(L0);      // Condition codes are preserved over the restore.
        __ restore();

        __ jmp(G5, 0);
        __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
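        // (The movcc above executes in the jmp's delay slot: when the lduw saw
        //  a MethodHandle return, SP is replaced with the value saved in
        //  L7_mh_SP_save before control reaches the handler.)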
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        // G4: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
        // Arguments:
        //
        //   ret  : G3
        //   sub  : G3, argument, destroyed
        //   super: G1, argument, not changed
        //   raddr: O7, blown by call
        Label miss;

        __ save_frame(0);               // Blow no registers!

        __ check_klass_subtype_slow_path(G3, G1, L0, L1, L2, L4, NULL, &miss);

        __ mov(1, G3);
        __ ret();                       // Result in G3 is 'true'
        __ delayed()->restore();        // free copy or add can go here

        __ bind(miss);
        __ mov(0, G3);
        __ ret();                       // Result in G3 is 'false'
        __ delayed()->restore();        // free copy or add can go here
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // G4: object
        // G5: lock address
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), G4, G5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // G4: lock address
        // note: really a leaf routine but must set up last Java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last Java sp setup manually)
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), G4);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case jvmti_exception_throw_id:
      { // Oexception: exception
        __ set_info("jvmti_exception_throw", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, noreg,
                                      CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), I0);
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object
        __ set_info("dtrace_object_alloc", dont_gc_arguments);
        // we can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm);

        __ save_thread(L7_thread_cache);
        __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
                relocInfo::runtime_call_type);
        __ delayed()->mov(I0, O0);
        __ restore_thread(L7_thread_cache);

        restore_live_registers(sasm);
        __ ret();
        __ delayed()->restore();
      }
      break;

#ifndef SERIALGC
    case g1_pre_barrier_slow_id:
      { // G4: previous value of memory
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        Register pre_val = G4;
        Register tmp     = G1_scratch;
        Register tmp2    = G3_scratch;

        Label refill, restart;
        bool with_frame = false; // I don't know if we can do with-frame.
        int satb_q_index_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   PtrQueue::byte_offset_of_index());
        int satb_q_buf_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   PtrQueue::byte_offset_of_buf());
        __ bind(restart);
        __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);

        __ br_on_reg_cond(Assembler::rc_z, /*annul*/false,
                          Assembler::pn, tmp, refill);

        // If the branch is taken, no harm in executing this in the delay slot.
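        // (SATB queue buffers fill downward: the index counts bytes remaining,
        //  so index == 0 means the buffer is full and must be handed to the VM
        //  for a refill below.)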
        __ delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
        __ sub(tmp, oopSize, tmp);

        __ st_ptr(pre_val, tmp2, tmp);  // [_buf + index] := pre_val
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);

        __ bind(refill);
        __ save_frame(0);

        __ mov(pre_val, L0);
        __ mov(tmp,     L1);
        __ mov(tmp2,    L2);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         SATBMarkQueueSet::handle_zero_index_for_thread),
                        G2_thread);

        __ mov(L0, pre_val);
        __ mov(L1, tmp);
        __ mov(L2, tmp2);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;

    case g1_post_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        Register addr      = G4;
        Register cardtable = G5;
        Register tmp       = G1_scratch;
        Register tmp2      = G3_scratch;
        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;

        Label not_already_dirty, restart, refill;

#ifdef _LP64
        __ srlx(addr, CardTableModRefBS::card_shift, addr);
#else
        __ srl(addr, CardTableModRefBS::card_shift, addr);
#endif

        AddressLiteral rs(byte_map_base);
        __ set(rs, cardtable);          // cardtable := <card table base>
        __ ldub(addr, cardtable, tmp);  // tmp := [addr + cardtable]

        __ br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
                          tmp, not_already_dirty);
        // Get cardtable + tmp into a reg by itself -- useful in the take-the-branch
        // case, harmless if not.
        __ delayed()->add(addr, cardtable, tmp2);

        // We didn't take the branch, so we're already dirty: return.
        // Use return-from-leaf
        __ retl();
        __ delayed()->nop();

        // Not dirty.
        __ bind(not_already_dirty);
        // First, dirty it.
        __ stb(G0, tmp2, 0);  // [cardPtr] := 0 (i.e., dirty).

        Register tmp3 = cardtable;
        Register tmp4 = tmp;

        // these registers are now dead
        addr = cardtable = tmp = noreg;

        int dirty_card_q_index_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   PtrQueue::byte_offset_of_index());
        int dirty_card_q_buf_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   PtrQueue::byte_offset_of_buf());
        __ bind(restart);
        __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);

        __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn,
                          tmp3, refill);
        // If the branch is taken, no harm in executing this in the delay slot.
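        // (The dirty-card queue uses the same downward-filling convention as
        //  the SATB queue above: index == 0 means the buffer needs a refill.)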
        __ delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
        __ sub(tmp3, oopSize, tmp3);

        __ st_ptr(tmp2, tmp4, tmp3);  // [_buf + index] := <address_of_card>
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);

        __ bind(refill);
        __ save_frame(0);

        __ mov(tmp2, L0);
        __ mov(tmp3, L1);
        __ mov(tmp4, L2);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         DirtyCardQueueSet::handle_zero_index_for_thread),
                        G2_thread);

        __ mov(L0, tmp2);
        __ mov(L1, tmp3);
        __ mov(L2, tmp4);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;
#endif // !SERIALGC

    default:
      { __ set_info("unimplemented entry", dont_gc_arguments);
        __ save_frame(0);
        __ set((int)id, O1);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), O1);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}


void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) {
  Label no_deopt;

  __ verify_not_null_oop(Oexception);

  // save the exception and issuing pc in the thread
  __ st_ptr(Oexception,  G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  // save the real return address and use the throwing pc as the return address to lookup (has bci & oop map)
  __ mov(I7, L0);
  __ mov(Oissuing_pc, I7);
  __ sub(I7, frame::pc_return_offset, I7);
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));

  // Note: if nmethod has been deoptimized then regardless of
  // whether it had a handler or not we will deoptimize
  // by entering the deopt blob with a pending exception.

#ifdef ASSERT
  Label done;
  __ tst(O0);
  __ br(Assembler::notZero, false, Assembler::pn, done);
  __ delayed()->nop();
  __ stop("should have found address");
  __ bind(done);
#endif

  // restore the registers that were saved at the beginning and jump to the exception handler.
  restore_live_registers(sasm);

  __ jmp(O0, 0);
  __ delayed()->restore();

  oop_maps->add_gc_map(call_offset, oop_map);
}


#undef __

#define __ masm->

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}