/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_ppc.inline.hpp"

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result,
                           address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) ||
         oop_result1 != metadata_result, "registers must be different");

  // Currently no stack banging. We assume that there are enough
  // StackShadowPages (which have been banged in generate_stack_overflow_check)
  // for the stub frame and the runtime frames.

  set_last_Java_frame(R1_SP, noreg);

  // ARG1 must hold thread address.
  mr(R3_ARG1, R16_thread);

  address return_pc = call_c_with_frame_resize(entry_point, /*No resize, we have a C compatible frame.*/0);

  reset_last_Java_frame();

  // Check for pending exceptions.
  {
    ld(R0, in_bytes(Thread::pending_exception_offset()), R16_thread);
    cmpdi(CCR0, R0, 0);

    // This used to conditionally jump to forward_exception. However, if we
    // relocate, it is possible that the branch will not reach. So we must
    // jump around it so that we can always reach the forwarding code.

    Label ok;
    beq(CCR0, ok);

    // Make sure that the vm_results are cleared.
    if (oop_result1->is_valid() || metadata_result->is_valid()) {
      li(R0, 0);
      if (oop_result1->is_valid()) {
        std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
      }
      if (metadata_result->is_valid()) {
        std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
      }
    }

    if (frame_size() == no_frame_size) {
      ShouldNotReachHere(); // We always have a frame size.
      //pop_frame(); // pop the stub frame
      //ld(R0, _abi(lr), R1_SP);
      //mtlr(R0);
      //load_const_optimized(R0, StubRoutines::forward_exception_entry());
      //mtctr(R0);
      //bctr();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      // keep stub frame for next call_RT
      //load_const_optimized(R0, Runtime1::entry_for(Runtime1::forward_exception_id));
      add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(Runtime1::entry_for(Runtime1::forward_exception_id)));
      mtctr(R0);
      bctr();
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  }

  return (int)(return_pc - code_section()->start());
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  mr_if_needed(R4_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  mr_if_needed(R4_ARG2, arg1);
  mr_if_needed(R5_ARG3, arg2); assert(arg2 != R4_ARG2, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  mr_if_needed(R4_ARG2, arg1);
  mr_if_needed(R5_ARG3, arg2); assert(arg2 != R4_ARG2, "smashed argument");
  mr_if_needed(R6_ARG4, arg3); assert(arg3 != R4_ARG2 && arg3 != R5_ARG3, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  assert(frame_size_in_bytes > frame::abi_reg_args_size, "init");
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset>>2), r->as_VMReg());
      oop_map->set_callee_saved(VMRegImpl::stack2reg((sp_offset>>2) + 1), r->as_VMReg()->next());
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset>>2), r->as_VMReg());
      oop_map->set_callee_saved(VMRegImpl::stack2reg((sp_offset>>2) + 1), r->as_VMReg()->next());
    }
  }

  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true,
                                   Register ret_pc = noreg, int stack_preserve = 0) {
  if (ret_pc == noreg) {
    ret_pc = R0;
    __ mflr(ret_pc);
  }
  __ std(ret_pc, _abi(lr), R1_SP); // C code needs pc in C1 method.
  __ push_frame(frame_size_in_bytes + stack_preserve, R0);

  // Record volatile registers as callee-save values in an OopMap so
  // their save locations will be propagated to the caller frame's
  // RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).
  // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint)).

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ std(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ stfd(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, Register result1, Register result2,
                                   bool restore_fpu_registers = true) {
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r) && r != result1 && r != result2) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ ld(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ lfd(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  __ pop_frame();
  __ ld(R0, _abi(lr), R1_SP);
  __ mtlr(R0);
}


void Runtime1::initialize_pd() {
  int i;
  int sp_offset = frame::abi_reg_args_size;

  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      cpu_reg_save_offsets[i] = sp_offset;
      sp_offset += BytesPerWord;
    }
  }

  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset += BytesPerWord;
  }
  frame_size_in_bytes = align_up(sp_offset, frame::alignment_in_bytes);
}


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, R4_ARG2);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

static OopMapSet* generate_exception_throw_with_stack_parms(StubAssembler* sasm, address target,
                                                            int stack_parms) {
  // Make a frame and preserve the caller's caller-save registers.
  // The stack parameters were stored by the caller just below its SP;
  // after the frame push they are reloaded relative to the new SP below.
  const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
  const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
  OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);

  int call_offset = 0;
  switch (stack_parms) {
    case 3:
      __ ld(R6_ARG4, frame_size_in_bytes + padding + 16, R1_SP);
    case 2:
      __ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
    case 1:
      __ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
    case 0:
      call_offset = __ call_RT(noreg, noreg, target);
      break;
    default: Unimplemented(); break;
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,
                                        Register arg1, Register arg2, Register arg3) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (arg1 == noreg) {
    call_offset = __ call_RT(result, noreg, target);
  } else if (arg2 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1);
  } else if (arg3 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2);
  } else {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  restore_live_registers(sasm, result, noreg);
  __ blr();
  return oop_maps;
}

static OopMapSet* stub_call_with_stack_parms(StubAssembler* sasm, Register result, address target,
                                             int stack_parms, bool do_return = true) {
  // Make a frame and preserve the caller's caller-save registers.
  const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
  const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
  OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);

  int call_offset = 0;
  switch (stack_parms) {
    case 3:
      __ ld(R6_ARG4, frame_size_in_bytes + padding + 16, R1_SP);
    case 2:
      __ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
    case 1:
      __ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
    case 0:
      call_offset = __ call_RT(result, noreg, target);
      break;
    default: Unimplemented(); break;
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  restore_live_registers(sasm, result, noreg);
  if (do_return) __ blr();
  return oop_maps;
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine, returns non-zero if nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);
  __ cmpdi(CCR0, R3_RET, 0);

  // Re-execute the patched instruction or, if the nmethod was deoptimized,
  // return to the deoptimization handler entry that will cause re-execution
  // of the current bytecode.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned, we'd deopt as if any call we patched had just
  // returned.

  restore_live_registers(sasm, noreg, noreg);
  // Return if patching routine returned 0.
  __ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);

  address stub = deopt_blob->unpack_with_reexecution();
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctr();

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  OopMapSet* oop_maps = NULL;

  // For better readability.
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Stub code & info for the different stubs.
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        // We don't support eden allocation.

        oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_instance), R4_ARG2);
      }
      break;

    case counter_overflow_id:
      // Bci and method are on stack.
      oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), 2);
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // Assert object type is really an array of the proper kind.
        {
          int tag = (id == new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value;
          Label ok;
          __ lwz(R0, in_bytes(Klass::layout_helper_offset()), R4_ARG2);
          __ srawi(R0, R0, Klass::_lh_array_tag_shift);
          __ cmpwi(CCR0, R0, tag);
          __ beq(CCR0, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        // We don't support eden allocation.

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_type_array), R4_ARG2, R5_ARG3);
        } else {
          oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_object_array), R4_ARG2, R5_ARG3);
        }
      }
      break;

    case new_multi_array_id:
      {
        // R4: klass
        // R5: rank
        // R6: address of 1st dimension
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_multi_array), R4_ARG2, R5_ARG3, R6_ARG4);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);
        // This code is called via rt_call. Hence, caller-save registers have been saved.
        Register t = R11_scratch1;

        // Load the klass and check the has-finalizer flag.
        __ load_klass(t, R3_ARG1);
        __ lwz(t, in_bytes(Klass::access_flags_offset()), t);
        __ testbitdi(CCR0, R0, t, exact_log2(JVM_ACC_HAS_FINALIZER));
        // Return if has_finalizer bit == 0 (CR0.eq).
        __ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);

        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame(frame::abi_reg_args_size, R0); // Empty dummy frame (no callee-save regs).
        sasm->set_frame_size(frame::abi_reg_args_size / BytesPerWord);
        OopMap* oop_map = new OopMap(frame::abi_reg_args_size / sizeof(jint), 0);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R3_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        __ pop_frame();
        __ ld(R0, _abi(lr), R1_SP);
        __ mtlr(R0);
        __ blr();
      }
      break;

    case throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments); // Arguments will be discarded.
        oop_maps = generate_exception_throw_with_stack_parms(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), 2);
      }
      break;

    case throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments); // Arguments will be discarded.
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        const Register Rexception      = R3 /*LIRGenerator::exceptionOopOpr()*/,
                       Rexception_pc   = R4 /*LIRGenerator::exceptionPcOpr()*/,
                       Rexception_save = R31, Rcaller_sp = R30;
        __ set_info("unwind_exception", dont_gc_arguments);

        __ ld(Rcaller_sp, 0, R1_SP);
        __ push_frame_reg_args(0, R0); // dummy frame for C call
        __ mr(Rexception_save, Rexception); // save over C call
        __ ld(Rexception_pc, _abi(lr), Rcaller_sp); // return pc
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, Rexception_pc);
        __ verify_not_null_oop(Rexception_save);
        __ mtctr(R3_RET);
        __ ld(Rexception_pc, _abi(lr), Rcaller_sp); // return pc
        __ mr(R1_SP, Rcaller_sp); // Pop both frames at once.
        __ mr(Rexception, Rexception_save); // restore
        __ mtlr(Rexception_pc);
        __ bctr();
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
        const Register sub_klass   = R5,
                       super_klass = R4,
                       temp1_reg   = R6,
                       temp2_reg   = R0;
        __ check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg); // returns with CR0.eq if successful
        __ crandc(CCR0, Assembler::equal, CCR0, Assembler::equal); // failed: CR0.ne
        __ blr();
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      {
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), R4_ARG2, R5_ARG3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers(sasm, noreg, noreg, save_fpu_registers);
        __ blr();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      {
        // note: Really a leaf routine but must set up last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), R4_ARG2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers(sasm, noreg, noreg, save_fpu_registers);
        __ blr();
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        __ std(R0, -8, R1_SP); // Pass trap_request on stack.
        // stub_call_with_stack_parms() reloads the stored value into R4_ARG2 after pushing its frame.
        oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, deoptimize), 1, /*do_return*/ false);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        address stub = deopt_blob->unpack_with_reexecution();
        //__ load_const_optimized(R0, stub);
        __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
        __ mtctr(R0);
        __ bctr();
      }
      break;

    case access_field_patching_id:
      {
        __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object
        __ unimplemented("stub dtrace_object_alloc_id");
        __ set_info("dtrace_object_alloc", dont_gc_arguments);
        // // We can't gc here so skip the oopmap but make sure that all
        // // the live registers get saved.
        // save_live_registers(sasm);
        //
        // __ save_thread(L7_thread_cache);
        // __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
        //         relocInfo::runtime_call_type);
        // __ delayed()->mov(I0, O0);
        // __ restore_thread(L7_thread_cache);
        //
        // restore_live_registers(sasm);
        // __ ret();
        // __ delayed()->restore();
      }
      break;

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        restore_live_registers(sasm, noreg, noreg);

        address stub = deopt_blob->unpack_with_reexecution();
        //__ load_const_optimized(R0, stub);
        __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
        __ mtctr(R0);
        __ bctr();
      }
      break;

    default:
      {
        __ set_info("unimplemented entry", dont_gc_arguments);
        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame(frame::abi_reg_args_size, R0); // empty dummy frame
        sasm->set_frame_size(frame::abi_reg_args_size / BytesPerWord);
        OopMap* oop_map = new OopMap(frame::abi_reg_args_size / sizeof(jint), 0);

        __ load_const_optimized(R4_ARG2, (int)id);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R4_ARG2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  const Register Rexception    = R3 /*LIRGenerator::exceptionOopOpr()*/,
                 Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/;

  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places. Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found. Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, true);
    // Transfer the pending exception to the exception_oop.
    // Also load the PC which is typically at SP + frame_size_in_bytes + _abi(lr),
    // but we support additional slots in the frame for parameter passing.
    __ ld(Rexception_pc, 0, R1_SP);
    __ ld(Rexception, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
    __ li(R0, 0);
    __ ld(Rexception_pc, _abi(lr), Rexception_pc);
    __ std(R0, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Rexception_pc);
    break;
  case handle_exception_from_callee_id:
    // At this point all registers except exception oop and exception pc are dead.
    oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
    sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
    __ std(Rexception_pc, _abi(lr), R1_SP);
    __ push_frame(frame_size_in_bytes, R0);
    break;
  default: ShouldNotReachHere();
  }

  __ verify_not_null_oop(Rexception);

#ifdef ASSERT
  // Check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them.
  __ ld(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ cmpdi(CCR0, R0, 0);
  __ asm_assert_eq("exception oop already set");
  __ ld(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
  __ cmpdi(CCR0, R0, 0);
  __ asm_assert_eq("exception pc already set");
#endif

  // Save the exception and issuing pc in the thread.
  __ std(Rexception,    in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(Rexception_pc, in_bytes(JavaThread::exception_pc_offset()),  R16_thread);

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  __ mtctr(R3_RET);

  // Note: if nmethod has been deoptimized then regardless of
  // whether it had a handler or not we will deoptimize
  // by entering the deopt blob with a pending exception.

  // Restore the registers that were saved at the beginning, remove
  // the frame and jump to the exception handler.
  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    restore_live_registers(sasm, noreg, noreg, id != handle_exception_nofpu_id);
    __ bctr();
    break;
  case handle_exception_from_callee_id: {
    __ pop_frame();
    __ ld(Rexception_pc, _abi(lr), R1_SP);
    __ mtlr(Rexception_pc);
    __ bctr();
    break;
  }
  default: ShouldNotReachHere();
  }

  return oop_maps;
}

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}

#undef __