/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result,
                           address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack.
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) ||
         oop_result1 != metadata_result, "registers must be different");

  // Currently no stack banging. We assume that there are enough
  // StackShadowPages (which have been banged in generate_stack_overflow_check)
  // for the stub frame and the runtime frames.

  set_last_Java_frame(R1_SP, noreg);

  // ARG1 must hold thread address.
  mr(R3_ARG1, R16_thread);

  address return_pc = call_c_with_frame_resize(entry_point, /*No resize, we have a C compatible frame.*/0);

  reset_last_Java_frame();

  // Check for pending exceptions.
  {
    ld(R0, in_bytes(Thread::pending_exception_offset()), R16_thread);
    cmpdi(CCR0, R0, 0);

    // This used to conditionally jump to forward_exception; however, after
    // relocation that branch might not reach its target. So we must jump
    // around it to a target that is always reachable.

    Label ok;
    beq(CCR0, ok);

    // Make sure that the vm_results are cleared.
    if (oop_result1->is_valid() || metadata_result->is_valid()) {
      li(R0, 0);
      if (oop_result1->is_valid()) {
        std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
      }
      if (metadata_result->is_valid()) {
        std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
      }
    }

    if (frame_size() == no_frame_size) {
      ShouldNotReachHere(); // We always have a frame size.
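      // The commented-out sequence below shows what a frameless fallback
      // would have to do: pop the stub frame, restore LR, and branch to the
      // shared forward_exception entry.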
      //pop_frame(); // pop the stub frame
      //ld(R0, _abi(lr), R1_SP);
      //mtlr(R0);
      //load_const_optimized(R0, StubRoutines::forward_exception_entry());
      //mtctr(R0);
      //bctr();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      // keep stub frame for next call_RT
      //load_const_optimized(R0, Runtime1::entry_for(Runtime1::forward_exception_id));
      add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(Runtime1::entry_for(Runtime1::forward_exception_id)));
      mtctr(R0);
      bctr();
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  }

  return (int)(return_pc - code_section()->start());
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  mr_if_needed(R4_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  mr_if_needed(R4_ARG2, arg1);
  mr_if_needed(R5_ARG3, arg2); assert(arg2 != R4_ARG2, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  mr_if_needed(R4_ARG2, arg1);
  mr_if_needed(R5_ARG3, arg2); assert(arg2 != R4_ARG2, "smashed argument");
  mr_if_needed(R6_ARG4, arg3); assert(arg3 != R4_ARG2 && arg3 != R5_ARG3, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  assert(frame_size_in_bytes > frame::abi_reg_args_size, "init");
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset>>2), r->as_VMReg());
      oop_map->set_callee_saved(VMRegImpl::stack2reg((sp_offset>>2) + 1), r->as_VMReg()->next());
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset>>2), r->as_VMReg());
      oop_map->set_callee_saved(VMRegImpl::stack2reg((sp_offset>>2) + 1), r->as_VMReg()->next());
    }
  }

  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true,
                                   Register ret_pc = noreg, int stack_preserve = 0) {
  if (ret_pc == noreg) {
    ret_pc = R0;
    __ mflr(ret_pc);
  }
  __ std(ret_pc, _abi(lr), R1_SP); // C code needs pc in C1 method.
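  // A sketch of the stub frame built below (offsets from the new R1_SP; see
  // Runtime1::initialize_pd for the exact offset computation):
  //   [0, abi_reg_args_size)                 : ABI header and register-argument area
  //   [abi_reg_args_size, ...)               : one 8-byte slot per GPR that needs saving
  //   [..., frame_size_in_bytes)             : one 8-byte slot per FPR, plus alignment
  //   [frame_size_in_bytes, +stack_preserve) : preserved area, e.g. for pre-pushed stack parameters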
  __ push_frame(frame_size_in_bytes + stack_preserve, R0);

  // Record volatile registers as callee-save values in an OopMap so
  // their save locations will be propagated to the caller frame's
  // RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).
  // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint)).

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ std(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ stfd(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, Register result1, Register result2,
                                   bool restore_fpu_registers = true) {
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r) && r != result1 && r != result2) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ ld(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ lfd(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  __ pop_frame();
  __ ld(R0, _abi(lr), R1_SP);
  __ mtlr(R0);
}


void Runtime1::initialize_pd() {
  int i;
  int sp_offset = frame::abi_reg_args_size;

  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      cpu_reg_save_offsets[i] = sp_offset;
      sp_offset += BytesPerWord;
    }
  }

  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset += BytesPerWord;
  }
  frame_size_in_bytes = align_up(sp_offset, frame::alignment_in_bytes);
}


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, R4_ARG2);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

static OopMapSet* generate_exception_throw_with_stack_parms(StubAssembler* sasm, address target,
                                                            int stack_parms) {
  // Make a frame and preserve the caller's caller-save registers.
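  // A sketch of the stack-parameter protocol assumed here: the caller
  // pre-pushes each parameter below its SP (e.g. "__ std(R0, -8, R1_SP)" for
  // a single parameter), and save_live_registers keeps parm_size_in_bytes of
  // that area intact at the top of the stub frame, so parameter i can be
  // reloaded from frame_size_in_bytes + padding + i * BytesPerWord off the
  // new R1_SP, as done in the switch below.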
  const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
  const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
  OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);

  int call_offset = 0;
  switch (stack_parms) {
    case 3:
      __ ld(R6_ARG4, frame_size_in_bytes + padding + 16, R1_SP);
    case 2:
      __ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
    case 1:
      __ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
      call_offset = __ call_RT(noreg, noreg, target);
      break;
    default: Unimplemented(); break;
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,
                                        Register arg1, Register arg2, Register arg3) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (arg1 == noreg) {
    call_offset = __ call_RT(result, noreg, target);
  } else if (arg2 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1);
  } else if (arg3 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2);
  } else {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  restore_live_registers(sasm, result, noreg);
  __ blr();
  return oop_maps;
}

static OopMapSet* stub_call_with_stack_parms(StubAssembler* sasm, Register result, address target,
                                             int stack_parms, bool do_return = true) {
  // Make a frame and preserve the caller's caller-save registers.
  const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
  const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
  OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);

  int call_offset = 0;
  switch (stack_parms) {
    case 3:
      __ ld(R6_ARG4, frame_size_in_bytes + padding + 16, R1_SP);
    case 2:
      __ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
    case 1:
      __ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
      call_offset = __ call_RT(result, noreg, target);
      break;
    default: Unimplemented(); break;
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  restore_live_registers(sasm, result, noreg);
  if (do_return) __ blr();
  return oop_maps;
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine, returns non-zero if nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);
  __ cmpdi(CCR0, R3_RET, 0);

  // Re-execute the patched instruction or, if the nmethod was deoptimized,
  // return to the deoptimization handler entry that will cause re-execution
  // of the current bytecode.
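  // The patching routine returns non-zero in R3_RET iff the nmethod was
  // deoptimized; CR0 still holds the comparison of R3_RET against zero, so
  // the bclr emitted below returns (re-executing the patched instruction)
  // only in the zero case.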
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  // Return to the deoptimization handler entry for unpacking and reexecution.
  // If we simply returned, we'd deopt as if any call we patched had just
  // returned.

  restore_live_registers(sasm, noreg, noreg);
  // Return if patching routine returned 0.
  __ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);

  address stub = deopt_blob->unpack_with_reexecution();
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctr();

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  OopMapSet* oop_maps = NULL;

  // For better readability.
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Stub code & info for the different stubs.
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }
        // We don't support eden allocation.
        // if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
        //     UseTLAB && FastTLABRefill) {
        //   if (id == fast_new_instance_init_check_id) {
        //     // make sure the klass is initialized
        //     __ lbz(R0, in_bytes(InstanceKlass::init_state_offset()), R3_ARG1);
        //     __ cmpwi(CCR0, R0, InstanceKlass::fully_initialized);
        //     __ bne(CCR0, slow_path);
        //   }
        //#ifdef ASSERT
        //   // assert object can be fast path allocated
        //   {
        //     Label ok, not_ok;
        //     __ lwz(R0, in_bytes(Klass::layout_helper_offset()), R3_ARG1);
        //     // make sure it's an instance (LH > 0)
        //     __ cmpwi(CCR0, R0, 0);
        //     __ ble(CCR0, not_ok);
        //     __ testbitdi(CCR0, R0, R0, Klass::_lh_instance_slow_path_bit);
        //     __ beq(CCR0, ok);
        //
        //     __ bind(not_ok);
        //     __ stop("assert(can be fast path allocated)");
        //     __ bind(ok);
        //   }
        //#endif // ASSERT
        //   // We don't support eden allocation.
        //   __ bind(slow_path);
        // }
        oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_instance), R4_ARG2);
      }
      break;

    case counter_overflow_id:
      // Bci and method are on stack.
      oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), 2);
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // Assert object type is really an array of the proper kind.
        {
          int tag = (id == new_type_array_id) ?
                    Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value;
          Label ok;
          __ lwz(R0, in_bytes(Klass::layout_helper_offset()), R4_ARG2);
          __ srawi(R0, R0, Klass::_lh_array_tag_shift);
          __ cmpwi(CCR0, R0, tag);
          __ beq(CCR0, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        // We don't support eden allocation.

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_type_array), R4_ARG2, R5_ARG3);
        } else {
          oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_object_array), R4_ARG2, R5_ARG3);
        }
      }
      break;

    case new_multi_array_id:
      {
        // R4: klass
        // R5: rank
        // R6: address of 1st dimension
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_multi_array), R4_ARG2, R5_ARG3, R6_ARG4);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);
        // This code is called via rt_call. Hence, caller-save registers have been saved.
        Register t = R11_scratch1;

        // Load the klass and check the has_finalizer flag.
        __ load_klass(t, R3_ARG1);
        __ lwz(t, in_bytes(Klass::access_flags_offset()), t);
        __ testbitdi(CCR0, R0, t, exact_log2(JVM_ACC_HAS_FINALIZER));
        // Return if the has_finalizer bit == 0 (CR0.eq).
        __ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);

        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame(frame::abi_reg_args_size, R0); // Empty dummy frame (no callee-save regs).
        sasm->set_frame_size(frame::abi_reg_args_size / BytesPerWord);
        OopMap* oop_map = new OopMap(frame::abi_reg_args_size / sizeof(jint), 0);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R3_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        __ pop_frame();
        __ ld(R0, _abi(lr), R1_SP);
        __ mtlr(R0);
        __ blr();
      }
      break;

    case throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments); // Arguments will be discarded.
        __ std(R0, -8, R1_SP); // Pass index on stack.
        oop_maps = generate_exception_throw_with_stack_parms(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), 1);
      }
      break;

    case throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments); // Arguments will be discarded.
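        // Unlike throw_range_check_failed_id above, which passes the index on
        // the stack, the failing index here presumably arrives in R4_ARG2 and
        // is forwarded by the register-argument thrower.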
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        const Register Rexception      = R3 /*LIRGenerator::exceptionOopOpr()*/,
                       Rexception_pc   = R4 /*LIRGenerator::exceptionPcOpr()*/,
                       Rexception_save = R31, Rcaller_sp = R30;
        __ set_info("unwind_exception", dont_gc_arguments);

        __ ld(Rcaller_sp, 0, R1_SP);
        __ push_frame_reg_args(0, R0); // dummy frame for C call
        __ mr(Rexception_save, Rexception); // save over C call
        __ ld(Rexception_pc, _abi(lr), Rcaller_sp); // return pc
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, Rexception_pc);
        __ verify_not_null_oop(Rexception_save);
        __ mtctr(R3_RET);
        __ ld(Rexception_pc, _abi(lr), Rcaller_sp); // return pc
        __ mr(R1_SP, Rcaller_sp); // Pop both frames at once.
        __ mr(Rexception, Rexception_save); // restore
        __ mtlr(Rexception_pc);
        __ bctr();
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
        const Register sub_klass   = R5,
                       super_klass = R4,
                       temp1_reg   = R6,
                       temp2_reg   = R0;
        __ check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg); // returns with CR0.eq if successful
        __ crandc(CCR0, Assembler::equal, CCR0, Assembler::equal); // failed: CR0.ne
        __ blr();
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      {
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
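        // The _nofpu variant is used by C1 at sites where no FPU registers
        // are live; save_fpu_registers is false exactly in that case, so the
        // FPR save/restore work can be skipped.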
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), R4_ARG2, R5_ARG3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers(sasm, noreg, noreg, save_fpu_registers);
        __ blr();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      {
        // Note: Really a leaf routine, but we must set up the last Java sp
        //       => use call_RT for now (speed can be improved by
        //       doing the last Java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), R4_ARG2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers(sasm, noreg, noreg, save_fpu_registers);
        __ blr();
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        __ std(R0, -8, R1_SP); // Pass trap_request on stack.
        oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, deoptimize), 1, /*do_return*/ false);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        address stub = deopt_blob->unpack_with_reexecution();
        //__ load_const_optimized(R0, stub);
        __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
        __ mtctr(R0);
        __ bctr();
      }
      break;

    case access_field_patching_id:
      {
        __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object (SPARC-style reference code below; not implemented on PPC)
        __ unimplemented("stub dtrace_object_alloc_id");
        __ set_info("dtrace_object_alloc", dont_gc_arguments);
        // // We can't gc here so skip the oopmap but make sure that all
        // // the live registers get saved.
        // save_live_registers(sasm);
        //
        // __ save_thread(L7_thread_cache);
        // __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
        //         relocInfo::runtime_call_type);
        // __ delayed()->mov(I0, O0);
        // __ restore_thread(L7_thread_cache);
        //
        // restore_live_registers(sasm);
        // __ ret();
        // __ delayed()->restore();
      }
      break;

#if INCLUDE_ALL_GCS
    case g1_pre_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          goto unimplemented_entry;
        }

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        // Using stack slots: pre_val (pre-pushed), spill tmp, spill tmp2.
        const int stack_slots = 3;
        Register pre_val = R0; // previous value of memory
        Register tmp  = R14;
        Register tmp2 = R15;

        Label refill, restart, marking_not_active;
        int satb_q_active_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_active());
        int satb_q_index_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_index());
        int satb_q_buf_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_buf());

        // Spill
        __ std(tmp, -16, R1_SP);
        __ std(tmp2, -24, R1_SP);

        // Is marking still active?
        if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
          __ lwz(tmp, satb_q_active_byte_offset, R16_thread);
        } else {
          assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
          __ lbz(tmp, satb_q_active_byte_offset, R16_thread);
        }
        __ cmpdi(CCR0, tmp, 0);
        __ beq(CCR0, marking_not_active);

        __ bind(restart);
        // Load the index into the SATB buffer. SATBMarkQueue::_index is a
        // size_t so ld_ptr is appropriate.
        __ ld(tmp, satb_q_index_byte_offset, R16_thread);

        // index == 0?
        __ cmpdi(CCR0, tmp, 0);
        __ beq(CCR0, refill);

        __ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
        __ ld(pre_val, -8, R1_SP); // Load from stack.
        __ addi(tmp, tmp, -oopSize);

        __ std(tmp, satb_q_index_byte_offset, R16_thread);
        __ stdx(pre_val, tmp2, tmp); // [_buf + index] := pre_val

        __ bind(marking_not_active);
        // Restore temp registers and return-from-leaf.
        __ ld(tmp2, -24, R1_SP);
        __ ld(tmp, -16, R1_SP);
        __ blr();

        __ bind(refill);
        const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
        __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
        __ pop_frame();
        __ ld(R0, _abi(lr), R1_SP);
        __ mtlr(R0);
        __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ b(restart);
      }
      break;

    case g1_post_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          goto unimplemented_entry;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        // Using stack slots: spill addr, spill tmp2.
        const int stack_slots = 2;
        Register tmp  = R0;
        Register addr = R14;
        Register tmp2 = R15;
        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;

        Label restart, refill, ret;

        // Spill
        __ std(addr, -8, R1_SP);
        __ std(tmp2, -16, R1_SP);

        __ srdi(addr, R0, CardTableModRefBS::card_shift); // Addr is passed in R0.
        __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
        __ add(addr, tmp2, addr);
        __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

        // Return if young card.
        __ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::g1_young_card_val());
        __ beq(CCR0, ret);

        // Return if the sequentially consistent value is already dirty.
        __ membar(Assembler::StoreLoad);
        __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

        __ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::dirty_card_val());
        __ beq(CCR0, ret);

        // Not dirty.

        // First, dirty it.
        __ li(tmp, G1SATBCardTableModRefBS::dirty_card_val());
        __ stb(tmp, 0, addr);

        int dirty_card_q_index_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   DirtyCardQueue::byte_offset_of_index());
        int dirty_card_q_buf_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   DirtyCardQueue::byte_offset_of_buf());

        __ bind(restart);

        // Get the index into the update buffer. DirtyCardQueue::_index is
        // a size_t so ld_ptr is appropriate here.
        __ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);

        // index == 0?
        __ cmpdi(CCR0, tmp2, 0);
        __ beq(CCR0, refill);

        __ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
        __ addi(tmp2, tmp2, -oopSize);

        __ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
        __ add(tmp2, tmp, tmp2);
        __ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>

        // Restore temp registers and return-from-leaf.
        __ bind(ret);
        __ ld(tmp2, -16, R1_SP);
        __ ld(addr, -8, R1_SP);
        __ blr();

        __ bind(refill);
        const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
        __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
        __ pop_frame();
        __ ld(R0, _abi(lr), R1_SP);
        __ mtlr(R0);
        __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ b(restart);
      }
      break;
#endif // INCLUDE_ALL_GCS

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        restore_live_registers(sasm, noreg, noreg);

        address stub = deopt_blob->unpack_with_reexecution();
        //__ load_const_optimized(R0, stub);
        __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
        __ mtctr(R0);
        __ bctr();
      }
      break;

    default:
      unimplemented_entry:
      {
        __ set_info("unimplemented entry", dont_gc_arguments);
        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame(frame::abi_reg_args_size, R0); // empty dummy frame
        sasm->set_frame_size(frame::abi_reg_args_size / BytesPerWord);
        OopMap* oop_map = new OopMap(frame::abi_reg_args_size / sizeof(jint), 0);

        __ load_const_optimized(R4_ARG2, (int)id);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R4_ARG2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  const Register Rexception    = R3 /*LIRGenerator::exceptionOopOpr()*/,
                 Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/;

  switch (id) {
    case forward_exception_id:
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places. Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found. Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm, true);
      // Transfer the pending exception to the exception_oop.
      // Also load the PC, which is typically at SP + frame_size_in_bytes + _abi(lr),
      // but we support additional slots in the frame for parameter passing.
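      // So walk the back chain: 0(R1_SP) holds the caller's SP, and that
      // frame's _abi(lr) slot holds the pc.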
      __ ld(Rexception_pc, 0, R1_SP);
      __ ld(Rexception, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
      __ li(R0, 0);
      __ ld(Rexception_pc, _abi(lr), Rexception_pc);
      __ std(R0, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
      break;
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // At this point all registers MAY be live.
      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Rexception_pc);
      break;
    case handle_exception_from_callee_id:
      // At this point all registers except the exception oop and exception pc are dead.
      oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
      sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
      __ std(Rexception_pc, _abi(lr), R1_SP);
      __ push_frame(frame_size_in_bytes, R0);
      break;
    default: ShouldNotReachHere();
  }

  __ verify_not_null_oop(Rexception);

#ifdef ASSERT
  // Check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them.
  __ ld(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ cmpdi(CCR0, R0, 0);
  __ asm_assert_eq("exception oop already set", 0x963);
  __ ld(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
  __ cmpdi(CCR0, R0, 0);
  __ asm_assert_eq("exception pc already set", 0x962);
#endif

  // Save the exception and issuing pc in the thread.
  __ std(Rexception,    in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(Rexception_pc, in_bytes(JavaThread::exception_pc_offset()), R16_thread);

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  __ mtctr(R3_RET);

  // Note: if the nmethod has been deoptimized then, regardless of whether it
  // had a handler or not, we will deoptimize by entering the deopt blob with
  // a pending exception.

  // Restore the registers that were saved at the beginning, remove
  // the frame and jump to the exception handler.
  switch (id) {
    case forward_exception_id:
    case handle_exception_nofpu_id:
    case handle_exception_id:
      restore_live_registers(sasm, noreg, noreg, id != handle_exception_nofpu_id);
      __ bctr();
      break;
    case handle_exception_from_callee_id: {
      __ pop_frame();
      __ ld(Rexception_pc, _abi(lr), R1_SP);
      __ mtlr(Rexception_pc);
      __ bctr();
      break;
    }
    default: ShouldNotReachHere();
  }

  return oop_maps;
}

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}

#undef __