/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result,
                           address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack.
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) ||
         oop_result1 != metadata_result, "registers must be different");

  // Currently no stack banging. We assume that there are enough
  // StackShadowPages (which have been banged in generate_stack_overflow_check)
  // for the stub frame and the runtime frames.

  set_last_Java_frame(R1_SP, noreg);

  // ARG1 must hold the thread address.
  mr(R3_ARG1, R16_thread);

  address return_pc = call_c_with_frame_resize(entry_point, /*No resize, we have a C compatible frame.*/0);

  reset_last_Java_frame();

  // Check for pending exceptions.
  {
    ld(R0, in_bytes(Thread::pending_exception_offset()), R16_thread);
    cmpdi(CCR0, R0, 0);

    // This used to jump to forward_exception conditionally, but after
    // relocation that branch might no longer reach. So we branch around
    // the exception path instead, which always reaches.

    Label ok;
    beq(CCR0, ok);

    // Make sure that the vm_results are cleared.
    if (oop_result1->is_valid() || metadata_result->is_valid()) {
      li(R0, 0);
      if (oop_result1->is_valid()) {
        std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
      }
      if (metadata_result->is_valid()) {
        std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
      }
    }

    if (frame_size() == no_frame_size) {
      ShouldNotReachHere(); // We always have a frame size.
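      // The commented-out sequence below is the (unreachable) frameless
      // fallback: it would pop the stub frame and branch to the shared
      // forward_exception entry. It is retained for reference only.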
      //pop_frame(); // pop the stub frame
      //ld(R0, _abi(lr), R1_SP);
      //mtlr(R0);
      //load_const_optimized(R0, StubRoutines::forward_exception_entry());
      //mtctr(R0);
      //bctr();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      // keep stub frame for next call_RT
      //load_const_optimized(R0, Runtime1::entry_for(Runtime1::forward_exception_id));
      add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(Runtime1::entry_for(Runtime1::forward_exception_id)));
      mtctr(R0);
      bctr();
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  }

  return (int)(return_pc - code_section()->start());
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  mr_if_needed(R4_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  mr_if_needed(R4_ARG2, arg1);
  mr_if_needed(R5_ARG3, arg2); assert(arg2 != R4_ARG2, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  mr_if_needed(R4_ARG2, arg1);
  mr_if_needed(R5_ARG3, arg2); assert(arg2 != R4_ARG2, "smashed argument");
  mr_if_needed(R6_ARG4, arg3); assert(arg3 != R4_ARG2 && arg3 != R5_ARG3, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  assert(frame_size_in_bytes > frame::abi_reg_args_size, "init");
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset >> 2), r->as_VMReg());
      oop_map->set_callee_saved(VMRegImpl::stack2reg((sp_offset >> 2) + 1), r->as_VMReg()->next());
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset >> 2), r->as_VMReg());
      oop_map->set_callee_saved(VMRegImpl::stack2reg((sp_offset >> 2) + 1), r->as_VMReg()->next());
    }
  }

  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true,
                                   Register ret_pc = noreg, int stack_preserve = 0) {
  if (ret_pc == noreg) {
    ret_pc = R0;
    __ mflr(ret_pc);
  }
  __ std(ret_pc, _abi(lr), R1_SP); // C code needs pc in C1 method.
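  // Push the stub frame and store each caller-save register into the slot
  // laid out for it by initialize_pd(): the save area starts right above the
  // ABI register-argument area, one 8-byte slot per register. stack_preserve
  // enlarges the frame so that values a caller pre-pushed below its SP stay
  // inside the new frame.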
  __ push_frame(frame_size_in_bytes + stack_preserve, R0);

  // Record volatile registers as callee-save values in an OopMap so
  // their save locations will be propagated to the caller frame's
  // RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).
  // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint)).

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ std(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ stfd(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, Register result1, Register result2,
                                   bool restore_fpu_registers = true) {
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r) && r != result1 && r != result2) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ ld(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ lfd(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  __ pop_frame();
  __ ld(R0, _abi(lr), R1_SP);
  __ mtlr(R0);
}


void Runtime1::initialize_pd() {
  int i;
  int sp_offset = frame::abi_reg_args_size;

  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      cpu_reg_save_offsets[i] = sp_offset;
      sp_offset += BytesPerWord;
    }
  }

  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset += BytesPerWord;
  }
  frame_size_in_bytes = align_up(sp_offset, (int)frame::alignment_in_bytes);
}


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, R4_ARG2);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

static OopMapSet* generate_exception_throw_with_stack_parms(StubAssembler* sasm, address target,
                                                            int stack_parms) {
  // Make a frame and preserve the caller's caller-save registers.
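  // Stack-parameter convention: the caller pre-pushes the parameters just
  // below its SP before jumping here, e.g. for a single parameter
  // (cf. throw_range_check_failed_id):
  //   __ std(Rvalue, -8, R1_SP);
  // After push_frame inside save_live_registers, parameter i reappears at
  // frame_size_in_bytes + padding + 8*i relative to the new SP and is
  // reloaded into an argument register by the fall-through switch below.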
  const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, (int)frame::alignment_in_bytes);
  const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
  OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);

  int call_offset = 0;
  switch (stack_parms) {
    case 3:
      __ ld(R6_ARG4, frame_size_in_bytes + padding + 16, R1_SP);
    case 2:
      __ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
    case 1:
      __ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
    case 0:
      call_offset = __ call_RT(noreg, noreg, target);
      break;
    default: Unimplemented(); break;
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,
                                        Register arg1, Register arg2, Register arg3) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (arg1 == noreg) {
    call_offset = __ call_RT(result, noreg, target);
  } else if (arg2 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1);
  } else if (arg3 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2);
  } else {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  restore_live_registers(sasm, result, noreg);
  __ blr();
  return oop_maps;
}

static OopMapSet* stub_call_with_stack_parms(StubAssembler* sasm, Register result, address target,
                                             int stack_parms, bool do_return = true) {
  // Make a frame and preserve the caller's caller-save registers.
  const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, (int)frame::alignment_in_bytes);
  const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
  OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);

  int call_offset = 0;
  switch (stack_parms) {
    case 3:
      __ ld(R6_ARG4, frame_size_in_bytes + padding + 16, R1_SP);
    case 2:
      __ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
    case 1:
      __ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
    case 0:
      call_offset = __ call_RT(result, noreg, target);
      break;
    default: Unimplemented(); break;
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  restore_live_registers(sasm, result, noreg);
  if (do_return) __ blr();
  return oop_maps;
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine; it returns non-zero if the nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);
  __ cmpdi(CCR0, R3_RET, 0);

  // Re-execute the patched instruction or, if the nmethod was deoptimized,
  // return to the deoptimization handler entry that will cause re-execution
  // of the current bytecode.
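  // CR0 still holds the result of the cmpdi above (CR0.eq iff the patching
  // routine returned 0); restore_live_registers does not touch CR0, so the
  // conditional bclr below can return directly in the no-deopt case.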
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned, we'd deopt as if any call we patched had just
  // returned.

  restore_live_registers(sasm, noreg, noreg);
  // Return if the patching routine returned 0.
  __ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);

  address stub = deopt_blob->unpack_with_reexecution();
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctr();

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  OopMapSet* oop_maps = NULL;

  // For better readability.
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Stub code & info for the different stubs.
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }
        // We don't support eden allocation.
        // if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
        //     UseTLAB && FastTLABRefill) {
        //   if (id == fast_new_instance_init_check_id) {
        //     // make sure the klass is initialized
        //     __ lbz(R0, in_bytes(InstanceKlass::init_state_offset()), R3_ARG1);
        //     __ cmpwi(CCR0, R0, InstanceKlass::fully_initialized);
        //     __ bne(CCR0, slow_path);
        //   }
        //#ifdef ASSERT
        //   // assert object can be fast path allocated
        //   {
        //     Label ok, not_ok;
        //     __ lwz(R0, in_bytes(Klass::layout_helper_offset()), R3_ARG1);
        //     // make sure it's an instance (LH > 0)
        //     __ cmpwi(CCR0, R0, 0);
        //     __ ble(CCR0, not_ok);
        //     __ testbitdi(CCR0, R0, R0, Klass::_lh_instance_slow_path_bit);
        //     __ beq(CCR0, ok);
        //
        //     __ bind(not_ok);
        //     __ stop("assert(can be fast path allocated)");
        //     __ bind(ok);
        //   }
        //#endif // ASSERT
        //   // We don't support eden allocation.
        //   __ bind(slow_path);
        // }
        oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_instance), R4_ARG2);
      }
      break;

    case counter_overflow_id:
      // Bci and method are on the stack.
      oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), 2);
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // Assert object type is really an array of the proper kind.
        {
          int tag = (id == new_type_array_id) ?
                    Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value;
          Label ok;
          __ lwz(R0, in_bytes(Klass::layout_helper_offset()), R4_ARG2);
          __ srawi(R0, R0, Klass::_lh_array_tag_shift);
          __ cmpwi(CCR0, R0, tag);
          __ beq(CCR0, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        // We don't support eden allocation.

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_type_array), R4_ARG2, R5_ARG3);
        } else {
          oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_object_array), R4_ARG2, R5_ARG3);
        }
      }
      break;

    case new_multi_array_id:
      {
        // R4: klass
        // R5: rank
        // R6: address of 1st dimension
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_multi_array), R4_ARG2, R5_ARG3, R6_ARG4);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);
        // This code is called via rt_call. Hence, caller-save registers have been saved.
        Register t = R11_scratch1;

        // Load the klass and check the has_finalizer flag.
        __ load_klass(t, R3_ARG1);
        __ lwz(t, in_bytes(Klass::access_flags_offset()), t);
        __ testbitdi(CCR0, R0, t, exact_log2(JVM_ACC_HAS_FINALIZER));
        // Return if the has_finalizer bit == 0 (CR0.eq).
        __ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);

        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame(frame::abi_reg_args_size, R0); // Empty dummy frame (no callee-save regs).
        sasm->set_frame_size(frame::abi_reg_args_size / BytesPerWord);
        OopMap* oop_map = new OopMap(frame::abi_reg_args_size / sizeof(jint), 0);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R3_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        __ pop_frame();
        __ ld(R0, _abi(lr), R1_SP);
        __ mtlr(R0);
        __ blr();
      }
      break;

    case throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments); // Arguments will be discarded.
        __ std(R0, -8, R1_SP); // Pass the index on the stack.
        oop_maps = generate_exception_throw_with_stack_parms(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), 1);
      }
      break;

    case throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments); // Arguments will be discarded.
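        // Here the index argument is already in R4_ARG2 (no stack passing
        // as in throw_range_check_failed_id above), so the generic
        // register-based throw path suffices.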
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        const Register Rexception      = R3 /*LIRGenerator::exceptionOopOpr()*/,
                       Rexception_pc   = R4 /*LIRGenerator::exceptionPcOpr()*/,
                       Rexception_save = R31, Rcaller_sp = R30;
        __ set_info("unwind_exception", dont_gc_arguments);

        __ ld(Rcaller_sp, 0, R1_SP);
        __ push_frame_reg_args(0, R0); // dummy frame for C call
        __ mr(Rexception_save, Rexception); // save over C call
        __ ld(Rexception_pc, _abi(lr), Rcaller_sp); // return pc
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, Rexception_pc);
        __ verify_not_null_oop(Rexception_save);
        __ mtctr(R3_RET);
        __ ld(Rexception_pc, _abi(lr), Rcaller_sp); // return pc
        __ mr(R1_SP, Rcaller_sp); // Pop both frames at once.
        __ mr(Rexception, Rexception_save); // restore
        __ mtlr(Rexception_pc);
        __ bctr();
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
        const Register sub_klass   = R5,
                       super_klass = R4,
                       temp1_reg   = R6,
                       temp2_reg   = R0;
        __ check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg); // Returns with CR0.eq if successful.
        __ crandc(CCR0, Assembler::equal, CCR0, Assembler::equal); // Failed: CR0.ne.
        __ blr();
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      {
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
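        // The _nofpu variant is selected by the compiler for call sites
        // where no FPU registers are live, so their save/restore can be
        // skipped (save_fpu_registers is false for monitorenter_nofpu_id).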
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), R4_ARG2, R5_ARG3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers(sasm, noreg, noreg, save_fpu_registers);
        __ blr();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      {
        // Note: Really a leaf routine, but it must set up the last Java sp
        // => use call_RT for now (speed can be improved by doing the last
        // Java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), R4_ARG2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers(sasm, noreg, noreg, save_fpu_registers);
        __ blr();
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        __ std(R0, -8, R1_SP); // Pass trap_request on the stack.
        oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, deoptimize), 1, /*do_return*/ false);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        address stub = deopt_blob->unpack_with_reexecution();
        //__ load_const_optimized(R0, stub);
        __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
        __ mtctr(R0);
        __ bctr();
      }
      break;

    case access_field_patching_id:
      {
        __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object (SPARC register naming; this stub is unimplemented on PPC,
        // the SPARC version is kept below for reference).
        __ unimplemented("stub dtrace_object_alloc_id");
        __ set_info("dtrace_object_alloc", dont_gc_arguments);
        // // We can't gc here so skip the oopmap but make sure that all
        // // the live registers get saved.
        // save_live_registers(sasm);
        //
        // __ save_thread(L7_thread_cache);
        // __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
        //         relocInfo::runtime_call_type);
        // __ delayed()->mov(I0, O0);
        // __ restore_thread(L7_thread_cache);
        //
        // restore_live_registers(sasm);
        // __ ret();
        // __ delayed()->restore();
      }
      break;

#if INCLUDE_ALL_GCS
    case g1_pre_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          goto unimplemented_entry;
        }

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        // Using stack slots: pre_val (pre-pushed by the caller), spill tmp, spill tmp2.
        const int stack_slots = 3;
        Register pre_val = R0; // previous value of memory
        Register tmp  = R14;
        Register tmp2 = R15;

        Label refill, restart, marking_not_active;
        int satb_q_active_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_active());
        int satb_q_index_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_index());
        int satb_q_buf_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_buf());

        // Spill
        __ std(tmp, -16, R1_SP);
        __ std(tmp2, -24, R1_SP);

        // Is marking still active?
        if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
          __ lwz(tmp, satb_q_active_byte_offset, R16_thread);
        } else {
          assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
          __ lbz(tmp, satb_q_active_byte_offset, R16_thread);
        }
        __ cmpdi(CCR0, tmp, 0);
        __ beq(CCR0, marking_not_active);

        __ bind(restart);
        // Load the index into the SATB buffer. SATBMarkQueue::_index is a
        // size_t so ld_ptr is appropriate.
        __ ld(tmp, satb_q_index_byte_offset, R16_thread);

        // index == 0?
        __ cmpdi(CCR0, tmp, 0);
        __ beq(CCR0, refill);

        __ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
        __ ld(pre_val, -8, R1_SP); // Load the pre-pushed value from the stack.
        __ addi(tmp, tmp, -oopSize);

        __ std(tmp, satb_q_index_byte_offset, R16_thread);
        __ stdx(pre_val, tmp2, tmp); // [_buf + index] := pre_val

        __ bind(marking_not_active);
        // Restore temp registers and return-from-leaf.
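        // (The values spilled to -16/-24 at stub entry are still valid here;
        // even the refill path preserves them by including stack_slots in
        // nbytes_save below.)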
        __ ld(tmp2, -24, R1_SP);
        __ ld(tmp, -16, R1_SP);
        __ blr();

        __ bind(refill);
        const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
        __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
        __ pop_frame();
        __ ld(R0, _abi(lr), R1_SP);
        __ mtlr(R0);
        __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ b(restart);
      }
      break;

    case g1_post_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          goto unimplemented_entry;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        // Using stack slots: spill addr, spill tmp2.
        const int stack_slots = 2;
        Register tmp  = R0;
        Register addr = R14;
        Register tmp2 = R15;
        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;

        Label restart, refill, ret;

        // Spill
        __ std(addr, -8, R1_SP);
        __ std(tmp2, -16, R1_SP);

        __ srdi(addr, R0, CardTableModRefBS::card_shift); // Addr is passed in R0.
        __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
        __ add(addr, tmp2, addr);
        __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

        // Return if young card.
        __ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::g1_young_card_val());
        __ beq(CCR0, ret);

        // Return if the sequentially consistent value is already dirty.
        __ membar(Assembler::StoreLoad);
        __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

        __ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::dirty_card_val());
        __ beq(CCR0, ret);

        // Not dirty.

        // First, dirty it.
        __ li(tmp, G1SATBCardTableModRefBS::dirty_card_val());
        __ stb(tmp, 0, addr);

        int dirty_card_q_index_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   DirtyCardQueue::byte_offset_of_index());
        int dirty_card_q_buf_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   DirtyCardQueue::byte_offset_of_buf());

        __ bind(restart);

        // Get the index into the update buffer. DirtyCardQueue::_index is
        // a size_t so ld_ptr is appropriate here.
        __ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);

        // index == 0?
        __ cmpdi(CCR0, tmp2, 0);
        __ beq(CCR0, refill);

        __ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
        __ addi(tmp2, tmp2, -oopSize);

        __ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
        __ add(tmp2, tmp, tmp2);
        __ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>

        // Restore temp registers and return-from-leaf.
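        // (Reached from the young-card and already-dirty early-outs above as
        // well as after enqueuing; the slots spilled at stub entry are still
        // valid, the refill path having preserved them via stack_slots.)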
        __ bind(ret);
        __ ld(tmp2, -16, R1_SP);
        __ ld(addr, -8, R1_SP);
        __ blr();

        __ bind(refill);
        const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
        __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
        __ pop_frame();
        __ ld(R0, _abi(lr), R1_SP);
        __ mtlr(R0);
        __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ b(restart);
      }
      break;
#endif // INCLUDE_ALL_GCS

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        restore_live_registers(sasm, noreg, noreg);

        address stub = deopt_blob->unpack_with_reexecution();
        //__ load_const_optimized(R0, stub);
        __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
        __ mtctr(R0);
        __ bctr();
      }
      break;

    default:
      unimplemented_entry:
      {
        __ set_info("unimplemented entry", dont_gc_arguments);
        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame(frame::abi_reg_args_size, R0); // empty dummy frame
        sasm->set_frame_size(frame::abi_reg_args_size / BytesPerWord);
        OopMap* oop_map = new OopMap(frame::abi_reg_args_size / sizeof(jint), 0);

        __ load_const_optimized(R4_ARG2, (int)id);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R4_ARG2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  const Register Rexception    = R3 /*LIRGenerator::exceptionOopOpr()*/,
                 Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/;

  switch (id) {
    case forward_exception_id:
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places. Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found. Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm, true);
      // Transfer the pending exception to the exception_oop.
      // Also load the PC, which is typically at SP + frame_size_in_bytes + _abi(lr),
      // but we support additional slots in the frame for parameter passing.
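      // So instead of assuming a fixed offset, chase the back chain: load
      // the caller's SP from 0(R1_SP), then read the saved LR from its ABI
      // slot in that frame.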
      __ ld(Rexception_pc, 0, R1_SP);
      __ ld(Rexception, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
      __ li(R0, 0);
      __ ld(Rexception_pc, _abi(lr), Rexception_pc);
      __ std(R0, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
      break;
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // At this point all registers MAY be live.
      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Rexception_pc);
      break;
    case handle_exception_from_callee_id:
      // At this point all registers except the exception oop and exception pc are dead.
      oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
      sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
      __ std(Rexception_pc, _abi(lr), R1_SP);
      __ push_frame(frame_size_in_bytes, R0);
      break;
    default: ShouldNotReachHere();
  }

  __ verify_not_null_oop(Rexception);

#ifdef ASSERT
  // Check that the fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them.
  __ ld(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ cmpdi(CCR0, R0, 0);
  __ asm_assert_eq("exception oop already set", 0x963);
  __ ld(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
  __ cmpdi(CCR0, R0, 0);
  __ asm_assert_eq("exception pc already set", 0x962);
#endif

  // Save the exception and issuing pc in the thread.
  __ std(Rexception,    in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(Rexception_pc, in_bytes(JavaThread::exception_pc_offset()),  R16_thread);

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  __ mtctr(R3_RET);

  // Note: if the nmethod has been deoptimized, then regardless of whether it
  // had a handler or not, we will deoptimize by entering the deopt blob with
  // a pending exception.

  // Restore the registers that were saved at the beginning, remove
  // the frame and jump to the exception handler.
  switch (id) {
    case forward_exception_id:
    case handle_exception_nofpu_id:
    case handle_exception_id:
      restore_live_registers(sasm, noreg, noreg, id != handle_exception_nofpu_id);
      __ bctr();
      break;
    case handle_exception_from_callee_id: {
      __ pop_frame();
      __ ld(Rexception_pc, _abi(lr), R1_SP);
      __ mtlr(Rexception_pc);
      __ bctr();
      break;
    }
    default: ShouldNotReachHere();
  }

  return oop_maps;
}

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}

#undef __