/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#endif

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result,
                           address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack.
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) ||
         oop_result1 != metadata_result, "registers must be different");

  // Currently no stack banging. We assume that there are enough
  // StackShadowPages (which have been banged in generate_stack_overflow_check)
  // for the stub frame and the runtime frames.

  set_last_Java_frame(R1_SP, noreg);

  // ARG1 must hold thread address.
  mr(R3_ARG1, R16_thread);

  address return_pc = call_c_with_frame_resize(entry_point, /*No resize, we have a C compatible frame.*/0);

  reset_last_Java_frame();

  // Check for pending exceptions.
  {
    ld(R0, in_bytes(Thread::pending_exception_offset()), R16_thread);
    cmpdi(CCR0, R0, 0);

    // This used to jump conditionally to forward_exception. However, after
    // relocation the conditional branch might no longer reach its target,
    // so we jump around an unconditional branch, which always reaches.

    Label ok;
    beq(CCR0, ok);

    // Make sure that the vm_results are cleared.
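    // Clearing matters because vm_result and vm_result_2 are treated as an
    // oop and a Metadata* while they sit in the thread: on the exception path
    // nobody consumes them, so a stale value could otherwise survive past
    // this stub. A minimal sketch of the equivalent logic, assuming direct
    // access to the JavaThread fields (illustrative, not generated code):
    //
    //   if (thread->has_pending_exception()) {
    //     thread->set_vm_result(NULL);     // clear oop result
    //     thread->set_vm_result_2(NULL);   // clear metadata result
    //   }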
    if (oop_result1->is_valid() || metadata_result->is_valid()) {
      li(R0, 0);
      if (oop_result1->is_valid()) {
        std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
      }
      if (metadata_result->is_valid()) {
        std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
      }
    }

    if (frame_size() == no_frame_size) {
      ShouldNotReachHere(); // We always have a frame size.
      //pop_frame(); // pop the stub frame
      //ld(R0, _abi(lr), R1_SP);
      //mtlr(R0);
      //load_const_optimized(R0, StubRoutines::forward_exception_entry());
      //mtctr(R0);
      //bctr();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      // keep stub frame for next call_RT
      //load_const_optimized(R0, Runtime1::entry_for(Runtime1::forward_exception_id));
      add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(Runtime1::entry_for(Runtime1::forward_exception_id)));
      mtctr(R0);
      bctr();
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  }

  return (int)(return_pc - code_section()->start());
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  mr_if_needed(R4_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  mr_if_needed(R4_ARG2, arg1);
  mr_if_needed(R5_ARG3, arg2); assert(arg2 != R4_ARG2, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  mr_if_needed(R4_ARG2, arg1);
  mr_if_needed(R5_ARG3, arg2); assert(arg2 != R4_ARG2, "smashed argument");
  mr_if_needed(R6_ARG4, arg3); assert(arg3 != R4_ARG2 && arg3 != R5_ARG3, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  assert(frame_size_in_bytes > frame::abi_reg_args_size, "init");
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset>>2), r->as_VMReg());
      oop_map->set_callee_saved(VMRegImpl::stack2reg((sp_offset>>2) + 1), r->as_VMReg()->next());
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset>>2), r->as_VMReg());
      oop_map->set_callee_saved(VMRegImpl::stack2reg((sp_offset>>2) + 1),
                                r->as_VMReg()->next());
    }
  }

  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true,
                                   Register ret_pc = noreg, int stack_preserve = 0) {
  if (ret_pc == noreg) {
    ret_pc = R0;
    __ mflr(ret_pc);
  }
  __ std(ret_pc, _abi(lr), R1_SP); // C code needs pc in C1 method.
  __ push_frame(frame_size_in_bytes + stack_preserve, R0);

  // Record volatile registers as callee-save values in an OopMap so
  // their save locations will be propagated to the caller frame's
  // RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).
  // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint)).

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ std(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ stfd(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, Register result1, Register result2,
                                   bool restore_fpu_registers = true) {
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r) && r != result1 && r != result2) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ ld(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ lfd(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  __ pop_frame();
  __ ld(R0, _abi(lr), R1_SP);
  __ mtlr(R0);
}


void Runtime1::initialize_pd() {
  int i;
  int sp_offset = frame::abi_reg_args_size;

  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      cpu_reg_save_offsets[i] = sp_offset;
      sp_offset += BytesPerWord;
    }
  }

  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset += BytesPerWord;
  }
  frame_size_in_bytes = align_up(sp_offset, frame::alignment_in_bytes);
}


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, R4_ARG2);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

static OopMapSet* generate_exception_throw_with_stack_parms(StubAssembler* sasm, address target,
                                                            int stack_parms) {
  // Make a frame and preserve the caller's caller-save registers.
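  // How the stack parameters arrive (illustrative sketch, not original code):
  // the caller has pre-pushed up to three values into the scratch words just
  // below its SP, e.g. `__ std(R0, -8, R1_SP)`. After save_live_registers()
  // pushes the stub frame, extended by parm_size_in_bytes, those words are
  // reachable from the new SP and are reloaded into the argument registers:
  //
  //   caller SP --> [ caller frame           ]
  //                 [ parm1 ] = caller SP -  8   // reloaded into R5_ARG3
  //                 [ parm0 ] = caller SP - 16   // reloaded into R4_ARG2
  //   new SP    --> [ stub frame + parm area ]
  //
  // (layout shown for stack_parms == 2; padding keeps the area aligned).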
  const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
  const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
  OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);

  int call_offset = 0;
  switch (stack_parms) {
    case 3:
      __ ld(R6_ARG4, frame_size_in_bytes + padding + 16, R1_SP);
      // fall through
    case 2:
      __ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
      // fall through
    case 1:
      __ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
      // fall through
    case 0:
      call_offset = __ call_RT(noreg, noreg, target);
      break;
    default: Unimplemented(); break;
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,
                                        Register arg1, Register arg2, Register arg3) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (arg1 == noreg) {
    call_offset = __ call_RT(result, noreg, target);
  } else if (arg2 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1);
  } else if (arg3 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2);
  } else {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  restore_live_registers(sasm, result, noreg);
  __ blr();
  return oop_maps;
}

static OopMapSet* stub_call_with_stack_parms(StubAssembler* sasm, Register result, address target,
                                             int stack_parms, bool do_return = true) {
  // Make a frame and preserve the caller's caller-save registers.
  const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
  const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
  OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);

  int call_offset = 0;
  switch (stack_parms) {
    case 3:
      __ ld(R6_ARG4, frame_size_in_bytes + padding + 16, R1_SP);
      // fall through
    case 2:
      __ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
      // fall through
    case 1:
      __ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
      // fall through
    case 0:
      call_offset = __ call_RT(result, noreg, target);
      break;
    default: Unimplemented(); break;
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  restore_live_registers(sasm, result, noreg);
  if (do_return) __ blr();
  return oop_maps;
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine, returns non-zero if nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);
  __ cmpdi(CCR0, R3_RET, 0);

  // Re-execute the patched instruction or, if the nmethod was deoptimized,
  // return to the deoptimization handler entry that will cause re-execution
  // of the current bytecode.
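  // (Explanatory note: R3_RET, tested above, is non-zero when the runtime
  // found that the nmethod was deoptimized while we were inside the patching
  // routine. In that case the patched site must not be re-entered; instead we
  // branch into the deopt blob below, which unpacks the frame and re-executes
  // the bytecode in the interpreter.)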
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned, we'd deopt as if any call we patched had just
  // returned.

  restore_live_registers(sasm, noreg, noreg);
  // Return if patching routine returned 0.
  __ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);

  address stub = deopt_blob->unpack_with_reexecution();
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctr();

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  OopMapSet* oop_maps = NULL;

  // For better readability.
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Stub code & info for the different stubs.
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        // We don't support eden allocation.

        oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_instance), R4_ARG2);
      }
      break;

    case counter_overflow_id:
      // Bci and method are on stack.
      oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), 2);
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // Assert object type is really an array of the proper kind.
        {
          int tag = (id == new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value;
          Label ok;
          __ lwz(R0, in_bytes(Klass::layout_helper_offset()), R4_ARG2);
          __ srawi(R0, R0, Klass::_lh_array_tag_shift);
          __ cmpwi(CCR0, R0, tag);
          __ beq(CCR0, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        // We don't support eden allocation.

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_type_array), R4_ARG2, R5_ARG3);
        } else {
          oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_object_array), R4_ARG2, R5_ARG3);
        }
      }
      break;

    case new_multi_array_id:
      {
        // R4: klass
        // R5: rank
        // R6: address of 1st dimension
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_multi_array), R4_ARG2, R5_ARG3, R6_ARG4);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);
        // This code is called via rt_call. Hence, caller-save registers have been saved.
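        // What this stub computes, as a rough C sketch (illustrative only,
        // assuming the usual accessors; the common no-finalizer case must
        // return without building a frame):
        //
        //   if (obj->klass()->access_flags().has_finalizer()) {
        //     SharedRuntime::register_finalizer(thread, obj);  // slow path
        //   }
        //   return;                                            // fast path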
        Register t = R11_scratch1;

        // Load the klass and check the has-finalizer flag.
        __ load_klass(t, R3_ARG1);
        __ lwz(t, in_bytes(Klass::access_flags_offset()), t);
        __ testbitdi(CCR0, R0, t, exact_log2(JVM_ACC_HAS_FINALIZER));
        // Return if has_finalizer bit == 0 (CR0.eq).
        __ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);

        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame(frame::abi_reg_args_size, R0); // Empty dummy frame (no callee-save regs).
        sasm->set_frame_size(frame::abi_reg_args_size / BytesPerWord);
        OopMap* oop_map = new OopMap(frame::abi_reg_args_size / sizeof(jint), 0);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R3_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        __ pop_frame();
        __ ld(R0, _abi(lr), R1_SP);
        __ mtlr(R0);
        __ blr();
      }
      break;

    case throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments); // Arguments will be discarded.
        __ std(R0, -8, R1_SP); // Pass index on stack.
        oop_maps = generate_exception_throw_with_stack_parms(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), 1);
      }
      break;

    case throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments); // Arguments will be discarded.
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        const Register Rexception    = R3 /*LIRGenerator::exceptionOopOpr()*/,
                       Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/,
                       Rexception_save = R31, Rcaller_sp = R30;
        __ set_info("unwind_exception", dont_gc_arguments);

        __ ld(Rcaller_sp, 0, R1_SP);
        __ push_frame_reg_args(0, R0); // dummy frame for C call
        __ mr(Rexception_save, Rexception); // save over C call
        __ ld(Rexception_pc, _abi(lr), Rcaller_sp); // return pc
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, Rexception_pc);
        __ verify_not_null_oop(Rexception_save);
        __ mtctr(R3_RET);
        __ ld(Rexception_pc, _abi(lr), Rcaller_sp); // return pc
        __ mr(R1_SP, Rcaller_sp); // Pop both frames at once.
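        // (At this point R1_SP is the caller's SP again, so the dummy C frame
        // and the frame being unwound are both gone; the handler found by the
        // call above then runs in the caller's context.)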
        __ mr(Rexception, Rexception_save); // restore
        __ mtlr(Rexception_pc);
        __ bctr();
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
        const Register sub_klass   = R5,
                       super_klass = R4,
                       temp1_reg   = R6,
                       temp2_reg   = R0;
        __ check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg); // Returns with CR0.eq if successful.
        __ crandc(CCR0, Assembler::equal, CCR0, Assembler::equal); // failed: CR0.ne
        __ blr();
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      {
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), R4_ARG2, R5_ARG3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers(sasm, noreg, noreg, save_fpu_registers);
        __ blr();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      {
        // Note: really a leaf routine, but it must set up the last Java sp
        // => use call_RT for now (speed can be improved by
        // doing the last Java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), R4_ARG2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers(sasm, noreg, noreg, save_fpu_registers);
        __ blr();
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        __ std(R0, -8, R1_SP); // Pass trap_request on stack.
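        // (The trap_request is parked in the scratch word below SP;
        // stub_call_with_stack_parms() reloads it into R4_ARG2 once it has
        // pushed the stub frame, see the parameter reload switch above.)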
        oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, deoptimize), 1, /*do_return*/ false);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        address stub = deopt_blob->unpack_with_reexecution();
        //__ load_const_optimized(R0, stub);
        __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
        __ mtctr(R0);
        __ bctr();
      }
      break;

    case access_field_patching_id:
      {
        __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object (SPARC reference code below; not implemented on PPC)
        __ unimplemented("stub dtrace_object_alloc_id");
        __ set_info("dtrace_object_alloc", dont_gc_arguments);
        // // We can't gc here so skip the oopmap but make sure that all
        // // the live registers get saved.
        // save_live_registers(sasm);
        //
        // __ save_thread(L7_thread_cache);
        // __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
        //         relocInfo::runtime_call_type);
        // __ delayed()->mov(I0, O0);
        // __ restore_thread(L7_thread_cache);
        //
        // restore_live_registers(sasm);
        // __ ret();
        // __ delayed()->restore();
      }
      break;

#if INCLUDE_ALL_GCS
    case g1_pre_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1BarrierSet) {
          goto unimplemented_entry;
        }

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        // Using stack slots: pre_val (pre-pushed), spill tmp, spill tmp2.
        const int stack_slots = 3;
        Register pre_val = R0; // previous value of memory
        Register tmp  = R14;
        Register tmp2 = R15;

        Label refill, restart, marking_not_active;
        int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
        int satb_q_index_byte_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
        int satb_q_buf_byte_offset    = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

        // Spill
        __ std(tmp, -16, R1_SP);
        __ std(tmp2, -24, R1_SP);

        // Is marking still active?
        if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
          __ lwz(tmp, satb_q_active_byte_offset, R16_thread);
        } else {
          assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
          __ lbz(tmp, satb_q_active_byte_offset, R16_thread);
        }
        __ cmpdi(CCR0, tmp, 0);
        __ beq(CCR0, marking_not_active);

        __ bind(restart);
        // Load the index into the SATB buffer. SATBMarkQueue::_index is a
        // size_t so ld_ptr is appropriate.
        __ ld(tmp, satb_q_index_byte_offset, R16_thread);

        // index == 0?
        __ cmpdi(CCR0, tmp, 0);
        __ beq(CCR0, refill);

        __ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
        __ ld(pre_val, -8, R1_SP); // Load from stack.
        __ addi(tmp, tmp, -oopSize);

        __ std(tmp, satb_q_index_byte_offset, R16_thread);
        __ stdx(pre_val, tmp2, tmp); // [_buf + index] := pre_val

        __ bind(marking_not_active);
        // Restore temp registers and return-from-leaf.
        __ ld(tmp2, -24, R1_SP);
        __ ld(tmp, -16, R1_SP);
        __ blr();

        __ bind(refill);
        const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
        __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
        __ pop_frame();
        __ ld(R0, _abi(lr), R1_SP);
        __ mtlr(R0);
        __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ b(restart);
      }
      break;

    case g1_post_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1BarrierSet) {
          goto unimplemented_entry;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        // Using stack slots: spill addr, spill tmp2
        const int stack_slots = 2;
        Register tmp  = R0;
        Register addr = R14;
        Register tmp2 = R15;
        jbyte* byte_map_base = ci_card_table_address();

        Label restart, refill, ret;

        // Spill
        __ std(addr, -8, R1_SP);
        __ std(tmp2, -16, R1_SP);

        __ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
        __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
        __ add(addr, tmp2, addr);
        __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

        // Return if young card.
        __ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
        __ beq(CCR0, ret);

        // Return if the sequentially consistent value is already dirty.
        __ membar(Assembler::StoreLoad);
        __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

        __ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
        __ beq(CCR0, ret);

        // Not dirty.

        // First, dirty it.
        __ li(tmp, G1CardTable::dirty_card_val());
        __ stb(tmp, 0, addr);

        int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
        int dirty_card_q_buf_byte_offset   = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());

        __ bind(restart);

        // Get the index into the update buffer. DirtyCardQueue::_index is
        // a size_t so ld_ptr is appropriate here.
        __ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);

        // index == 0?
        __ cmpdi(CCR0, tmp2, 0);
        __ beq(CCR0, refill);

        __ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
        __ addi(tmp2, tmp2, -oopSize);

        __ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
        __ add(tmp2, tmp, tmp2);
        __ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>

        // Restore temp registers and return-from-leaf.
        __ bind(ret);
        __ ld(tmp2, -16, R1_SP);
        __ ld(addr, -8, R1_SP);
        __ blr();

        __ bind(refill);
        const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
        __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
        __ pop_frame();
        __ ld(R0, _abi(lr), R1_SP);
        __ mtlr(R0);
        __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ b(restart);
      }
      break;
#endif // INCLUDE_ALL_GCS

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        restore_live_registers(sasm, noreg, noreg);

        address stub = deopt_blob->unpack_with_reexecution();
        //__ load_const_optimized(R0, stub);
        __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
        __ mtctr(R0);
        __ bctr();
      }
      break;

    default:
    unimplemented_entry:
      {
        __ set_info("unimplemented entry", dont_gc_arguments);
        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame(frame::abi_reg_args_size, R0); // empty dummy frame
        sasm->set_frame_size(frame::abi_reg_args_size / BytesPerWord);
        OopMap* oop_map = new OopMap(frame::abi_reg_args_size / sizeof(jint), 0);

        __ load_const_optimized(R4_ARG2, (int)id);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R4_ARG2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  const Register Rexception    = R3 /*LIRGenerator::exceptionOopOpr()*/,
                 Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/;

  switch (id) {
    case forward_exception_id:
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places. Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found. Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm, true);
      // Transfer the pending exception to the exception_oop.
      // Also load the PC which is typically at SP + frame_size_in_bytes + _abi(lr),
      // but we support additional slots in the frame for parameter passing.
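      // (Explanatory note: 0(R1_SP) holds the ABI back chain, i.e. the
      // caller's SP; the return pc is then picked out of that frame's LR
      // save slot via _abi(lr) a few instructions below.)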
      __ ld(Rexception_pc, 0, R1_SP);
      __ ld(Rexception, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
      __ li(R0, 0);
      __ ld(Rexception_pc, _abi(lr), Rexception_pc);
      __ std(R0, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
      break;
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // At this point all registers MAY be live.
      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Rexception_pc);
      break;
    case handle_exception_from_callee_id:
      // At this point all registers except the exception oop and exception pc are dead.
      oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
      sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
      __ std(Rexception_pc, _abi(lr), R1_SP);
      __ push_frame(frame_size_in_bytes, R0);
      break;
    default: ShouldNotReachHere();
  }

  __ verify_not_null_oop(Rexception);

#ifdef ASSERT
  // Check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them.
  __ ld(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ cmpdi(CCR0, R0, 0);
  __ asm_assert_eq("exception oop already set", 0x963);
  __ ld(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
  __ cmpdi(CCR0, R0, 0);
  __ asm_assert_eq("exception pc already set", 0x962);
#endif

  // Save the exception and issuing pc in the thread.
  __ std(Rexception,    in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(Rexception_pc, in_bytes(JavaThread::exception_pc_offset()),  R16_thread);

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  __ mtctr(R3_RET);

  // Note: if the nmethod has been deoptimized then, regardless of whether it
  // had a handler or not, we will deoptimize by entering the deopt blob with
  // a pending exception.

  // Restore the registers that were saved at the beginning, remove
  // the frame and jump to the exception handler.
  switch (id) {
    case forward_exception_id:
    case handle_exception_nofpu_id:
    case handle_exception_id:
      restore_live_registers(sasm, noreg, noreg, id != handle_exception_nofpu_id);
      __ bctr();
      break;
    case handle_exception_from_callee_id: {
      __ pop_frame();
      __ ld(Rexception_pc, _abi(lr), R1_SP);
      __ mtlr(Rexception_pc);
      __ bctr();
      break;
    }
    default: ShouldNotReachHere();
  }

  return oop_maps;
}

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}

#undef __