/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#undef  __
#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) __ bind(label); BLOCK_COMMENT(#label ":")

//-----------------------------------------------------------------------------

// Actually we should never reach here since we do stack overflow checks before pushing any frame.
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  __ unimplemented("generate_StackOverflowError_handler");
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  __ empty_expression_stack();
  __ load_const_optimized(R4_ARG2, (address) name);
  // Index is in R17_tos.
  __ mr(R5_ARG3, R17_tos);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException));
  return entry;
}

#if 0
// Call special ClassCastException constructor taking object to cast
// and target class as arguments.
address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler() {
  address entry = __ pc();

  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Thread will be loaded to R3_ARG1.
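  // The object to cast is still in R17_tos; the call_VM below passes it as the
  // first Java argument (after the thread in R3_ARG1).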
  // Target class oop is in register R5_ARG3 by convention!
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3);
  // Above call must not return here since exception pending.
  DEBUG_ONLY(__ should_not_reach_here();)
  return entry;
}
#endif

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Load exception object.
  // Thread will be loaded to R3_ARG1.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), R17_tos);
#ifdef ASSERT
  // Above call must not return here since exception pending.
  __ should_not_reach_here();
#endif
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  address entry = __ pc();
  //__ untested("generate_exception_handler_common");
  Register Rexception = R17_tos;

  // Expression stack must be empty before entering the VM if an exception happened.
  __ empty_expression_stack();

  __ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
  if (pass_oop) {
    __ mr(R5_ARG3, Rexception);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), false);
  } else {
    __ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), false);
  }

  // Throw exception.
  __ mr(R3_ARG1, Rexception);
  __ load_const_optimized(R11_scratch1, Interpreter::throw_exception_entry(), R12_scratch2);
  __ mtctr(R11_scratch1);
  __ bctr();

  return entry;
}

address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ unimplemented("generate_continuation_for");
  return entry;
}

// This entry is returned to when a call returns to the interpreter.
// When we arrive here, we expect that the callee stack frame is already popped.
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Move the value out of the return register back to the TOS cache of current frame.
  switch (state) {
    case ltos:
    case btos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R17_tos, R3_RET); break;   // RET -> TOS cache
    case ftos:
    case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
    case vtos: break;                           // Nothing to do, this was a void return.
    default  : ShouldNotReachHere();
  }

  __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
  __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
  __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);

  // Compiled code destroys templateTableBase, reload.
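  // (TosState)0 denotes the first of the per-state dispatch tables; the dispatch
  // code later adds a state-dependent offset to this base (see
  // InterpreterMacroAssembler::dispatch_next).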
  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);

  if (state == atos) {
    __ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
  }

  const Register cache = R11_scratch1;
  const Register size  = R12_scratch2;
  __ get_cache_and_index_at_bcp(cache, 1, index_size);

  // Get least significant byte of 64 bit value:
#if defined(VM_LITTLE_ENDIAN)
  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache);
#else
  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
#endif
  __ sldi(size, size, Interpreter::logStackElementSize);
  __ add(R15_esp, R15_esp, size);
  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  // If state != vtos, we're returning from a native method, which put its result
  // into the result register. So move the value out of the return register back
  // to the TOS cache of current frame.

  switch (state) {
    case ltos:
    case btos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R17_tos, R3_RET); break;   // GR_RET -> TOS cache
    case ftos:
    case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
    case vtos: break;                           // Nothing to do, this was a void return.
    default  : ShouldNotReachHere();
  }

  // Load LcpoolCache @@@ should be already set!
  __ get_constant_pool_cache(R27_constPoolCache);

  // Handle a pending exception, fall through if none.
  __ check_and_forward_exception(R11_scratch1, R12_scratch2);

  // Start executing bytecodes.
  __ dispatch_next(state, step);

  return entry;
}

// A result handler converts the native result into java format.
// Use the shared code between C++ and template interpreter.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  return AbstractInterpreterGenerator::generate_result_handler_for(type);
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();

  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));

  return entry;
}

// Helpers for commoning out cases in the various types of method entries.

// Increment invocation count & check for overflow.
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test.
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in method or in MDO depending on whether we're profiling or not.
  Register Rscratch1   = R11_scratch1;
  Register Rscratch2   = R12_scratch2;
  Register R3_counters = R3_ARG1;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      const Register Rmdo = Rscratch1;
      // If no method data exists, go to profile_continue.
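      // (The MDO is allocated lazily, so Method::_method_data may still be NULL here.)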
      __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
      __ cmpdi(CCR0, Rmdo, 0);
      __ beq(CCR0, no_mdo);

      // Increment backedge counter in the MDO.
      const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mdo_bc_offs, Rmdo);
      __ load_const_optimized(Rscratch1, mask, R0);
      __ and_(Rscratch1, Rscratch2, Rscratch1);
      __ bne(CCR0, done);
      __ b(*overflow);
    }

    // Increment counter in MethodCounters*.
    const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
    __ bind(no_mdo);
    __ get_method_counters(R19_method, R3_counters, done);
    __ lwz(Rscratch2, mo_bc_offs, R3_counters);
    __ addi(Rscratch2, Rscratch2, increment);
    __ stw(Rscratch2, mo_bc_offs, R3_counters);
    __ load_const_optimized(Rscratch1, mask, R0);
    __ and_(Rscratch1, Rscratch2, Rscratch1);
    __ beq(CCR0, *overflow);

    __ bind(done);

  } else {

    // Update standard invocation counters.
    Register Rsum_ivc_bec = R4_ARG2;
    __ get_method_counters(R19_method, R3_counters, done);
    __ increment_invocation_counter(R3_counters, Rsum_ivc_bec, R12_scratch2);
    // Increment interpreter invocation counter.
    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop.
      __ lwz(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
      __ addi(R12_scratch2, R12_scratch2, 1);
      __ stw(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
    }
    // Check if we must create a method data obj.
    if (ProfileInterpreter && profile_method != NULL) {
      const Register profile_limit = Rscratch1;
      int pl_offs = __ load_const_optimized(profile_limit, &InvocationCounter::InterpreterProfileLimit, R0, true);
      __ lwz(profile_limit, pl_offs, profile_limit);
      // Test to see if we should create a method data oop.
      __ cmpw(CCR0, Rsum_ivc_bec, profile_limit);
      __ blt(CCR0, *profile_method_continue);
      // If no method data exists, go to profile_method.
      __ test_method_data_pointer(*profile_method);
    }
    // Finally check for counter overflow.
    if (overflow) {
      const Register invocation_limit = Rscratch1;
      int il_offs = __ load_const_optimized(invocation_limit, &InvocationCounter::InterpreterInvocationLimit, R0, true);
      __ lwz(invocation_limit, il_offs, invocation_limit);
      assert(4 == sizeof(InvocationCounter::InterpreterInvocationLimit), "unexpected field size");
      __ cmpw(CCR0, Rsum_ivc_bec, invocation_limit);
      __ bge(CCR0, *overflow);
    }

    __ bind(done);
  }
}

// Generate code to initiate compilation on invocation counter overflow.
void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes one argument,
  // which indicates if the counter overflow occurs at a backwards branch (NULL bcp).
  // We pass zero in.
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  //
  // Unlike the C++ interpreter above: Check exceptions!
  // Assumption: Caller must set the flag "do_not_unlock_if_synchronized" if the monitor of a sync'ed
  // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.

  __ li(R4_ARG2, 0);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);

  // Returns verified_entry_point or NULL.
  // We ignore it in any case.
  __ b(continue_entry);
}

void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
  assert_different_registers(Rmem_frame_size, Rscratch1);
  __ generate_stack_overflow_check_with_compare_and_throw(Rmem_frame_size, Rscratch1);
}

void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
  __ unlock_object(R26_monitor, check_exceptions);
}

// Lock the current method, interpreter register window must be set up!
void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
  const Register Robj_to_lock = Rscratch2;

  {
    if (!flags_preloaded) {
      __ lwz(Rflags, method_(access_flags));
    }

#ifdef ASSERT
    // Check if method needs synchronization.
    {
      Label Lok;
      __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
      __ btrue(CCR0, Lok);
      __ stop("method doesn't need synchronization");
      __ bind(Lok);
    }
#endif // ASSERT
  }

  // Get synchronization object to Rscratch2.
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label Lstatic;
    Label Ldone;

    __ testbitdi(CCR0, R0, Rflags, JVM_ACC_STATIC_BIT);
    __ btrue(CCR0, Lstatic);

    // Non-static case: load receiver obj from stack and we're done.
    __ ld(Robj_to_lock, R18_locals);
    __ b(Ldone);

    __ bind(Lstatic); // Static case: Lock the java mirror.
    __ ld(Robj_to_lock, in_bytes(Method::const_offset()), R19_method);
    __ ld(Robj_to_lock, in_bytes(ConstMethod::constants_offset()), Robj_to_lock);
    __ ld(Robj_to_lock, ConstantPool::pool_holder_offset_in_bytes(), Robj_to_lock);
    __ ld(Robj_to_lock, mirror_offset, Robj_to_lock);

    __ bind(Ldone);
    __ verify_oop(Robj_to_lock);
  }

  // Got the oop to lock => execute!
  __ add_monitor_to_stack(true, Rscratch1, R0);

  __ std(Robj_to_lock, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
  __ lock_object(R26_monitor, Robj_to_lock);
}

// Generate a fixed interpreter frame for pure interpreter
// and I2N native transition frames.
//
// Before (stack grows downwards):
//
//         |  ...         |
//         |--------------|
//         |  java arg0   |
//         |  ...         |
//         |  java argn   |
//         |              |  <-  R15_esp
//         |              |
//         |--------------|
//         |  abi_112     |
//         |              |  <-  R1_SP
//         |==============|
//
//
// After:
//
//         |  ...         |
//         |  java arg0   |  <-  R18_locals
//         |  ...         |
//         |  java argn   |
//         |--------------|
//         |              |
//         |  java locals |
//         |              |
//         |--------------|
//         |  abi_48      |
//         |==============|
//         |              |
//         |  istate      |
//         |              |
//         |--------------|
//         |  monitor     |  <-  R26_monitor
//         |--------------|
//         |              |  <-  R15_esp
//         |  expression  |
//         |  stack       |
//         |              |
//         |--------------|
//         |              |
//         |  abi_112     |  <-  R1_SP
//         |==============|
//
// The topmost frame needs an ABI space of 112 bytes. This space is needed,
// since we call to C.
// The C function may spill its arguments to the caller's
// frame. When we call to Java, we don't need these spill slots. In order to save
// space on the stack, we resize the caller's frame. However, the Java locals reside
// in the caller's frame and the frame has to be enlarged accordingly. The frame_size
// for the current frame was calculated based on max_stack as size for the expression
// stack. At the call, just a part of the expression stack might be used.
// We don't want to waste this space and cut the frame back accordingly.
// The resulting amount for resizing is calculated as follows:
// resize = (number_of_locals - number_of_arguments) * slot_size
//          + (R1_SP - R15_esp) + 48
//
// The size for the callee frame is calculated:
// framesize = 112 + max_stack + monitor + state_size
//
// maxstack:   Max number of slots on the expression stack, loaded from the method.
// monitor:    We statically reserve room for one monitor object.
// state_size: We save the current state of the interpreter to this area.
//
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals) {
  Register parent_frame_resize = R6_ARG4, // Frame will grow by this number of bytes.
           top_frame_size      = R7_ARG5,
           Rconst_method       = R8_ARG6;

  assert_different_registers(Rsize_of_parameters, Rsize_of_locals, parent_frame_resize, top_frame_size);

  __ ld(Rconst_method, method_(const));
  __ lhz(Rsize_of_parameters /* number of params */,
         in_bytes(ConstMethod::size_of_parameters_offset()), Rconst_method);
  if (native_call) {
    // If we're calling a native method, we reserve space for the worst-case signature
    // handler varargs vector, which is max(Argument::n_register_parameters, parameter_count+2).
    // We add two slots to the parameter_count, one for the jni
    // environment and one for a possible native mirror.
    Label skip_native_calculate_max_stack;
    __ addi(top_frame_size, Rsize_of_parameters, 2);
    __ cmpwi(CCR0, top_frame_size, Argument::n_register_parameters);
    __ bge(CCR0, skip_native_calculate_max_stack);
    __ li(top_frame_size, Argument::n_register_parameters);
    __ bind(skip_native_calculate_max_stack);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    assert(Rsize_of_locals == noreg, "Rsize_of_locals not initialized"); // Only relevant value is Rsize_of_parameters.
  } else {
    __ lhz(Rsize_of_locals /* number of locals */, in_bytes(ConstMethod::size_of_locals_offset()), Rconst_method);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(Rsize_of_locals, Rsize_of_locals, Interpreter::logStackElementSize);
    __ lhz(top_frame_size, in_bytes(ConstMethod::max_stack_offset()), Rconst_method);
    __ sub(R11_scratch1, Rsize_of_locals, Rsize_of_parameters); // >=0
    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
    __ add(parent_frame_resize, parent_frame_resize, R11_scratch1);
  }

  // Compute top frame size.
  __ addi(top_frame_size, top_frame_size, frame::abi_reg_args_size + frame::ijava_state_size);

  // Cut back area between esp and max_stack.
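  // The addi below keeps the caller's minimal ABI frame and compensates for the
  // stackElementSize off-by-one noted where parent_frame_resize was computed.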
  __ addi(parent_frame_resize, parent_frame_resize, frame::abi_minframe_size - Interpreter::stackElementSize);

  __ round_to(top_frame_size, frame::alignment_in_bytes);
  __ round_to(parent_frame_resize, frame::alignment_in_bytes);
  // parent_frame_resize = (locals-parameters) - (ESP-SP-ABI48) Rounded to frame alignment size.
  // Enlarge by locals-parameters (not in case of native_call), shrink by ESP-SP-ABI48.

  {
    // --------------------------------------------------------------------------
    // Stack overflow check

    Label cont;
    __ add(R11_scratch1, parent_frame_resize, top_frame_size);
    generate_stack_overflow_check(R11_scratch1, R12_scratch2);
  }

  // Set up interpreter state registers.

  __ add(R18_locals, R15_esp, Rsize_of_parameters);
  __ ld(R27_constPoolCache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
  __ ld(R27_constPoolCache, ConstantPool::cache_offset_in_bytes(), R27_constPoolCache);

  // Set method data pointer.
  if (ProfileInterpreter) {
    Label zero_continue;
    __ ld(R28_mdx, method_(method_data));
    __ cmpdi(CCR0, R28_mdx, 0);
    __ beq(CCR0, zero_continue);
    __ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
    __ bind(zero_continue);
  }

  if (native_call) {
    __ li(R14_bcp, 0); // Must initialize.
  } else {
    __ add(R14_bcp, in_bytes(ConstMethod::codes_offset()), Rconst_method);
  }

  // Resize parent frame.
  __ mflr(R12_scratch2);
  __ neg(parent_frame_resize, parent_frame_resize);
  __ resize_frame(parent_frame_resize, R11_scratch1);
  __ std(R12_scratch2, _abi(lr), R1_SP);

  __ addi(R26_monitor, R1_SP, -frame::ijava_state_size);
  __ addi(R15_esp, R26_monitor, -Interpreter::stackElementSize);

  // Store values.
  // R15_esp, R14_bcp, R26_monitor, R28_mdx are saved at java calls
  // in InterpreterMacroAssembler::call_from_interpreter.
  __ std(R19_method, _ijava_state_neg(method), R1_SP);
  __ std(R21_sender_SP, _ijava_state_neg(sender_sp), R1_SP);
  __ std(R27_constPoolCache, _ijava_state_neg(cpoolCache), R1_SP);
  __ std(R18_locals, _ijava_state_neg(locals), R1_SP);

  // Note: esp, bcp, monitor, mdx live in registers. Hence, the correct version can only
  // be found in the frame after save_interpreter_state is done. This is always true
  // for non-top frames. But when a signal occurs, dumping the top frame can go wrong,
  // because e.g. frame::interpreter_frame_bcp() will not access the correct value
  // (Enhanced Stack Trace).
  // The signal handler does not save the interpreter state into the frame.
  __ li(R0, 0);
#ifdef ASSERT
  // Fill remaining slots with constants.
  __ load_const_optimized(R11_scratch1, 0x5afe);
  __ load_const_optimized(R12_scratch2, 0xdead);
#endif
  // We have to initialize some frame slots for native calls (accessed by GC).
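  // (A GC can walk this i2n frame while we are out in native code, so monitors,
  // bcp and mdx must hold consistent values rather than stale garbage.)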
  if (native_call) {
    __ std(R26_monitor, _ijava_state_neg(monitors), R1_SP);
    __ std(R14_bcp, _ijava_state_neg(bcp), R1_SP);
    if (ProfileInterpreter) { __ std(R28_mdx, _ijava_state_neg(mdx), R1_SP); }
  }
#ifdef ASSERT
  else {
    __ std(R12_scratch2, _ijava_state_neg(monitors), R1_SP);
    __ std(R12_scratch2, _ijava_state_neg(bcp), R1_SP);
    __ std(R12_scratch2, _ijava_state_neg(mdx), R1_SP);
  }
  __ std(R11_scratch1, _ijava_state_neg(ijava_reserved), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(lresult), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(fresult), R1_SP);
#endif
  __ subf(R12_scratch2, top_frame_size, R1_SP);
  __ std(R0, _ijava_state_neg(oop_tmp), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);

  // Push top frame.
  __ push_frame(top_frame_size, R11_scratch1);
}

// End of helpers

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  if (!TemplateInterpreter::math_entry_available(kind)) {
    NOT_PRODUCT(__ should_not_reach_here();)
    return NULL;
  }

  address entry = __ pc();

  __ lfd(F1_RET, Interpreter::stackElementSize, R15_esp);

  // Pop c2i arguments (if any) off when we return.
#ifdef ASSERT
  __ ld(R9_ARG7, 0, R1_SP);
  __ ld(R10_ARG8, 0, R21_sender_SP);
  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
  __ asm_assert_eq("backlink", 0x545);
#endif // ASSERT
  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.

  if (kind == Interpreter::java_lang_math_sqrt) {
    __ fsqrt(F1_RET, F1_RET);
  } else if (kind == Interpreter::java_lang_math_abs) {
    __ fabs(F1_RET, F1_RET);
  } else {
    ShouldNotReachHere();
  }

  // And we're done.
  __ blr();

  __ flush();

  return entry;
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
//
// On entry:
//   R19_method  - method
//   R16_thread  - JavaThread*
//   R15_esp     - intptr_t* sender tos
//
//   abstract stack (grows up)
//     [  IJava (caller of JNI callee)  ]  <-- ASP
//        ...
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {

  address entry = __ pc();

  const bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // -----------------------------------------------------------------------------
  // Allocate a new frame that represents the native callee (i2n frame).
  // This is not a full-blown interpreter frame, but in particular, the
  // following registers are valid after this:
  // - R19_method
  // - R18_locals (points to start of arguments to native function)
  //
  //   abstract stack (grows up)
  //     [  IJava (caller of JNI callee)  ]  <-- ASP
  //        ...

  const Register signature_handler_fd = R11_scratch1;
  const Register pending_exception    = R0;
  const Register result_handler_addr  = R31;
  const Register native_method_fd     = R11_scratch1;
  const Register access_flags         = R22_tmp2;
  const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
  const Register sync_state           = R12_scratch2;
  const Register sync_state_addr      = sync_state;   // Address is dead after use.
  const Register suspend_flags        = R11_scratch1;

  //=============================================================================
  // Allocate new frame and initialize interpreter state.

  Label exception_return;
  Label exception_return_sync_check;
  Label stack_overflow_return;

  // Generate new interpreter state and jump to stack_overflow_return in case of
  // a stack overflow.
  //generate_compute_interpreter_state(stack_overflow_return);

  Register size_of_parameters = R22_tmp2;

  generate_fixed_frame(true, size_of_parameters, noreg /* unused */);

  //=============================================================================
  // Increment invocation counter. On overflow, entry to JNI method
  // will be compiled.
  Label invocation_counter_overflow, continue_after_compile;
  if (inc_counter) {
    if (synchronized) {
      // Since at this point in the method invocation the exception handler
      // would try to exit the monitor of a synchronized method which hasn't
      // been entered yet, we set the thread local variable
      // _do_not_unlock_if_synchronized to true. If any exception was thrown by
      // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
      // check this thread-local flag.
      // This flag has two effects: it forces an unwind in the topmost
      // interpreter frame, and no unlock is performed while doing so.
      __ li(R0, 1);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

    BIND(continue_after_compile);
    // Reset the _do_not_unlock_if_synchronized flag.
    if (synchronized) {
      __ li(R0, 0);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
  }

  // access_flags = method->access_flags();
  // Load access flags.
  assert(access_flags->is_nonvolatile(),
         "access_flags must be in a non-volatile register");
  // Type check.
  assert(4 == sizeof(AccessFlags), "unexpected field size");
  __ lwz(access_flags, method_(access_flags));

  // We don't want to reload R19_method and access_flags after calls
  // to some helper functions.
  assert(R19_method->is_nonvolatile(),
         "R19_method must be a non-volatile register");

  // Check for synchronized methods. Must happen AFTER invocation counter
  // check, so method is not locked if counter overflows.

  if (synchronized) {
    lock_method(access_flags, R11_scratch1, R12_scratch2, true);

    // Update monitor in state.
    __ ld(R11_scratch1, 0, R1_SP);
    __ std(R26_monitor, _ijava_state_neg(monitors), R11_scratch1);
  }

  // jvmti/jvmpi support
  __ notify_method_entry();

  //=============================================================================
  // Get and call the signature handler.

  __ ld(signature_handler_fd, method_(signature_handler));
  Label call_signature_handler;

  __ cmpdi(CCR0, signature_handler_fd, 0);
  __ bne(CCR0, call_signature_handler);

  // Method has never been called. Either generate a specialized
  // handler or point to the slow one.
  //
  // Pass parameter 'false' to avoid exception check in call_VM.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);

  // Check for an exception while looking up the target method. If we
  // incurred one, bail.
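  // "Bail" here means: unlock the method if needed and leave through the
  // exception return path (exception_return_sync_check) below.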
  __ ld(pending_exception, thread_(pending_exception));
  __ cmpdi(CCR0, pending_exception, 0);
  __ bne(CCR0, exception_return_sync_check); // Has pending exception.

  // Reload signature handler, it may have been created/assigned in the meanwhile.
  __ ld(signature_handler_fd, method_(signature_handler));
  __ twi_0(signature_handler_fd); // Order wrt. load of klass mirror and entry point (isync is below).

  BIND(call_signature_handler);

  // Before we call the signature handler we push a new frame to
  // protect the interpreter frame volatile registers when we return
  // from JNI but before we can get back to Java.

  // First set the frame anchor while the SP/FP registers are
  // convenient and the slow signature handler can use this same frame
  // anchor.

  // We have a TOP_IJAVA_FRAME here, which belongs to us.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);

  // Now the interpreter frame (and its call chain) have been
  // invalidated and flushed. We are now protected against eager
  // being enabled in native code. Even if it goes eager the
  // registers will be reloaded as clean and we will invalidate after
  // the call so no spurious flush should be possible.

  // Call signature handler and pass locals address.
  //
  // Our signature handlers copy required arguments to the C stack
  // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
  __ mr(R3_ARG1, R18_locals);
#if !defined(ABI_ELFv2)
  __ ld(signature_handler_fd, 0, signature_handler_fd);
#endif

  __ call_stub(signature_handler_fd);

  // Remove the register parameter varargs slots we allocated in
  // compute_interpreter_state. SP+16 ends up pointing to the ABI
  // outgoing argument area.
  //
  // Not needed on PPC64.
  //__ add(SP, SP, Argument::n_register_parameters*BytesPerWord);

  assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
  // Save across call to native method.
  __ mr(result_handler_addr, R3_RET);

  __ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.

  // Set up fixed parameters and call the native method.
  // If the method is static, get mirror into R4_ARG2.
  {
    Label method_is_not_static;
    // access_flags is non-volatile and still valid, no need to reload it.

    // Test whether the method is static.
    __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
    __ bfalse(CCR0, method_is_not_static);

    // constants = method->constants();
    __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
    __ ld(R11_scratch1, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
    // pool_holder = method->constants()->pool_holder();
    __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
          R11_scratch1/*constants*/);

    const int mirror_offset = in_bytes(Klass::java_mirror_offset());

    // mirror = pool_holder->klass_part()->java_mirror();
    __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
    // state->_native_mirror = mirror;

    __ ld(R11_scratch1, 0, R1_SP);
    __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
    // R4_ARG2 = &state->_oop_temp;
    __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
    BIND(method_is_not_static);
  }

  // At this point, arguments have been copied off the stack into
  // their JNI positions. Oops are boxed in-place on the stack, with
  // handles copied to arguments. The result handler address is in a
  // register.

  // Pass JNIEnv address as first parameter.
  __ addir(R3_ARG1, thread_(jni_environment));

  // Load the native_method entry before we change the thread state.
  __ ld(native_method_fd, method_(native_function));

  //=============================================================================
  // Transition from _thread_in_Java to _thread_in_native. As soon as
  // we make this change the safepoint code needs to be certain that
  // the last Java frame we established is good. The pc in that frame
  // just needs to be near here, not an actual return address.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0, _thread_in_native);
  __ release();

  // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));

  if (UseMembar) {
    __ fence();
  }

  //=============================================================================
  // Call the native method. Argument registers must not have been
  // overwritten since "__ call_stub(signature_handler);" (except for
  // ARG1 and ARG2 for static methods).
  __ call_c(native_method_fd);

  __ li(R0, 0);
  __ ld(R11_scratch1, 0, R1_SP);
  __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
  __ stfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
  __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); // reset

  // Note: C++ interpreter needs the following here:
  // The frame_manager_lr field, which we use for setting the last
  // java frame, gets overwritten by the signature handler. Restore
  // it now.
  //__ get_PC_trash_LR(R11_scratch1);
  //__ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);

  // Because of GC R19_method may no longer be valid.

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after
  // blocking.
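  // (Hence reset_last_Java_frame() is deferred until after the switch back to
  // _thread_in_Java further down.)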

  //=============================================================================
  // Switch thread to "native transition" state before reading the
  // synchronization state. This additional state is necessary
  // because reading and testing the synchronization state is not
  // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
  // in _thread_in_native state, loads _not_synchronized and is
  // preempted. VM thread changes sync state to synchronizing and
  // suspends threads for GC. Thread A is resumed to finish this
  // native method, but doesn't block here since it didn't see any
  // synchronization in progress, and escapes.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0/*thread_state*/, _thread_in_native_trans);
  __ release();
  __ stw(R0/*thread_state*/, thread_(thread_state));
  if (UseMembar) {
    __ fence();
  }
  // Write serialization page so that the VM thread can do a pseudo remote
  // membar. We use the current thread pointer to calculate a thread
  // specific offset to write to within the page. This minimizes bus
  // traffic due to cache line collision.
  else {
    __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
  }

  // Now before we return to Java we must look for a current safepoint
  // (a new safepoint can not start since we entered native_trans).
  // We must check here because a current safepoint could be modifying
  // the caller's registers right this moment.

  // Acquire isn't strictly necessary here because of the fence, but
  // sync_state is declared to be volatile, so we do it anyway
  // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
  int sync_state_offs = __ load_const_optimized(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/R0, true);

  // TODO PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
  __ lwz(sync_state, sync_state_offs, sync_state_addr);

  // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
  __ lwz(suspend_flags, thread_(suspend_flags));

  Label sync_check_done;
  Label do_safepoint;
  // No synchronization in progress nor yet synchronized.
  __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
  // Not suspended.
  __ cmpwi(CCR1, suspend_flags, 0);

  __ bne(CCR0, do_safepoint);
  __ beq(CCR1, sync_check_done);
  __ bind(do_safepoint);
  __ isync();
  // Block. We do the call directly and leave the current
  // last_Java_frame setup undisturbed. We must save any possible
  // native result across the call. No oop is present.

  __ mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
  __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
            relocInfo::none);
#else
  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
            relocInfo::none);
#endif

  __ bind(sync_check_done);

  //=============================================================================
  // <<<<<< Back in Interpreter Frame >>>>>

  // We are in thread_in_native_trans here and back in the normal
  // interpreter frame. We don't have to do anything special about
  // safepoints and we can switch to Java mode anytime we are ready.

  // Note: frame::interpreter_frame_result has a dependency on how the
  // method result is saved across the call to post_method_exit. For
  // native methods it assumes that the non-FPU/non-void result is
  // saved in _native_lresult and a FPU result in _native_fresult. If
  // this changes then the interpreter_frame_result implementation
  // will need to be updated too.

  // On PPC64, we have stored the result directly after the native call.

  //=============================================================================
  // Back in Java

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0/*thread_state*/, _thread_in_Java);
  __ release();
  __ stw(R0/*thread_state*/, thread_(thread_state));
  if (UseMembar) {
    __ fence();
  }

  __ reset_last_Java_frame();

  // Jvmdi/jvmpi support. Whether we've got an exception pending or
  // not, and whether unlocking throws an exception or not, we notify
  // on native method exit. If we do have an exception, we'll end up
  // in the caller's context to handle it, so if we don't do the
  // notify here, we'll drop it on the floor.
  __ notify_method_exit(true/*native method*/,
                        ilgl /*illegal state (not used for native methods)*/,
                        InterpreterMacroAssembler::NotifyJVMTI,
                        false /*check_exceptions*/);

  //=============================================================================
  // Handle exceptions

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
  }

  // Reset active handles after returning from native.
  // thread->active_handles()->clear();
  __ ld(active_handles, thread_(active_handles));
  // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
  __ li(R0, 0);
  __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);

  Label exception_return_sync_check_already_unlocked;
  __ ld(R0/*pending_exception*/, thread_(pending_exception));
  __ cmpdi(CCR0, R0/*pending_exception*/, 0);
  __ bne(CCR0, exception_return_sync_check_already_unlocked);

  //-----------------------------------------------------------------------------
  // No exception pending.

  // Move native method result back into proper registers and return.
  // Invoke result handler (may unbox/promote).
  __ ld(R11_scratch1, 0, R1_SP);
  __ ld(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
  __ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
  __ call_stub(result_handler_addr);

  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

  // Must use the return pc which was loaded from the caller's frame
  // as the VM uses return-pc-patching for deoptimization.
  __ mtlr(R0);
  __ blr();

  //-----------------------------------------------------------------------------
  // An exception is pending. We call into the runtime only if the
  // caller was not interpreted. If it was interpreted the
  // interpreter will do the correct thing. If it isn't interpreted
  // (call stub/compiled code) we will change our return and continue.

  BIND(exception_return_sync_check);

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
  }
  BIND(exception_return_sync_check_already_unlocked);

  const Register return_pc = R31;

  __ ld(return_pc, 0, R1_SP);
  __ ld(return_pc, _abi(lr), return_pc);

  // Get the address of the exception handler.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                  R16_thread,
                  return_pc /* return pc */);
  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);

  // Load the PC of the exception handler into LR.
  __ mtlr(R3_RET);

  // Load exception into R3_ARG1 and clear pending exception in thread.
  __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
  __ li(R4_ARG2, 0);
  __ std(R4_ARG2, thread_(pending_exception));

  // Load the original return pc into R4_ARG2.
  __ mr(R4_ARG2/*issuing_pc*/, return_pc);

  // Return to exception handler.
  __ blr();

  //=============================================================================
  // Counter overflow.

  if (inc_counter) {
    // Handle invocation counter overflow.
    __ bind(invocation_counter_overflow);

    generate_counter_overflow(continue_after_compile);
  }

  return entry;
}

// Generic interpreted method entry to (asm) interpreter.
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
  address entry = __ pc();
  // Generate the code to allocate the interpreter stack frame.
  Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
           Rsize_of_locals     = R5_ARG3; // Written by generate_fixed_frame.

  generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);

  // --------------------------------------------------------------------------
  // Zero out non-parameter locals.
  // Note: *Always* zero out non-parameter locals as Sparc does. It's not
  // worth asking the flag, just do it.
  Register Rslot_addr = R6_ARG4,
           Rnum       = R7_ARG5;
  Label Lno_locals, Lzero_loop;

  // Set up the zeroing loop.
  __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
  __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
  __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
  __ beq(CCR0, Lno_locals);
  __ li(R0, 0);
  __ mtctr(Rnum);

  // The zero locals loop.
  __ bind(Lzero_loop);
  __ std(R0, 0, Rslot_addr);
  __ addi(Rslot_addr, Rslot_addr, -Interpreter::stackElementSize);
  __ bdnz(Lzero_loop);

  __ bind(Lno_locals);

  // --------------------------------------------------------------------------
  // Counter increment and overflow check.
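  // The three labels below are the out-of-line targets handed to generate_counter_incr.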
  Label invocation_counter_overflow,
        profile_method,
        profile_method_continue;
  if (inc_counter || ProfileInterpreter) {

    Register Rdo_not_unlock_if_synchronized_addr = R11_scratch1;
    if (synchronized) {
      // Since at this point in the method invocation the exception handler
      // would try to exit the monitor of a synchronized method which hasn't
      // been entered yet, we set the thread local variable
      // _do_not_unlock_if_synchronized to true. If any exception was thrown by
      // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
      // check this thread-local flag.
      // This flag has two effects: it forces an unwind in the topmost
      // interpreter frame, and no unlock is performed while doing so.
      __ li(R0, 1);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }

    // Argument and return type profiling.
    __ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4);

    // Increment invocation counter and check for overflow.
    if (inc_counter) {
      generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    }

    __ bind(profile_method_continue);

    // Reset the _do_not_unlock_if_synchronized flag.
    if (synchronized) {
      __ li(R0, 0);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
  }

  // --------------------------------------------------------------------------
  // Locking of synchronized methods. Must happen AFTER invocation_counter
  // check and stack overflow check, so method is not locked if the counter overflows.
  if (synchronized) {
    lock_method(R3_ARG1, R4_ARG2, R5_ARG3);
  }
#ifdef ASSERT
  else {
    Label Lok;
    __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
    __ andi_(R0, R0, JVM_ACC_SYNCHRONIZED);
    __ asm_assert_eq("method needs synchronization", 0x8521);
    __ bind(Lok);
  }
#endif // ASSERT

  __ verify_thread();

  // --------------------------------------------------------------------------
  // JVMTI support
  __ notify_method_entry();

  // --------------------------------------------------------------------------
  // Start executing instructions.
  __ dispatch_next(vtos);

  // --------------------------------------------------------------------------
  // Out of line counter overflow and MDO creation code.
  if (ProfileInterpreter) {
    // We have decided to profile this method in the interpreter.
    __ bind(profile_method);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
    __ set_method_data_pointer_for_bcp();
    __ b(profile_method_continue);
  }

  if (inc_counter) {
    // Handle invocation counter overflow.
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(profile_method_continue);
  }
  return entry;
}

// CRC32 Intrinsics.
//
// Contract on scratch and work registers.
// =======================================
//
// On PPC64, the register set {R2..R12} is available in the interpreter as scratch/work registers.
// You should, however, keep in mind that {R3_ARG1..R10_ARG8} is the C-ABI argument register set.
// You can't rely on these registers across calls.
//
// The generators for CRC32_update and for CRC32_updateBytes use the
// scratch/work register set internally, passing the work registers
// as arguments to the MacroAssembler emitters as required.
//
// R3_ARG1..R6_ARG4 are preset to hold the incoming java arguments.
// Their contents are not constant but may change according to the requirements
// of the emitted code.
//
// All other registers from the scratch/work register set are used "internally"
// and contain garbage (i.e. unpredictable values) once blr() is reached.
// Basically, only R3_RET contains a defined value which is the function result.
//
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address start = __ pc();  // Remember stub start address (is rtn value).
    Label slow_path;

    // Safepoint check
    const Register sync_state = R11_scratch1;
    int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
    __ lwz(sync_state, sync_state_offs, sync_state);
    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
    __ bne(CCR0, slow_path);

    // We don't generate a local frame and don't align the stack, because
    // we don't even call stub code (we generate the code inline)
    // and there is no safepoint on this path.

    // Load java parameters.
    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
    const Register argP    = R15_esp;
    const Register crc     = R3_ARG1;  // crc value
    const Register data    = R4_ARG2;  // address of java byte value (kernel_crc32 needs address)
    const Register dataLen = R5_ARG3;  // source data len (1 byte). Not used because calling the single-byte emitter.
    const Register table   = R6_ARG4;  // address of crc32 table
    const Register tmp     = dataLen;  // Reuse unused len register to show we don't actually need a separate tmp here.

    BLOCK_COMMENT("CRC32_update {");

    // Arguments are reversed on java expression stack
#ifdef VM_LITTLE_ENDIAN
    __ addi(data, argP, 0+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
                                       // Being passed as an int, the single byte is at offset +0.
#else
    __ addi(data, argP, 3+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
                                       // Being passed from java as an int, the single byte is at offset +3.
#endif
    __ lwz(crc, 2*wordSize, argP);     // Current crc state, zero extend to 64 bit to have a clean register.

    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
    __ kernel_crc32_singleByte(crc, data, dataLen, table, tmp);

    // Restore caller sp for c2i case and return.
    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
    __ blr();

    // Generate a vanilla native entry as the slow path.
    BLOCK_COMMENT("} CRC32_update");
    BIND(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
    return start;
  }

  return NULL;
}

// CRC32 Intrinsics.
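//
// For orientation, a rough sketch (not the actual JDK source) of the Java side
// whose native methods the entries here intercept:
//
//   // java.util.zip.CRC32 (sketch)
//   public void update(byte[] b, int off, int len) {
//     // bounds checks elided
//     crc = updateBytes(crc, b, off, len);  // served by the entry generated below
//   }
//   private static native int updateBytes(int crc, byte[] b, int off, int len);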
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(     int crc, byte[] b,  int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address start = __ pc();  // Remember stub start address (is rtn value).
    Label slow_path;

    // Safepoint check
    const Register sync_state = R11_scratch1;
    int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
    __ lwz(sync_state, sync_state_offs, sync_state);
    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
    __ bne(CCR0, slow_path);

    // We don't generate a local frame and don't align the stack, because
    // we don't even call stub code (we generate the code inline)
    // and there is no safepoint on this path.

    // Load parameters.
    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
    const Register argP    = R15_esp;
    const Register crc     = R3_ARG1;  // crc value
    const Register data    = R4_ARG2;  // address of java byte array
    const Register dataLen = R5_ARG3;  // source data len
    const Register table   = R6_ARG4;  // address of crc32 table

    const Register t0      = R9;       // scratch registers for crc calculation
    const Register t1      = R10;
    const Register t2      = R11;
    const Register t3      = R12;

    const Register tc0     = R2;       // registers to hold pre-calculated column addresses
    const Register tc1     = R7;
    const Register tc2     = R8;
    const Register tc3     = table;    // table address is reconstructed at the end of kernel_crc32_* emitters

    const Register tmp     = t0;       // Only used very locally to calculate byte buffer address.

    // Arguments are reversed on java expression stack.
    // Calculate address of start element.
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { // Used for "updateByteBuffer direct".
      BLOCK_COMMENT("CRC32_updateByteBuffer {");
      // crc     @ (SP + 5W) (32bit)
      // buf     @ (SP + 3W) (64bit ptr to long array)
      // off     @ (SP + 2W) (32bit)
      // dataLen @ (SP + 1W) (32bit)
      // data = buf + off
      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
      __ lwz( crc,     5*wordSize, argP);  // current crc state
      __ add( data, data, tmp);            // Add byte buffer offset.
    } else {                                                         // Used for "updateBytes update".
      BLOCK_COMMENT("CRC32_updateBytes {");
      // crc     @ (SP + 4W) (32bit)
      // buf     @ (SP + 3W) (64bit ptr to byte array)
      // off     @ (SP + 2W) (32bit)
      // dataLen @ (SP + 1W) (32bit)
      // data = buf + off + base_offset
      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
      __ add( data, data, tmp);            // add byte buffer offset
      __ lwz( crc,     4*wordSize, argP);  // current crc state
      __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
    }

    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);

    // Performance measurements show the 1word and 2word variants to be almost equivalent,
    // with very light advantages for the 1word variant. We chose the 1word variant for
1377     // its code compactness.
1378     __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3);
1379
1380     // Restore caller sp for c2i case and return.
1381     __ mr(R1_SP, R21_sender_SP);  // Cut the stack back to where the caller started.
1382     __ blr();
1383
1384     // Generate a vanilla native entry as the slow path.
1385     BLOCK_COMMENT("} CRC32_updateBytes(Buffer)");
1386     BIND(slow_path);
1387     __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
1388     return start;
1389   }
1390
1391   return NULL;
1392 }
1393
1394 // Not supported
1395 address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
1396   return NULL;
1397 }
1398
1399 // =============================================================================
1400 // Exceptions
1401
1402 void TemplateInterpreterGenerator::generate_throw_exception() {
1403   Register Rexception    = R17_tos,
1404            Rcontinuation = R3_RET;
1405
1406   // --------------------------------------------------------------------------
1407   // Entry point if a method returns with a pending exception (rethrow).
1408   Interpreter::_rethrow_exception_entry = __ pc();
1409   {
1410     __ restore_interpreter_state(R11_scratch1);  // Sets R11_scratch1 = fp.
1411     __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
1412     __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
1413
1414     // Compiled code destroys templateTableBase, reload.
1415     __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
1416   }
1417
1418   // Entry point if an interpreted method throws an exception (throw).
1419   Interpreter::_throw_exception_entry = __ pc();
1420   {
1421     __ mr(Rexception, R3_RET);
1422
1423     __ verify_thread();
1424     __ verify_oop(Rexception);
1425
1426     // Expression stack must be empty before entering the VM in case of an exception.
1427     __ empty_expression_stack();
1428     // Find the exception handler address and preserve the exception oop.
1429     // Call a C routine to find the handler and jump to it.
1430     __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Rexception);
1431     __ mtctr(Rcontinuation);
1432     // Push exception for exception handler bytecodes.
1433     __ push_ptr(Rexception);
1434
1435     // Jump to the exception handler (this may be the remove-activation entry!).
1436     __ bctr();
1437   }
1438
1439   // If the exception is not handled in the current frame, the frame is
1440   // removed and the exception is rethrown (i.e. the exception
1441   // continuation is _rethrow_exception).
1442   //
1443   // Note: At this point the bci is still the bci for the instruction
1444   // which caused the exception, and the expression stack is
1445   // empty. Thus, for any VM calls at this point, GC will find a legal
1446   // oop map (with empty expression stack).
1447
1448   // In current activation
1449   // tos: exception
1450   // bcp: exception bcp
1451
1452   // --------------------------------------------------------------------------
1453   // JVMTI PopFrame support
1454
1455   Interpreter::_remove_activation_preserving_args_entry = __ pc();
1456   {
1457     // Set the popframe_processing bit in popframe_condition, indicating that we are
1458     // currently handling popframe, so that call_VMs that may happen later do not
1459     // trigger new popframe handling cycles.
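    // Equivalent C view of the read-modify-write emitted below (illustration only):
    //   thread->_popframe_condition |= JavaThread::popframe_processing_bit;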
1460     __ lwz(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
1461     __ ori(R11_scratch1, R11_scratch1, JavaThread::popframe_processing_bit);
1462     __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
1463
1464     // Empty the expression stack, as in normal exception handling.
1465     __ empty_expression_stack();
1466     __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
1467
1468     // Check to see whether we are returning to a deoptimized frame.
1469     // (The PopFrame call ensures that the caller of the popped frame is
1470     // either interpreted or compiled and deoptimizes it if compiled.)
1471     // Note that we don't compare the return PC against the
1472     // deoptimization blob's unpack entry because of the presence of
1473     // adapter frames in C2.
1474     Label Lcaller_not_deoptimized;
1475     Register return_pc = R3_ARG1;
1476     __ ld(return_pc, 0, R1_SP);
1477     __ ld(return_pc, _abi(lr), return_pc);
1478     __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc);
1479     __ cmpdi(CCR0, R3_RET, 0);
1480     __ bne(CCR0, Lcaller_not_deoptimized);
1481
1482     // The deoptimized case.
1483     // In this case, we can't call dispatch_next() after the frame is
1484     // popped, but instead must save the incoming arguments and restore
1485     // them after deoptimization has occurred.
1486     __ ld(R4_ARG2, in_bytes(Method::const_offset()), R19_method);
1487     __ lhz(R4_ARG2 /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), R4_ARG2);
1488     __ slwi(R4_ARG2, R4_ARG2, Interpreter::logStackElementSize);
1489     __ addi(R5_ARG3, R18_locals, Interpreter::stackElementSize);
1490     __ subf(R5_ARG3, R4_ARG2, R5_ARG3);
1491     // Save these arguments.
1492     __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R16_thread, R4_ARG2, R5_ARG3);
1493
1494     // Inform deoptimization that it is responsible for restoring these arguments.
1495     __ load_const_optimized(R11_scratch1, JavaThread::popframe_force_deopt_reexecution_bit);
1496     __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
1497
1498     // Return from the current method into the deoptimization blob. We will eventually
1499     // end up in the deopt interpreter entry; deoptimization has prepared everything such
1500     // that we re-execute the call that called us.
1501     __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*reload return_pc*/ return_pc, R11_scratch1, R12_scratch2);
1502     __ mtlr(return_pc);
1503     __ blr();
1504
1505     // The non-deoptimized case.
1506     __ bind(Lcaller_not_deoptimized);
1507
1508     // Clear the popframe condition flag.
1509     __ li(R0, 0);
1510     __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
1511
1512     // Get out of the current method and re-execute the call that called us.
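    // (Sketch of what follows: merge_frames pops the current activation, the
    //  caller's interpreter state is restored, and dispatch_next then
    //  re-executes the invoke bytecode that created the popped frame -- the
    //  PopFrame semantics JVMTI requires.)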
1513     __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
1514     __ restore_interpreter_state(R11_scratch1);
1515     __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
1516     __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
1517     if (ProfileInterpreter) {
1518       __ set_method_data_pointer_for_bcp();
1519       __ ld(R11_scratch1, 0, R1_SP);
1520       __ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1);
1521     }
1522 #if INCLUDE_JVMTI
1523     Label L_done;
1524
1525     __ lbz(R11_scratch1, 0, R14_bcp);
1526     __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
1527     __ bne(CCR0, L_done);
1528
1529     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
1530     // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
1531     __ ld(R4_ARG2, 0, R18_locals);
1532     __ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
1533     __ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
1534     __ cmpdi(CCR0, R4_ARG2, 0);
1535     __ beq(CCR0, L_done);
1536     __ std(R4_ARG2, wordSize, R15_esp);
1537     __ bind(L_done);
1538 #endif // INCLUDE_JVMTI
1539     __ dispatch_next(vtos);
1540   }
1541   // end of JVMTI PopFrame support
1542
1543   // --------------------------------------------------------------------------
1544   // Remove activation exception entry.
1545   // This is jumped to if an interpreted method can't handle an exception itself
1546   // (we come from the throw/rethrow exception entry above). We're going to call
1547   // into the VM to find the exception handler in the caller, pop the current
1548   // frame and return the handler we calculated.
1549   Interpreter::_remove_activation_entry = __ pc();
1550   {
1551     __ pop_ptr(Rexception);
1552     __ verify_thread();
1553     __ verify_oop(Rexception);
1554     __ std(Rexception, in_bytes(JavaThread::vm_result_offset()), R16_thread);
1555
1556     __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ true);
1557     __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI, false);
1558
1559     __ get_vm_result(Rexception);
1560
1561     // We are done with this activation frame; find out where to go next.
1562     // The continuation point will be an exception handler, which expects
1563     // the following registers to be set up:
1564     //
1565     // RET:  exception oop
1566     // ARG2: issuing PC (see generate_exception_blob()), only used if the caller is compiled.
1567
1568     Register return_pc = R31;  // Needs to survive the runtime call.
1569     __ ld(return_pc, 0, R1_SP);
1570     __ ld(return_pc, _abi(lr), return_pc);
1571     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, return_pc);
1572
1573     // Remove the current activation.
1574     __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
1575
1576     __ mr(R4_ARG2, return_pc);
1577     __ mtlr(R3_RET);
1578     __ mr(R3_RET, Rexception);
1579     __ blr();
1580   }
1581 }
1582
1583 // JVMTI ForceEarlyReturn support.
1584 // Returns "in the middle" of a method with a "fake" return value.
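// (For context: a JVMTI agent requests this via, e.g., ForceEarlyReturnInt(thread, 42);
//  the entry below then unwinds the top activation and delivers 42 as if the
//  method had executed "return 42;" at its current bci.)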
1585 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1586
1587   Register Rscratch1 = R11_scratch1,
1588            Rscratch2 = R12_scratch2;
1589
1590   address entry = __ pc();
1591   __ empty_expression_stack();
1592
1593   __ load_earlyret_value(state, Rscratch1);
1594
1595   __ ld(Rscratch1, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
1596   // Clear the earlyret state.
1597   __ li(R0, 0);
1598   __ stw(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rscratch1);
1599
1600   __ remove_activation(state, false, false);
1601   // Copied from TemplateTable::_return.
1602   // Restoration of lr is done by remove_activation.
1603   switch (state) {
1604     case ltos:
1605     case btos:
1606     case ctos:
1607     case stos:
1608     case atos:
1609     case itos: __ mr(R3_RET, R17_tos); break;
1610     case ftos:
1611     case dtos: __ fmr(F1_RET, F15_ftos); break;
1612     case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
1613                // to become visible before the reference to the object gets stored anywhere.
1614                __ membar(Assembler::StoreStore); break;
1615     default  : ShouldNotReachHere();
1616   }
1617   __ blr();
1618
1619   return entry;
1620 } // end of ForceEarlyReturn support
1621
1622 //-----------------------------------------------------------------------------
1623 // Helper for vtos entry point generation
1624
1625 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
1626                                                          address& bep,
1627                                                          address& cep,
1628                                                          address& sep,
1629                                                          address& aep,
1630                                                          address& iep,
1631                                                          address& lep,
1632                                                          address& fep,
1633                                                          address& dep,
1634                                                          address& vep) {
1635   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1636   Label L;
1637
1638   aep = __ pc();  __ push_ptr();  __ b(L);
1639   fep = __ pc();  __ push_f();    __ b(L);
1640   dep = __ pc();  __ push_d();    __ b(L);
1641   lep = __ pc();  __ push_l();    __ b(L);
1642   __ align(32, 12, 24); // align L
1643   bep = cep = sep =
1644   iep = __ pc();  __ push_i();
1645   vep = __ pc();
1646   __ bind(L);
1647   generate_and_dispatch(t);
1648 }
1649
1650 //-----------------------------------------------------------------------------
1651
1652 // Non-product code
1653 #ifndef PRODUCT
1654 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1655   //__ flush_bundle();
1656   address entry = __ pc();
1657
1658   const char *bname = NULL;
1659   uint tsize = 0;
1660   switch(state) {
1661     case ftos:
1662       bname = "trace_code_ftos {";
1663       tsize = 2;
1664       break;
1665     case btos:
1666       bname = "trace_code_btos {";
1667       tsize = 2;
1668       break;
1669     case ctos:
1670       bname = "trace_code_ctos {";
1671       tsize = 2;
1672       break;
1673     case stos:
1674       bname = "trace_code_stos {";
1675       tsize = 2;
1676       break;
1677     case itos:
1678       bname = "trace_code_itos {";
1679       tsize = 2;
1680       break;
1681     case ltos:
1682       bname = "trace_code_ltos {";
1683       tsize = 3;
1684       break;
1685     case atos:
1686       bname = "trace_code_atos {";
1687       tsize = 2;
1688       break;
1689     case vtos:
1690       // Note: In case of vtos, the topmost stack value could be an int or a double.
1691       // In case of a double (2 slots) we won't see the 2nd stack value.
1692       // Maybe we should simply print the topmost 3 stack slots to cope with the problem.
1693       bname = "trace_code_vtos {";
1694       tsize = 2;
1695
1696       break;
1697     case dtos:
1698       bname = "trace_code_dtos {";
1699       tsize = 3;
1700       break;
1701     default:
1702       ShouldNotReachHere();
1703   }
1704   BLOCK_COMMENT(bname);
1705
1706   // Support short-cut for TraceBytecodesAt.
1707   // Don't call into the VM if we don't want to trace, to speed things up.
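  // C equivalent of the emitted shortcut (illustration only):
  //   if (BytecodeCounter::_counter_value < TraceBytecodesAt) goto skip_vm_call;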
1708   Label Lskip_vm_call;
1709   if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
1710     int offs1 = __ load_const_optimized(R11_scratch1, (address) &TraceBytecodesAt, R0, true);
1711     int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
1712     __ ld(R11_scratch1, offs1, R11_scratch1);
1713     __ lwa(R12_scratch2, offs2, R12_scratch2);
1714     __ cmpd(CCR0, R12_scratch2, R11_scratch1);
1715     __ blt(CCR0, Lskip_vm_call);
1716   }
1717
1718   __ push(state);
1719   // Load the 2 topmost expression stack values.
1720   __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
1721   __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
1722   __ mflr(R31);
1723   __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
1724   __ mtlr(R31);
1725   __ pop(state);
1726
1727   if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
1728     __ bind(Lskip_vm_call);
1729   }
1730   __ blr();
1731   BLOCK_COMMENT("} trace_code");
1732   return entry;
1733 }
1734
1735 void TemplateInterpreterGenerator::count_bytecode() {
1736   int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
1737   __ lwz(R12_scratch2, offs, R11_scratch1);
1738   __ addi(R12_scratch2, R12_scratch2, 1);
1739   __ stw(R12_scratch2, offs, R11_scratch1);
1740 }
1741
1742 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1743   int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
1744   __ lwz(R12_scratch2, offs, R11_scratch1);
1745   __ addi(R12_scratch2, R12_scratch2, 1);
1746   __ stw(R12_scratch2, offs, R11_scratch1);
1747 }
1748
1749 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1750   const Register addr = R11_scratch1,
1751                  tmp  = R12_scratch2;
1752   // Get the index, shift out the old bytecode, bring in the new bytecode, and store it.
1753   // _index = (_index >> log2_number_of_codes) |
1754   //          (bytecode << log2_number_of_codes);
1755   int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
1756   __ lwz(tmp, offs1, addr);
1757   __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
1758   __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
1759   __ stw(tmp, offs1, addr);
1760
1761   // Bump the bucket contents.
1762   // _counters[_index] ++;
1763   int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
1764   __ sldi(tmp, tmp, LogBytesPerInt);
1765   __ add(addr, tmp, addr);
1766   __ lwz(tmp, offs2, addr);
1767   __ addi(tmp, tmp, 1);
1768   __ stw(tmp, offs2, addr);
1769 }
1770
1771 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1772   // Call a little run-time stub to avoid blow-up for each bytecode.
1773   // The run-time stub saves the right registers, depending on
1774   // the tosca in-state for the given template.
1775
1776   assert(Interpreter::trace_code(t->tos_in()) != NULL,
1777          "entry must have been generated");
1778
1779   // Note: we destroy LR here.
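  // (bl overwrites the link register with the return address of this call
  //  site, so whatever the template kept in LR is lost across trace_bytecode.)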
1780   __ bl(Interpreter::trace_code(t->tos_in()));
1781 }
1782
1783 void TemplateInterpreterGenerator::stop_interpreter_at() {
1784   Label L;
1785   int offs1 = __ load_const_optimized(R11_scratch1, (address) &StopInterpreterAt, R0, true);
1786   int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
1787   __ ld(R11_scratch1, offs1, R11_scratch1);
1788   __ lwa(R12_scratch2, offs2, R12_scratch2);
1789   __ cmpd(CCR0, R12_scratch2, R11_scratch1);
1790   __ bne(CCR0, L);
1791   __ illtrap();  // Trap exactly when the bytecode counter hits StopInterpreterAt.
1792   __ bind(L);
1793 }
1794
1795 #endif // !PRODUCT
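// (Debugging aid, as far as the code above suggests: in a non-product build,
//  running with -XX:StopInterpreterAt=<n> makes the emitted check execute an
//  illtrap once the global bytecode counter equals n, handing control to a
//  native debugger at a reproducible execution point.)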