/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#ifndef CC_INTERP
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

//-----------------------------------------------------------------------------

// Actually we should never reach here since we do stack overflow checks before pushing any frame.
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  __ unimplemented("generate_StackOverflowError_handler");
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  __ empty_expression_stack();
  __ load_const_optimized(R4_ARG2, (address) name);
  // Index is in R17_tos.
  __ mr(R5_ARG3, R17_tos);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException));
  return entry;
}

#if 0
// Call special ClassCastException constructor taking object to cast
// and target class as arguments.
address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler() {
  address entry = __ pc();

  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Thread will be loaded to R3_ARG1.
  // Target class oop is in register R5_ARG3 by convention!
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3);
  // Above call must not return here since exception pending.
  DEBUG_ONLY(__ should_not_reach_here();)
  return entry;
}
#endif

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Load exception object.
  // Thread will be loaded to R3_ARG1.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), R17_tos);
#ifdef ASSERT
  // Above call must not return here since exception pending.
  __ should_not_reach_here();
#endif
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  address entry = __ pc();
  //__ untested("generate_exception_handler_common");
  Register Rexception = R17_tos;

  // Expression stack must be empty before entering the VM if an exception happened.
  __ empty_expression_stack();

  __ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
  if (pass_oop) {
    __ mr(R5_ARG3, Rexception);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), false);
  } else {
    __ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), false);
  }

  // Throw exception.
  __ mr(R3_ARG1, Rexception);
  __ load_const_optimized(R11_scratch1, Interpreter::throw_exception_entry(), R12_scratch2);
  __ mtctr(R11_scratch1);
  __ bctr();

  return entry;
}

address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ unimplemented("generate_continuation_for");
  return entry;
}

// This entry is returned to when a call returns to the interpreter.
// When we arrive here, we expect that the callee stack frame is already popped.
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Move the value out of the return register back to the TOS cache of current frame.
  switch (state) {
  case ltos:
  case btos:
  case ctos:
  case stos:
  case atos:
  case itos: __ mr(R17_tos, R3_RET); break;   // RET -> TOS cache
  case ftos:
  case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
  case vtos: break;                           // Nothing to do, this was a void return.
  default  : ShouldNotReachHere();
  }

  __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
  __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
  __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);

  // Compiled code destroys templateTableBase, reload.
  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);

  if (state == atos) {
    __ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
  }

  const Register cache = R11_scratch1;
  const Register size  = R12_scratch2;
  __ get_cache_and_index_at_bcp(cache, 1, index_size);

  // Get least significant byte of 64 bit value:
#if defined(VM_LITTLE_ENDIAN)
  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache);
#else
  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
#endif
  __ sldi(size, size, Interpreter::logStackElementSize);
  __ add(R15_esp, R15_esp, size);
  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  // If state != vtos, we're returning from a native method, which put its result
  // into the result register. So move the value out of the return register back
  // to the TOS cache of current frame.

  switch (state) {
  case ltos:
  case btos:
  case ctos:
  case stos:
  case atos:
  case itos: __ mr(R17_tos, R3_RET); break;   // GR_RET -> TOS cache
  case ftos:
  case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
  case vtos: break;                           // Nothing to do, this was a void return.
  default  : ShouldNotReachHere();
  }

  // Load LcpoolCache @@@ should be already set!
  __ get_constant_pool_cache(R27_constPoolCache);

  // Handle a pending exception, fall through if none.
  __ check_and_forward_exception(R11_scratch1, R12_scratch2);

  // Start executing bytecodes.
  __ dispatch_next(state, step);

  return entry;
}

// A result handler converts the native result into java format.
// Use the shared code between c++ and template interpreter.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  return AbstractInterpreterGenerator::generate_result_handler_for(type);
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();

  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));

  return entry;
}

// Helpers for commoning out cases in the various type of method entries.

// Increment invocation count & check for overflow.
//
// Note: checking for negative value instead of overflow
// so we have a 'sticky' overflow test.
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in method or in MDO depending if we're profiling or not.
  Register Rscratch1   = R11_scratch1;
  Register Rscratch2   = R12_scratch2;
  Register R3_counters = R3_ARG1;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    const int mask      = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
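    // Illustrative note on the mask: it selects the Tier0InvokeNotifyFreqLog
    // counter bits just above count_shift, so the and_ below yields zero
    // (sending us to the overflow path) exactly when those bits wrap around,
    // i.e. at a fixed period of the invocation count rather than just once.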
    Label no_mdo;
    if (ProfileInterpreter) {
      const Register Rmdo = Rscratch1;
      // If no method data exists, go to profile_continue.
      __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
      __ cmpdi(CCR0, Rmdo, 0);
      __ beq(CCR0, no_mdo);

      // Increment invocation counter in the MDO.
      const int mdo_ic_offs = in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ lwz(Rscratch2, mdo_ic_offs, Rmdo);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mdo_ic_offs, Rmdo);
      __ load_const_optimized(Rscratch1, mask, R0);
      __ and_(Rscratch1, Rscratch2, Rscratch1);
      __ bne(CCR0, done);
      __ b(*overflow);
    }

    // Increment counter in MethodCounters*.
    const int mo_ic_offs = in_bytes(MethodCounters::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
    __ bind(no_mdo);
    __ get_method_counters(R19_method, R3_counters, done);
    __ lwz(Rscratch2, mo_ic_offs, R3_counters);
    __ addi(Rscratch2, Rscratch2, increment);
    __ stw(Rscratch2, mo_ic_offs, R3_counters);
    __ load_const_optimized(Rscratch1, mask, R0);
    __ and_(Rscratch1, Rscratch2, Rscratch1);
    __ beq(CCR0, *overflow);

    __ bind(done);

  } else {

    // Update standard invocation counters.
    Register Rsum_ivc_bec = R4_ARG2;
    __ get_method_counters(R19_method, R3_counters, done);
    __ increment_invocation_counter(R3_counters, Rsum_ivc_bec, R12_scratch2);
    // Increment interpreter invocation counter.
    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop.
      __ lwz(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
      __ addi(R12_scratch2, R12_scratch2, 1);
      __ stw(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
    }
    // Check if we must create a method data obj.
    if (ProfileInterpreter && profile_method != NULL) {
      const Register profile_limit = Rscratch1;
      int pl_offs = __ load_const_optimized(profile_limit, &InvocationCounter::InterpreterProfileLimit, R0, true);
      __ lwz(profile_limit, pl_offs, profile_limit);
      // Test to see if we should create a method data oop.
      __ cmpw(CCR0, Rsum_ivc_bec, profile_limit);
      __ blt(CCR0, *profile_method_continue);
      // If no method data exists, go to profile_method.
      __ test_method_data_pointer(*profile_method);
    }
    // Finally check for counter overflow.
    if (overflow) {
      const Register invocation_limit = Rscratch1;
      int il_offs = __ load_const_optimized(invocation_limit, &InvocationCounter::InterpreterInvocationLimit, R0, true);
      __ lwz(invocation_limit, il_offs, invocation_limit);
      assert(4 == sizeof(InvocationCounter::InterpreterInvocationLimit), "unexpected field size");
      __ cmpw(CCR0, Rsum_ivc_bec, invocation_limit);
      __ bge(CCR0, *overflow);
    }

    __ bind(done);
  }
}

// Generate code to initiate compilation on invocation counter overflow.
void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes one argument,
  // which indicates if the counter overflow occurs at a backwards branch (NULL bcp).
  // We pass zero in.
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  //
  // Unlike the C++ interpreter above: Check exceptions!
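  // Note: a non-NULL bcp argument would mark the overflow as occurring at a
  // backedge, the on-stack-replacement case; zero marks a plain method-entry
  // overflow.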
  // Assumption: Caller must set the flag "do_not_unlock_if_synchronized" if the monitor of a sync'ed
  // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.

  __ li(R4_ARG2, 0);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);

  // Returns verified_entry_point or NULL.
  // We ignore it in any case.
  __ b(continue_entry);
}

void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
  assert_different_registers(Rmem_frame_size, Rscratch1);
  __ generate_stack_overflow_check_with_compare_and_throw(Rmem_frame_size, Rscratch1);
}

void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
  __ unlock_object(R26_monitor, check_exceptions);
}

// Lock the current method, interpreter register window must be set up!
void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
  const Register Robj_to_lock = Rscratch2;

  {
    if (!flags_preloaded) {
      __ lwz(Rflags, method_(access_flags));
    }

#ifdef ASSERT
    // Check if the method needs synchronization.
    {
      Label Lok;
      __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
      __ btrue(CCR0, Lok);
      __ stop("method doesn't need synchronization");
      __ bind(Lok);
    }
#endif // ASSERT
  }

  // Get synchronization object to Rscratch2.
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label Lstatic;
    Label Ldone;

    __ testbitdi(CCR0, R0, Rflags, JVM_ACC_STATIC_BIT);
    __ btrue(CCR0, Lstatic);

    // Non-static case: load receiver obj from stack and we're done.
    __ ld(Robj_to_lock, R18_locals);
    __ b(Ldone);

    __ bind(Lstatic); // Static case: Lock the java mirror
    __ ld(Robj_to_lock, in_bytes(Method::const_offset()), R19_method);
    __ ld(Robj_to_lock, in_bytes(ConstMethod::constants_offset()), Robj_to_lock);
    __ ld(Robj_to_lock, ConstantPool::pool_holder_offset_in_bytes(), Robj_to_lock);
    __ ld(Robj_to_lock, mirror_offset, Robj_to_lock);

    __ bind(Ldone);
    __ verify_oop(Robj_to_lock);
  }

  // Got the oop to lock => execute!
  __ add_monitor_to_stack(true, Rscratch1, R0);

  __ std(Robj_to_lock, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
  __ lock_object(R26_monitor, Robj_to_lock);
}

// Generate a fixed interpreter frame for pure interpreter
// and I2N native transition frames.
//
// Before (stack grows downwards):
//
//         |  ...         |
//         |------------- |
//         |  java arg0   |
//         |  ...         |
//         |  java argn   |
//         |              |  <-  R15_esp
//         |              |
//         |--------------|
//         |  abi_112     |
//         |              |  <-  R1_SP
//         |==============|
//
//
// After:
//
//         |  ...         |
//         |  java arg0   |  <-  R18_locals
//         |  ...         |
//         |  java argn   |
//         |--------------|
//         |              |
//         |  java locals |
//         |              |
//         |--------------|
//         |  abi_48      |
//         |==============|
//         |              |
//         |  istate      |
//         |              |
//         |--------------|
//         |  monitor     |  <-  R26_monitor
//         |--------------|
//         |              |  <-  R15_esp
//         |  expression  |
//         |  stack       |
//         |              |
//         |--------------|
//         |              |
//         |  abi_112     |  <-  R1_SP
//         |==============|
//
// The topmost frame needs an ABI space of 112 bytes. This space is needed,
// since we call C code.
// The C function may spill its arguments to the caller's
// frame. When we call into Java, we don't need these spill slots. In order to save
// space on the stack, we resize the caller frame. However, the Java locals reside in
// the caller frame, so that frame has to be grown as needed. The frame_size for the
// current frame was calculated based on max_stack as size for the expression
// stack. At the call, just a part of the expression stack might be used.
// We don't want to waste this space and cut the frame back accordingly.
// The resulting amount for resizing is calculated as follows:
// resize    = (number_of_locals - number_of_arguments) * slot_size
//           + (R1_SP - R15_esp) + 48
//
// The size for the callee frame is calculated:
// framesize = 112 + max_stack + monitor + state_size
//
// max_stack:  Max number of slots on the expression stack, loaded from the method.
// monitor:    We statically reserve room for one monitor object.
// state_size: We save the current state of the interpreter to this area.
//
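// A worked example of the resize formula (illustrative numbers only): for a
// method with 2 arguments and 5 locals, 8-byte slots, and a caller whose esp
// currently lies 32 bytes above its SP (so R1_SP - R15_esp == -32), the
// caller is resized by (5 - 2) * 8 - 32 + 48 = 40 bytes before rounding to
// the frame alignment. The actual constants come from the frame:: and
// Interpreter:: definitions used below.
//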
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals) {
  Register parent_frame_resize = R6_ARG4, // Frame will grow by this number of bytes.
           top_frame_size      = R7_ARG5,
           Rconst_method       = R8_ARG6;

  assert_different_registers(Rsize_of_parameters, Rsize_of_locals, parent_frame_resize, top_frame_size);

  __ ld(Rconst_method, method_(const));
  __ lhz(Rsize_of_parameters /* number of params */,
         in_bytes(ConstMethod::size_of_parameters_offset()), Rconst_method);
  if (native_call) {
    // If we're calling a native method, we reserve space for the worst-case signature
    // handler varargs vector, which is max(Argument::n_register_parameters, parameter_count+2).
    // We add two slots to the parameter_count, one for the jni
    // environment and one for a possible native mirror.
    Label skip_native_calculate_max_stack;
    __ addi(top_frame_size, Rsize_of_parameters, 2);
    __ cmpwi(CCR0, top_frame_size, Argument::n_register_parameters);
    __ bge(CCR0, skip_native_calculate_max_stack);
    __ li(top_frame_size, Argument::n_register_parameters);
    __ bind(skip_native_calculate_max_stack);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    assert(Rsize_of_locals == noreg, "Rsize_of_locals not initialized"); // Only relevant value is Rsize_of_parameters.
  } else {
    __ lhz(Rsize_of_locals /* number of locals */, in_bytes(ConstMethod::size_of_locals_offset()), Rconst_method);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(Rsize_of_locals, Rsize_of_locals, Interpreter::logStackElementSize);
    __ lhz(top_frame_size, in_bytes(ConstMethod::max_stack_offset()), Rconst_method);
    __ sub(R11_scratch1, Rsize_of_locals, Rsize_of_parameters); // >=0
    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
    __ add(parent_frame_resize, parent_frame_resize, R11_scratch1);
  }

  // Compute top frame size.
  __ addi(top_frame_size, top_frame_size, frame::abi_reg_args_size + frame::ijava_state_size);

  // Cut back area between esp and max_stack.
  __ addi(parent_frame_resize, parent_frame_resize, frame::abi_minframe_size - Interpreter::stackElementSize);

  __ round_to(top_frame_size, frame::alignment_in_bytes);
  __ round_to(parent_frame_resize, frame::alignment_in_bytes);
  // parent_frame_resize = (locals-parameters) - (ESP-SP-ABI48) Rounded to frame alignment size.
  // Enlarge by locals-parameters (not in case of native_call), shrink by ESP-SP-ABI48.

  {
    // --------------------------------------------------------------------------
    // Stack overflow check

    Label cont;
    __ add(R11_scratch1, parent_frame_resize, top_frame_size);
    generate_stack_overflow_check(R11_scratch1, R12_scratch2);
  }

  // Set up interpreter state registers.

  __ add(R18_locals, R15_esp, Rsize_of_parameters);
  __ ld(R27_constPoolCache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
  __ ld(R27_constPoolCache, ConstantPool::cache_offset_in_bytes(), R27_constPoolCache);

  // Set method data pointer.
  if (ProfileInterpreter) {
    Label zero_continue;
    __ ld(R28_mdx, method_(method_data));
    __ cmpdi(CCR0, R28_mdx, 0);
    __ beq(CCR0, zero_continue);
    __ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
    __ bind(zero_continue);
  }

  if (native_call) {
    __ li(R14_bcp, 0); // Must initialize.
  } else {
    __ add(R14_bcp, in_bytes(ConstMethod::codes_offset()), Rconst_method);
  }

  // Resize parent frame.
  __ mflr(R12_scratch2);
  __ neg(parent_frame_resize, parent_frame_resize);
  __ resize_frame(parent_frame_resize, R11_scratch1);
  __ std(R12_scratch2, _abi(lr), R1_SP);

  __ addi(R26_monitor, R1_SP, -frame::ijava_state_size);
  __ addi(R15_esp, R26_monitor, -Interpreter::stackElementSize);

  // Store values.
  // R15_esp, R14_bcp, R26_monitor, R28_mdx are saved at java calls
  // in InterpreterMacroAssembler::call_from_interpreter.
  __ std(R19_method, _ijava_state_neg(method), R1_SP);
  __ std(R21_sender_SP, _ijava_state_neg(sender_sp), R1_SP);
  __ std(R27_constPoolCache, _ijava_state_neg(cpoolCache), R1_SP);
  __ std(R18_locals, _ijava_state_neg(locals), R1_SP);

  // Note: esp, bcp, monitor, mdx live in registers. Hence, the correct version can only
  // be found in the frame after save_interpreter_state is done. This is always true
  // for non-top frames. But when a signal occurs, dumping the top frame can go wrong,
  // because e.g. frame::interpreter_frame_bcp() will not access the correct value
  // (Enhanced Stack Trace).
  // The signal handler does not save the interpreter state into the frame.
  __ li(R0, 0);
#ifdef ASSERT
  // Fill remaining slots with constants.
  __ load_const_optimized(R11_scratch1, 0x5afe);
  __ load_const_optimized(R12_scratch2, 0xdead);
#endif
  // We have to initialize some frame slots for native calls (accessed by GC).
  if (native_call) {
    __ std(R26_monitor, _ijava_state_neg(monitors), R1_SP);
    __ std(R14_bcp, _ijava_state_neg(bcp), R1_SP);
    if (ProfileInterpreter) { __ std(R28_mdx, _ijava_state_neg(mdx), R1_SP); }
  }
#ifdef ASSERT
  else {
    __ std(R12_scratch2, _ijava_state_neg(monitors), R1_SP);
    __ std(R12_scratch2, _ijava_state_neg(bcp), R1_SP);
    __ std(R12_scratch2, _ijava_state_neg(mdx), R1_SP);
  }
  __ std(R11_scratch1, _ijava_state_neg(ijava_reserved), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(lresult), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(fresult), R1_SP);
#endif
  __ subf(R12_scratch2, top_frame_size, R1_SP);
  __ std(R0, _ijava_state_neg(oop_tmp), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);

  // Push top frame.
  __ push_frame(top_frame_size, R11_scratch1);
}

// End of helpers


// Support abs and sqrt like in compiler.
// For others we can use a normal (native) entry.

inline bool math_entry_available(AbstractInterpreter::MethodKind kind) {
  // Provide math entry with debugging on demand.
  // Note: Debugging changes which code will get executed:
  // Debugging or disabled InlineIntrinsics: the Java method gets interpreted and performs a native call.
  // Not debugging and enabled InlineIntrinsics: the processor instruction gets used.
  // Results might differ slightly due to rounding etc.
  if (!InlineIntrinsics && (!FLAG_IS_ERGO(InlineIntrinsics))) return false; // Generate a vanilla entry.

  return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
          (kind==Interpreter::java_lang_math_abs));
}

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  if (!math_entry_available(kind)) {
    NOT_PRODUCT(__ should_not_reach_here();)
    return Interpreter::entry_for_kind(Interpreter::zerolocals);
  }

  Label Lslow_path;
  const Register Rjvmti_mode = R11_scratch1;
  address entry = __ pc();

  // Provide math entry with debugging on demand.
  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
  __ cmpwi(CCR0, Rjvmti_mode, 0);
  __ bne(CCR0, Lslow_path); // jvmti_mode!=0

  __ lfd(F1_RET, Interpreter::stackElementSize, R15_esp);

  // Pop c2i arguments (if any) off when we return.
#ifdef ASSERT
  __ ld(R9_ARG7, 0, R1_SP);
  __ ld(R10_ARG8, 0, R21_sender_SP);
  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
  __ asm_assert_eq("backlink", 0x545);
#endif // ASSERT
  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.

  if (kind == Interpreter::java_lang_math_sqrt) {
    __ fsqrt(F1_RET, F1_RET);
  } else if (kind == Interpreter::java_lang_math_abs) {
    __ fabs(F1_RET, F1_RET);
  } else {
    ShouldNotReachHere();
  }

  // And we're done.
  __ blr();

  // Provide slow path for JVMTI case.
  __ bind(Lslow_path);
  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R12_scratch2);
  __ flush();

  return entry;
}
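// Illustrative sketch (not generated code): for Math.sqrt the entry above
// effectively performs
//   F1_RET = fsqrt(*(double*)(R15_esp + stackElementSize)); // arg from TOS
//   R1_SP  = R21_sender_SP;                                 // pop any c2i extension
//   blr();                                                  // result stays in F1_RET
// i.e. no interpreter frame is ever built for the intrinsic.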
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
//
// On entry:
//   R19_method    - method
//   R16_thread    - JavaThread*
//   R15_esp       - intptr_t* sender tos
//
//   abstract stack (grows up)
//     [  IJava (caller of JNI callee)  ]  <-- ASP
//        ...
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {

  address entry = __ pc();

  const bool inc_counter = UseCompiler || CountCompiledCalls;

  // -----------------------------------------------------------------------------
  // Allocate a new frame that represents the native callee (i2n frame).
  // This is not a full-blown interpreter frame, but in particular, the
  // following registers are valid after this:
  // - R19_method
  // - R18_locals (points to start of arguments to the native function)
  //
  //   abstract stack (grows up)
  //     [  IJava (caller of JNI callee)  ]  <-- ASP
  //        ...

  const Register signature_handler_fd = R11_scratch1;
  const Register pending_exception    = R0;
  const Register result_handler_addr  = R31;
  const Register native_method_fd     = R11_scratch1;
  const Register access_flags         = R22_tmp2;
  const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
  const Register sync_state           = R12_scratch2;
  const Register sync_state_addr      = sync_state;   // Address is dead after use.
  const Register suspend_flags        = R11_scratch1;

  //=============================================================================
  // Allocate new frame and initialize interpreter state.

  Label exception_return;
  Label exception_return_sync_check;
  Label stack_overflow_return;

  // Generate new interpreter state and jump to stack_overflow_return in case of
  // a stack overflow.
  //generate_compute_interpreter_state(stack_overflow_return);

  Register size_of_parameters = R22_tmp2;

  generate_fixed_frame(true, size_of_parameters, noreg /* unused */);

  //=============================================================================
  // Increment invocation counter. On overflow, entry to JNI method
  // will be compiled.
  Label invocation_counter_overflow, continue_after_compile;
  if (inc_counter) {
    if (synchronized) {
      // Since at this point in the method invocation the exception handler
      // would try to exit the monitor of synchronized methods which hasn't
      // been entered yet, we set the thread local variable
      // _do_not_unlock_if_synchronized to true. If any exception was thrown by
      // runtime, exception handling i.e. unlock_if_synchronized_method will
      // check this thread local flag.
      // This flag has two effects: it forces an unwind in the topmost
      // interpreter frame, and it suppresses the unlock while doing so.
      __ li(R0, 1);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

    __ BIND(continue_after_compile);
    // Reset the _do_not_unlock_if_synchronized flag.
    if (synchronized) {
      __ li(R0, 0);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
  }

  // access_flags = method->access_flags();
  // Load access flags.
  assert(access_flags->is_nonvolatile(),
         "access_flags must be in a non-volatile register");
  // Type check.
  assert(4 == sizeof(AccessFlags), "unexpected field size");
  __ lwz(access_flags, method_(access_flags));

  // We don't want to reload R19_method and access_flags after calls
  // to some helper functions.
  assert(R19_method->is_nonvolatile(),
         "R19_method must be a non-volatile register");

  // Check for synchronized methods. Must happen AFTER invocation counter
  // check, so method is not locked if counter overflows.

  if (synchronized) {
    lock_method(access_flags, R11_scratch1, R12_scratch2, true);

    // Update monitor in state.
    __ ld(R11_scratch1, 0, R1_SP);
    __ std(R26_monitor, _ijava_state_neg(monitors), R11_scratch1);
  }

  // jvmti/jvmpi support
  __ notify_method_entry();

  //=============================================================================
  // Get and call the signature handler.

  __ ld(signature_handler_fd, method_(signature_handler));
  Label call_signature_handler;

  __ cmpdi(CCR0, signature_handler_fd, 0);
  __ bne(CCR0, call_signature_handler);

  // Method has never been called. Either generate a specialized
  // handler or point to the slow one.
  //
  // Pass parameter 'false' to avoid exception check in call_VM.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);

  // Check for an exception while looking up the target method. If we
  // incurred one, bail.
  __ ld(pending_exception, thread_(pending_exception));
  __ cmpdi(CCR0, pending_exception, 0);
  __ bne(CCR0, exception_return_sync_check); // Has pending exception.

  // Reload signature handler, it may have been created/assigned in the meanwhile.
  __ ld(signature_handler_fd, method_(signature_handler));
  __ twi_0(signature_handler_fd); // Order wrt. load of klass mirror and entry point (isync is below).

  __ BIND(call_signature_handler);

  // Before we call the signature handler we push a new frame to
  // protect the interpreter frame volatile registers when we return
  // from jni but before we can get back to Java.

  // First set the frame anchor while the SP/FP registers are
  // convenient and the slow signature handler can use this same frame
  // anchor.

  // We have a TOP_IJAVA_FRAME here, which belongs to us.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);

  // Now the interpreter frame (and its call chain) have been
  // invalidated and flushed. We are now protected against eager
  // being enabled in native code. Even if it goes eager the
  // registers will be reloaded as clean and we will invalidate after
  // the call so no spurious flush should be possible.

  // Call signature handler and pass locals address.
  //
  // Our signature handlers copy required arguments to the C stack
  // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
  __ mr(R3_ARG1, R18_locals);
#if !defined(ABI_ELFv2)
  __ ld(signature_handler_fd, 0, signature_handler_fd);
#endif

  __ call_stub(signature_handler_fd);

  // Remove the register parameter varargs slots we allocated in
  // compute_interpreter_state. SP+16 ends up pointing to the ABI
  // outgoing argument area.
  //
  // Not needed on PPC64.
  //__ add(SP, SP, Argument::n_register_parameters*BytesPerWord);

  assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
  // Save across call to native method.
  __ mr(result_handler_addr, R3_RET);

  __ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.

  // Set up fixed parameters and call the native method.
  // If the method is static, get mirror into R4_ARG2.
  {
    Label method_is_not_static;
    // access_flags is non-volatile and still valid, no need to reload it.
    __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
    __ bfalse(CCR0, method_is_not_static);

    // constants = method->constants();
    __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
    __ ld(R11_scratch1, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
    // pool_holder = method->constants()->pool_holder();
    __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
          R11_scratch1/*constants*/);

    const int mirror_offset = in_bytes(Klass::java_mirror_offset());

    // mirror = pool_holder->klass_part()->java_mirror();
    __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
    // state->_native_mirror = mirror;

    __ ld(R11_scratch1, 0, R1_SP);
    __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
    // R4_ARG2 = &state->_oop_temp;
    __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
    __ BIND(method_is_not_static);
  }

  // At this point, arguments have been copied off the stack into
  // their JNI positions. Oops are boxed in-place on the stack, with
  // handles copied to arguments. The result handler address is in a
  // register.

  // Pass JNIEnv address as first parameter.
  __ addir(R3_ARG1, thread_(jni_environment));

  // Load the native_method entry before we change the thread state.
  __ ld(native_method_fd, method_(native_function));

  //=============================================================================
  // Transition from _thread_in_Java to _thread_in_native. As soon as
  // we make this change the safepoint code needs to be certain that
  // the last Java frame we established is good. The pc in that frame
  // just needs to be near here not an actual return address.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0, _thread_in_native);
  __ release();

  // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));

  if (UseMembar) {
    __ fence();
  }

  //=============================================================================
  // Call the native method. Argument registers must not have been
  // overwritten since "__ call_stub(signature_handler);" (except for
  // ARG1 and ARG2 for static methods).
  __ call_c(native_method_fd);

  __ li(R0, 0);
  __ ld(R11_scratch1, 0, R1_SP);
  __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
  __ stfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
  __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); // reset

  // Note: C++ interpreter needs the following here:
  // The frame_manager_lr field, which we use for setting the last
  // java frame, gets overwritten by the signature handler. Restore
  // it now.
  //__ get_PC_trash_LR(R11_scratch1);
  //__ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);

  // Because of GC R19_method may no longer be valid.

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after
  // blocking.

  //=============================================================================
  // Switch thread to "native transition" state before reading the
  // synchronization state. This additional state is necessary
  // because reading and testing the synchronization state is not
  // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
  // in _thread_in_native state, loads _not_synchronized and is
  // preempted. VM thread changes sync state to synchronizing and
  // suspends threads for GC. Thread A is resumed to finish this
  // native method, but doesn't block here since it didn't see any
  // synchronization in progress, and escapes.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0/*thread_state*/, _thread_in_native_trans);
  __ release();
  __ stw(R0/*thread_state*/, thread_(thread_state));
  if (UseMembar) {
    __ fence();
  }
  // Write serialization page so that the VM thread can do a pseudo remote
  // membar. We use the current thread pointer to calculate a thread
  // specific offset to write to within the page. This minimizes bus
  // traffic due to cache line collision.
  else {
    __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
  }

  // Now before we return to java we must look for a current safepoint
  // (a new safepoint can not start since we entered native_trans).
  // We must check here because a current safepoint could be modifying
  // the callers registers right this moment.

  // Acquire isn't strictly necessary here because of the fence, but
  // sync_state is declared to be volatile, so we do it anyway
  // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
  int sync_state_offs = __ load_const_optimized(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/R0, true);

  // TODO PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
  __ lwz(sync_state, sync_state_offs, sync_state_addr);

  // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
  __ lwz(suspend_flags, thread_(suspend_flags));
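  // The two loads above feed two independent conditions (computed into CCR0
  // and CCR1 below): take the slow path if a safepoint is in progress OR if
  // this thread has a suspend request pending. Only if neither holds may we
  // transition straight back to _thread_in_Java.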
  Label sync_check_done;
  Label do_safepoint;
  // No synchronization in progress nor yet synchronized.
  __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
  // Not suspended.
  __ cmpwi(CCR1, suspend_flags, 0);

  __ bne(CCR0, do_safepoint);
  __ beq(CCR1, sync_check_done);
  __ bind(do_safepoint);
  __ isync();
  // Block. We do the call directly and leave the current
  // last_Java_frame setup undisturbed. We must save any possible
  // native result across the call. No oop is present.

  __ mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
  __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
            relocInfo::none);
#else
  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
            relocInfo::none);
#endif

  __ bind(sync_check_done);

  //=============================================================================
  // <<<<<< Back in Interpreter Frame >>>>>

  // We are in thread_in_native_trans here and back in the normal
  // interpreter frame. We don't have to do anything special about
  // safepoints and we can switch to Java mode anytime we are ready.

  // Note: frame::interpreter_frame_result has a dependency on how the
  // method result is saved across the call to post_method_exit. For
  // native methods it assumes that the non-FPU/non-void result is
  // saved in _native_lresult and a FPU result in _native_fresult. If
  // this changes then the interpreter_frame_result implementation
  // will need to be updated too.

  // On PPC64, we have stored the result directly after the native call.

  //=============================================================================
  // Back in Java

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0/*thread_state*/, _thread_in_Java);
  __ release();
  __ stw(R0/*thread_state*/, thread_(thread_state));
  if (UseMembar) {
    __ fence();
  }

  __ reset_last_Java_frame();

  // Jvmdi/jvmpi support. Whether we've got an exception pending or
  // not, and whether unlocking throws an exception or not, we notify
  // on native method exit. If we do have an exception, we'll end up
  // in the caller's context to handle it, so if we don't do the
  // notify here, we'll drop it on the floor.
  __ notify_method_exit(true/*native method*/,
                        ilgl /*illegal state (not used for native methods)*/,
                        InterpreterMacroAssembler::NotifyJVMTI,
                        false /*check_exceptions*/);

  //=============================================================================
  // Handle exceptions

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
  }

  // Reset active handles after returning from native.
  // thread->active_handles()->clear();
  __ ld(active_handles, thread_(active_handles));
  // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
  __ li(R0, 0);
  __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);

  Label exception_return_sync_check_already_unlocked;
  __ ld(R0/*pending_exception*/, thread_(pending_exception));
  __ cmpdi(CCR0, R0/*pending_exception*/, 0);
  __ bne(CCR0, exception_return_sync_check_already_unlocked);

  //-----------------------------------------------------------------------------
  // No exception pending.

  // Move native method result back into proper registers and return.
  // Invoke result handler (may unbox/promote).
  __ ld(R11_scratch1, 0, R1_SP);
  __ ld(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
  __ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
  __ call_stub(result_handler_addr);

  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

  // Must use the return pc which was loaded from the caller's frame
  // as the VM uses return-pc-patching for deoptimization.
  __ mtlr(R0);
  __ blr();

  //-----------------------------------------------------------------------------
  // An exception is pending. We call into the runtime only if the
  // caller was not interpreted. If it was interpreted the
  // interpreter will do the correct thing. If it isn't interpreted
  // (call stub/compiled code) we will change our return and continue.

  __ BIND(exception_return_sync_check);

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
  }
  __ BIND(exception_return_sync_check_already_unlocked);

  const Register return_pc = R31;

  __ ld(return_pc, 0, R1_SP);
  __ ld(return_pc, _abi(lr), return_pc);

  // Get the address of the exception handler.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                  R16_thread,
                  return_pc /* return pc */);
  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);

  // Load the PC of the exception handler into LR.
  __ mtlr(R3_RET);

  // Load exception into R3_ARG1 and clear pending exception in thread.
  __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
  __ li(R4_ARG2, 0);
  __ std(R4_ARG2, thread_(pending_exception));

  // Load the original return pc into R4_ARG2.
  __ mr(R4_ARG2/*issuing_pc*/, return_pc);

  // Return to exception handler.
  __ blr();

  //=============================================================================
  // Counter overflow.

  if (inc_counter) {
    // Handle invocation counter overflow.
    __ bind(invocation_counter_overflow);

    generate_counter_overflow(continue_after_compile);
  }

  return entry;
}

// Generic interpreted method entry to (asm) interpreter.
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  bool inc_counter = UseCompiler || CountCompiledCalls;
  address entry = __ pc();
  // Generate the code to allocate the interpreter stack frame.
  Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
           Rsize_of_locals     = R5_ARG3; // Written by generate_fixed_frame.

  generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);

#ifdef FAST_DISPATCH
  __ unimplemented("Fast dispatch in generate_normal_entry");
#if 0
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
  // Set bytecode dispatch table base.
#endif
#endif

  // --------------------------------------------------------------------------
  // Zero out non-parameter locals.
  // Note: *Always* zero out non-parameter locals as Sparc does. It's not
  // worth it to check the flag, just do it.
  Register Rslot_addr = R6_ARG4,
           Rnum       = R7_ARG5;
  Label Lno_locals, Lzero_loop;

  // Set up the zeroing loop.
  __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
  __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
  __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
  __ beq(CCR0, Lno_locals);
  __ li(R0, 0);
  __ mtctr(Rnum);

  // The zero locals loop.
  __ bind(Lzero_loop);
  __ std(R0, 0, Rslot_addr);
  __ addi(Rslot_addr, Rslot_addr, -Interpreter::stackElementSize);
  __ bdnz(Lzero_loop);

  __ bind(Lno_locals);

  // --------------------------------------------------------------------------
  // Counter increment and overflow check.
  Label invocation_counter_overflow,
        profile_method,
        profile_method_continue;
  if (inc_counter || ProfileInterpreter) {

    Register Rdo_not_unlock_if_synchronized_addr = R11_scratch1;
    if (synchronized) {
      // Since at this point in the method invocation the exception handler
      // would try to exit the monitor of synchronized methods which hasn't
      // been entered yet, we set the thread local variable
      // _do_not_unlock_if_synchronized to true. If any exception was thrown by
      // runtime, exception handling i.e. unlock_if_synchronized_method will
      // check this thread local flag.
      // This flag has two effects: it forces an unwind in the topmost
      // interpreter frame, and it suppresses the unlock while doing so.
      __ li(R0, 1);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }

    // Argument and return type profiling.
    __ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4);

    // Increment invocation counter and check for overflow.
    if (inc_counter) {
      generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    }

    __ bind(profile_method_continue);

    // Reset the _do_not_unlock_if_synchronized flag.
    if (synchronized) {
      __ li(R0, 0);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
  }

  // --------------------------------------------------------------------------
  // Locking of synchronized methods. Must happen AFTER invocation_counter
  // check and stack overflow check, so method is not locked if overflows.
  if (synchronized) {
    lock_method(R3_ARG1, R4_ARG2, R5_ARG3);
  }
#ifdef ASSERT
  else {
    Label Lok;
    __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
    __ andi_(R0, R0, JVM_ACC_SYNCHRONIZED);
    __ asm_assert_eq("method needs synchronization", 0x8521);
    __ bind(Lok);
  }
#endif // ASSERT

  __ verify_thread();

  // --------------------------------------------------------------------------
  // JVMTI support
  __ notify_method_entry();

  // --------------------------------------------------------------------------
  // Start executing instructions.
  __ dispatch_next(vtos);

  // --------------------------------------------------------------------------
  // Out of line counter overflow and MDO creation code.
  if (ProfileInterpreter) {
    // We have decided to profile this method in the interpreter.
    __ bind(profile_method);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
    __ set_method_data_pointer_for_bcp();
    __ b(profile_method_continue);
  }

  if (inc_counter) {
    // Handle invocation counter overflow.
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(profile_method_continue);
  }
  return entry;
}

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  return !math_entry_available(method_kind(m));
}

// How much stack a method activation needs in stack slots.
// We must calculate this exactly like in generate_fixed_frame.
// Note: This returns the conservative size assuming maximum alignment.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int max_alignment_size = 2;
  const int abi_scratch = frame::abi_reg_args_size;
  return method->max_locals() + method->max_stack() +
         frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
}

// Returns number of stackElementWords needed for the interpreter frame with the
// given sections.
// This overestimates the stack by one slot in case of alignments.
int AbstractInterpreter::size_activation(int max_stack,
                                         int temps,
                                         int extra_args,
                                         int monitors,
                                         int callee_params,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in InterpreterGenerator::generate_fixed_frame.
  assert(Interpreter::stackElementWords == 1, "sanity");
  const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
  const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
                                         (frame::abi_minframe_size / Interpreter::stackElementSize);
  const int size =
    max_stack                                          +
    (callee_locals - callee_params)                    +
    monitors * frame::interpreter_frame_monitor_size() +
    max_alignment_space                                +
    abi_scratch                                        +
    frame::ijava_state_size / Interpreter::stackElementSize;

  // Fixed size of an interpreter frame, aligned to 16 bytes (an even number of stack slots).
  return (size & -2);
}
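// Worked example (illustrative values only; the real constants are platform
// dependent): with max_stack == 4, no callee (callee_locals == callee_params
// == 0), one monitor of frame::interpreter_frame_monitor_size() == m slots,
// abi_scratch == a slots and frame::ijava_state_size / 8 == s slots, the
// frame needs 4 + m + max_alignment_space + a + s slots; '& -2' then rounds
// down to an even slot count, i.e. a 16-byte multiple with 8-byte slots.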
// Fills a skeletal interpreter frame generated during deoptimizations.
//
// Parameters:
//
// interpreter_frame != NULL:
//   set up the method, locals, and monitors.
//   The frame interpreter_frame, if not NULL, is guaranteed to be the
//   right size, as determined by a previous call to this method.
//   It is also guaranteed to be walkable even though it is in a skeletal state.
//
// is_top_frame == true:
//   We're processing the *oldest* interpreter frame!
//
// popframe_extra_args:
//   If this is != 0 we are returning to a deoptimized frame by popping
//   off the callee frame. We want to re-execute the call that called the
//   callee interpreted, but since the return to the interpreter would pop
//   the arguments off, we advance esp by dummy popframe_extra_args slots.
//   Popping those off re-establishes the stack layout as it was before the call.
//
void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_locals_count,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {

  const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
                                         (frame::abi_minframe_size / Interpreter::stackElementSize);

  intptr_t* locals_base = (caller->is_interpreted_frame()) ?
    caller->interpreter_frame_esp() + caller_actual_parameters :
    caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize);

  intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize;
  intptr_t* monitor      = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
  intptr_t* esp_base     = monitor - 1;
  intptr_t* esp          = esp_base - tempcount - popframe_extra_args;
  intptr_t* sp           = (intptr_t *) (((intptr_t) (esp_base - callee_locals_count + callee_param_count - method->max_stack() - abi_scratch)) & -StackAlignmentInBytes);
  intptr_t* sender_sp    = caller->sp() + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
  intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;

  interpreter_frame->interpreter_frame_set_method(method);
  interpreter_frame->interpreter_frame_set_locals(locals_base);
  interpreter_frame->interpreter_frame_set_cpcache(method->constants()->cache());
  interpreter_frame->interpreter_frame_set_esp(esp);
  interpreter_frame->interpreter_frame_set_monitor_end((BasicObjectLock *)monitor);
  interpreter_frame->interpreter_frame_set_top_frame_sp(top_frame_sp);
  if (!is_bottom_frame) {
    interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
  }
}

// =============================================================================
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  Register Rexception    = R17_tos,
           Rcontinuation = R3_RET;

  // --------------------------------------------------------------------------
  // Entry point if a method returns with a pending exception (rethrow).
  Interpreter::_rethrow_exception_entry = __ pc();
  {
    __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);

    // Compiled code destroys templateTableBase, reload.
// =============================================================================
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  Register Rexception    = R17_tos,
           Rcontinuation = R3_RET;

  // --------------------------------------------------------------------------
  // Entry point if a method returns with a pending exception (rethrow).
  Interpreter::_rethrow_exception_entry = __ pc();
  {
    __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);

    // Compiled code destroys templateTableBase, reload.
    __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
  }

  // Entry point if an interpreted method throws an exception (throw).
  Interpreter::_throw_exception_entry = __ pc();
  {
    __ mr(Rexception, R3_RET);

    __ verify_thread();
    __ verify_oop(Rexception);

    // Expression stack must be empty before entering the VM in case of an exception.
    __ empty_expression_stack();
    // Find exception handler address and preserve exception oop.
    // Call C routine to find handler and jump to it.
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Rexception);
    __ mtctr(Rcontinuation);
    // Push exception for exception handler bytecodes.
    __ push_ptr(Rexception);

    // Jump to exception handler (may be the remove-activation entry!).
    __ bctr();
  }

  // If the exception is not handled in the current frame, the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci of the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // bcp: exception bcp

  // --------------------------------------------------------------------------
  // JVMTI PopFrame support

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  {
    // Set the popframe_processing bit in popframe_condition indicating that we are
    // currently handling popframe, so that call_VMs that may happen later do not
    // trigger new popframe handling cycles.
    __ lwz(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
    __ ori(R11_scratch1, R11_scratch1, JavaThread::popframe_processing_bit);
    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);

    // Empty the expression stack, as in normal exception handling.
    __ empty_expression_stack();
    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);

    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label Lcaller_not_deoptimized;
    Register return_pc = R3_ARG1;
    __ ld(return_pc, 0, R1_SP);
    __ ld(return_pc, _abi(lr), return_pc);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc);
    __ cmpdi(CCR0, R3_RET, 0);
    __ bne(CCR0, Lcaller_not_deoptimized);

    // The deoptimized case.
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    __ ld(R4_ARG2, in_bytes(Method::const_offset()), R19_method);
    __ lhz(R4_ARG2 /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), R4_ARG2);
    __ slwi(R4_ARG2, R4_ARG2, Interpreter::logStackElementSize);
    __ addi(R5_ARG3, R18_locals, Interpreter::stackElementSize);
    __ subf(R5_ARG3, R4_ARG2, R5_ARG3);
    // Save these arguments.
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R16_thread, R4_ARG2, R5_ARG3);

    // Inform deoptimization that it is responsible for restoring these arguments.
    __ load_const_optimized(R11_scratch1, JavaThread::popframe_force_deopt_reexecution_bit);
    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);

    // Return from the current method into the deoptimization blob. We will
    // eventually end up in the deopt interpreter entry; deoptimization has
    // prepared everything so that we re-execute the call that called us.
    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*reload return_pc*/ return_pc, R11_scratch1, R12_scratch2);
    __ mtlr(return_pc);
    __ blr();

    // The non-deoptimized case.
    __ bind(Lcaller_not_deoptimized);

    // Clear the popframe condition flag.
    __ li(R0, 0);
    __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);

    // Get out of the current method and re-execute the call that called us.
    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
    __ restore_interpreter_state(R11_scratch1);
    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
    if (ProfileInterpreter) {
      __ set_method_data_pointer_for_bcp();
      __ ld(R11_scratch1, 0, R1_SP);
      __ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1);
    }
#if INCLUDE_JVMTI
    Label L_done;

    __ lbz(R11_scratch1, 0, R14_bcp);
    __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
    __ bne(CCR0, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
    __ ld(R4_ARG2, 0, R18_locals);
    __ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
    __ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
    __ cmpdi(CCR0, R4_ARG2, 0);
    __ beq(CCR0, L_done);
    __ std(R4_ARG2, wordSize, R15_esp);
    __ bind(L_done);
#endif // INCLUDE_JVMTI
    __ dispatch_next(vtos);
  }
  // end of JVMTI PopFrame support
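  // For illustration only: popframe_condition above is driven as a small bit
  // set. A minimal sketch of the transitions this entry performs (the bit
  // values are placeholders; the real ones are JavaThread constants):
#if 0
  enum {
    popframe_inactive                    = 0,
    popframe_pending_bit                 = 1 << 0,
    popframe_processing_bit              = 1 << 1,
    popframe_force_deopt_reexecution_bit = 1 << 2
  };
  static void example_popframe_transitions() {
    int cond = popframe_pending_bit;   // set by JVMTI PopFrame()
    cond |= popframe_processing_bit;   // on entry: mark "handling in progress"
    // Caller deoptimized: the deopt blob must restore the saved arguments.
    cond = popframe_force_deopt_reexecution_bit;
    // Caller not deoptimized: popframe handling is complete.
    cond = popframe_inactive;
    (void) cond;
  }
#endif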
  // --------------------------------------------------------------------------
  // Remove activation exception entry.
  // This is jumped to if an interpreted method can't handle an exception itself
  // (we come from the throw/rethrow exception entry above). We're going to call
  // into the VM to find the exception handler in the caller, pop the current
  // frame and return the handler we calculated.
  Interpreter::_remove_activation_entry = __ pc();
  {
    __ pop_ptr(Rexception);
    __ verify_thread();
    __ verify_oop(Rexception);
    __ std(Rexception, in_bytes(JavaThread::vm_result_offset()), R16_thread);

    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ true);
    __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI, false);

    __ get_vm_result(Rexception);

    // We are done with this activation frame; find out where to go next.
    // The continuation point will be an exception handler, which expects
    // the following registers set up:
    //
    // RET:  exception oop
    // ARG2: Issuing PC (see generate_exception_blob()), only used if the caller is compiled.

    Register return_pc = R31; // Needs to survive the runtime call.
    __ ld(return_pc, 0, R1_SP);
    __ ld(return_pc, _abi(lr), return_pc);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, return_pc);

    // Remove the current activation.
    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);

    __ mr(R4_ARG2, return_pc);
    __ mtlr(R3_RET);
    __ mr(R3_RET, Rexception);
    __ blr();
  }
}
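// For illustration only: a conceptual model of the unwind loop formed by the
// throw/rethrow/remove-activation entries above. This is pseudocode, not
// HotSpot API; find_handler_in_frame and pop_top_frame are hypothetical
// helpers that stand in for the VM calls used in the generated code.
#if 0
void* find_handler_in_frame(void* pc, void* exception);
void* pop_top_frame();

void* deliver_exception(void* pc, void* exception) {
  for (;;) {
    void* handler = find_handler_in_frame(pc, exception); // throw entry
    if (handler != NULL) {
      return handler;       // dispatch to the handler bytecode
    }
    pc = pop_top_frame();   // remove-activation entry, then
                            // continue in the caller (rethrow entry)
  }
}
#endif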
// JVMTI ForceEarlyReturn support.
// Returns "in the middle" of a method with a "fake" return value.
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {

  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2;

  address entry = __ pc();
  __ empty_expression_stack();

  __ load_earlyret_value(state, Rscratch1);

  __ ld(Rscratch1, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
  // Clear the earlyret state.
  __ li(R0, 0);
  __ stw(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rscratch1);

  __ remove_activation(state, false, false);
  // Copied from TemplateTable::_return.
  // Restoration of lr done by remove_activation.
  switch (state) {
    case ltos:
    case btos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R3_RET, R17_tos); break;
    case ftos:
    case dtos: __ fmr(F1_RET, F15_ftos); break;
    case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64)
               // need to become visible before the reference to the object gets stored anywhere.
               __ membar(Assembler::StoreStore); break;
    default  : ShouldNotReachHere();
  }
  __ blr();

  return entry;
} // end of ForceEarlyReturn support
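// For illustration only: the ordering that the StoreStore barrier in the vtos
// case above enforces, expressed with C++11 atomics (a sketch, not HotSpot
// code; a release fence gives at least the StoreStore ordering used above):
#if 0
#include <atomic>
struct T { int final_field; };
std::atomic<T*> published;
void publish(T* obj) {
  obj->final_field = 42;                               // constructor's field store
  std::atomic_thread_fence(std::memory_order_release); // order it before the publish
  published.store(obj, std::memory_order_relaxed);     // reference becomes visible
}
#endif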
//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;

  aep = __ pc();  __ push_ptr();  __ b(L);
  fep = __ pc();  __ push_f();    __ b(L);
  dep = __ pc();  __ push_d();    __ b(L);
  lep = __ pc();  __ push_l();    __ b(L);
  __ align(32, 12, 24); // align L
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch

InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // Down here so it can be "virtual".
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  //__ flush_bundle();
  address entry = __ pc();

  const char *bname = NULL;
  uint tsize = 0;
  switch(state) {
    case ftos:
      bname = "trace_code_ftos {";
      tsize = 2;
      break;
    case btos:
      bname = "trace_code_btos {";
      tsize = 2;
      break;
    case ctos:
      bname = "trace_code_ctos {";
      tsize = 2;
      break;
    case stos:
      bname = "trace_code_stos {";
      tsize = 2;
      break;
    case itos:
      bname = "trace_code_itos {";
      tsize = 2;
      break;
    case ltos:
      bname = "trace_code_ltos {";
      tsize = 3;
      break;
    case atos:
      bname = "trace_code_atos {";
      tsize = 2;
      break;
    case vtos:
      // Note: In case of vtos, the topmost stack value could be an int or a double.
      // In case of a double (2 slots) we won't see the 2nd stack value.
      // Maybe we should simply print the topmost 3 stack slots to cope with the problem.
      bname = "trace_code_vtos {";
      tsize = 2;
      break;
    case dtos:
      bname = "trace_code_dtos {";
      tsize = 3;
      break;
    default:
      ShouldNotReachHere();
  }
  BLOCK_COMMENT(bname);

  // Support short-cut for TraceBytecodesAt.
  // Don't call into the VM if we don't want to trace, to speed things up.
  Label Lskip_vm_call;
  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
    int offs1 = __ load_const_optimized(R11_scratch1, (address) &TraceBytecodesAt, R0, true);
    int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
    __ ld(R11_scratch1, offs1, R11_scratch1);
    __ lwa(R12_scratch2, offs2, R12_scratch2);
    __ cmpd(CCR0, R12_scratch2, R11_scratch1);
    __ blt(CCR0, Lskip_vm_call);
  }

  __ push(state);
  // Load 2 topmost expression stack values.
  __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
  __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
  __ mflr(R31);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
  __ mtlr(R31);
  __ pop(state);

  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
    __ bind(Lskip_vm_call);
  }
  __ blr();
  BLOCK_COMMENT("} trace_code");
  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
  __ lwz(R12_scratch2, offs, R11_scratch1);
  __ addi(R12_scratch2, R12_scratch2, 1);
  __ stw(R12_scratch2, offs, R11_scratch1);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
  __ lwz(R12_scratch2, offs, R11_scratch1);
  __ addi(R12_scratch2, R12_scratch2, 1);
  __ stw(R12_scratch2, offs, R11_scratch1);
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register addr = R11_scratch1,
                 tmp  = R12_scratch2;
  // Get index, shift out old bytecode, bring in new bytecode, and store it.
  // _index = (_index >> log2_number_of_codes) |
  //          (bytecode << log2_number_of_codes);
  int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
  __ lwz(tmp, offs1, addr);
  __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
  __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ stw(tmp, offs1, addr);

  // Bump bucket contents.
  // _counters[_index] ++;
  int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
  __ sldi(tmp, tmp, LogBytesPerInt);
  __ add(addr, tmp, addr);
  __ lwz(tmp, offs2, addr);
  __ addi(tmp, tmp, 1);
  __ stw(tmp, offs2, addr);
}
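// For illustration only: the pair-index update above in plain C++, with an
// assumed log2_number_of_codes of 8 (the real value is a BytecodePairHistogram
// constant). The previous bytecode ends up in the low bits, the current one
// in the high bits.
#if 0
static int example_pair_index(int index, int bytecode) {
  const int log2_number_of_codes = 8; // assumed
  return (index >> log2_number_of_codes) | (bytecode << log2_number_of_codes);
}
// After executing iload (0x15) followed by iadd (0x60):
//   index = example_pair_index(0, 0x15);      // 0x1500
//   index = example_pair_index(0x1500, 0x60); // 0x6015: bucket for (iload, iadd)
#endif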
void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");

  // Note: we destroy LR here.
  __ bl(Interpreter::trace_code(t->tos_in()));
}

void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  int offs1 = __ load_const_optimized(R11_scratch1, (address) &StopInterpreterAt, R0, true);
  int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
  __ ld(R11_scratch1, offs1, R11_scratch1);
  __ lwa(R12_scratch2, offs2, R12_scratch2);
  __ cmpd(CCR0, R12_scratch2, R11_scratch1);
  __ bne(CCR0, L);
  __ illtrap();
  __ bind(L);
}

#endif // !PRODUCT
#endif // !CC_INTERP