/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#ifndef CC_INTERP
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciMethod.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

//-----------------------------------------------------------------------------

// Actually we should never reach here since we do stack overflow checks before pushing any frame.
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  __ unimplemented("generate_StackOverflowError_handler");
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  __ empty_expression_stack();
  __ load_const_optimized(R4_ARG2, (address) name);
  // Index is in R17_tos.
  __ mr(R5_ARG3, R17_tos);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException));
  return entry;
}

#if 0
// Call special ClassCastException constructor taking object to cast
// and target class as arguments.
address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler(const char* name) {
  address entry = __ pc();

  // Target class oop is in register R6_ARG4 by convention!

  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();
  // Setup parameters.
  // Thread will be loaded to R3_ARG1.
  __ load_const_optimized(R4_ARG2, (address) name);
  __ mr(R5_ARG3, R17_tos);
  // R6_ARG4 contains specified class.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose));
#ifdef ASSERT
  // Above call must not return here since exception pending.
  __ should_not_reach_here();
#endif
  return entry;
}
#endif

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Load exception object.
  // Thread will be loaded to R3_ARG1.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), R17_tos);
#ifdef ASSERT
  // Above call must not return here since exception pending.
  __ should_not_reach_here();
#endif
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  address entry = __ pc();
  //__ untested("generate_exception_handler_common");
  Register Rexception = R17_tos;

  // Expression stack must be empty before entering the VM if an exception happened.
  __ empty_expression_stack();

  __ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
  if (pass_oop) {
    __ mr(R5_ARG3, Rexception);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), false);
  } else {
    __ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), false);
  }

  // Throw exception.
  __ mr(R3_ARG1, Rexception);
  __ load_const_optimized(R11_scratch1, Interpreter::throw_exception_entry(), R12_scratch2);
  __ mtctr(R11_scratch1);
  __ bctr();

  return entry;
}

address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ unimplemented("generate_continuation_for");
  return entry;
}

// This entry is returned to when a call returns to the interpreter.
// When we arrive here, we expect that the callee stack frame is already popped.
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Move the value out of the return register back to the TOS cache of current frame.
  switch (state) {
  case ltos:
  case btos:
  case ctos:
  case stos:
  case atos:
  case itos: __ mr(R17_tos, R3_RET); break;   // RET -> TOS cache
  case ftos:
  case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
  case vtos: break;                           // Nothing to do, this was a void return.
  default  : ShouldNotReachHere();
  }

  __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
  __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
  __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);

  // Compiled code destroys templateTableBase, reload.
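  // (R25_templateTableBase caches the base of the vtos dispatch table used by
  // dispatch_next. A compiled callee does not preserve this register, hence
  // the reload before dispatching the next bytecode.)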
  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);

  const Register cache = R11_scratch1;
  const Register size  = R12_scratch2;
  __ get_cache_and_index_at_bcp(cache, 1, index_size);

  // Big Endian (get least significant byte of 64 bit value):
  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
  __ sldi(size, size, Interpreter::logStackElementSize);
  __ add(R15_esp, R15_esp, size);
  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  // If state != vtos, we're returning from a native method, which put its result
  // into the result register. So move the value out of the return register back
  // to the TOS cache of current frame.

  switch (state) {
  case ltos:
  case btos:
  case ctos:
  case stos:
  case atos:
  case itos: __ mr(R17_tos, R3_RET); break;   // GR_RET -> TOS cache
  case ftos:
  case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
  case vtos: break;                           // Nothing to do, this was a void return.
  default  : ShouldNotReachHere();
  }

  // Load LcpoolCache @@@ should be already set!
  __ get_constant_pool_cache(R27_constPoolCache);

  // Handle a pending exception, fall through if none.
  __ check_and_forward_exception(R11_scratch1, R12_scratch2);

  // Start executing bytecodes.
  __ dispatch_next(state, step);

  return entry;
}

// A result handler converts the native result into java format.
// Use the shared code between c++ and template interpreter.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  return AbstractInterpreterGenerator::generate_result_handler_for(type);
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();

  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));

  return entry;
}

// Helpers for commoning out cases in the various type of method entries.

// Increment invocation count & check for overflow.
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test.
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in method or in MDO depending if we're profiling or not.
  Register Rscratch1   = R11_scratch1;
  Register Rscratch2   = R12_scratch2;
  Register R3_counters = R3_ARG1;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      const Register Rmdo = Rscratch1;
      // If no method data exists, go to profile_continue.
      __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
      __ cmpdi(CCR0, Rmdo, 0);
      __ beq(CCR0, no_mdo);

      // Increment backedge counter in the MDO.
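      // (The increment/mask pair below implements periodic notification:
      // and_-ing the bumped counter with mask sets CCR0.eq exactly when the
      // low notification bits wrapped to zero; only then, roughly once every
      // 2^Tier0InvokeNotifyFreqLog increments, do we branch to *overflow.)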
      const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mdo_bc_offs, Rmdo);
      __ load_const_optimized(Rscratch1, mask, R0);
      __ and_(Rscratch1, Rscratch2, Rscratch1);
      __ bne(CCR0, done);
      __ b(*overflow);
    }

    // Increment counter in MethodCounters*.
    const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
    __ bind(no_mdo);
    __ get_method_counters(R19_method, R3_counters, done);
    __ lwz(Rscratch2, mo_bc_offs, R3_counters);
    __ addi(Rscratch2, Rscratch2, increment);
    __ stw(Rscratch2, mo_bc_offs, R3_counters);
    __ load_const_optimized(Rscratch1, mask, R0);
    __ and_(Rscratch1, Rscratch2, Rscratch1);
    __ beq(CCR0, *overflow);

    __ bind(done);

  } else {

    // Update standard invocation counters.
    Register Rsum_ivc_bec = R4_ARG2;
    __ get_method_counters(R19_method, R3_counters, done);
    __ increment_invocation_counter(R3_counters, Rsum_ivc_bec, R12_scratch2);
    // Increment interpreter invocation counter.
    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop.
      __ lwz(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
      __ addi(R12_scratch2, R12_scratch2, 1);
      __ stw(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
    }
    // Check if we must create a method data obj.
    if (ProfileInterpreter && profile_method != NULL) {
      const Register profile_limit = Rscratch1;
      int pl_offs = __ load_const_optimized(profile_limit, &InvocationCounter::InterpreterProfileLimit, R0, true);
      __ lwz(profile_limit, pl_offs, profile_limit);
      // Test to see if we should create a method data oop.
      __ cmpw(CCR0, Rsum_ivc_bec, profile_limit);
      __ blt(CCR0, *profile_method_continue);
      // If no method data exists, go to profile_method.
      __ test_method_data_pointer(*profile_method);
    }
    // Finally check for counter overflow.
    if (overflow) {
      const Register invocation_limit = Rscratch1;
      int il_offs = __ load_const_optimized(invocation_limit, &InvocationCounter::InterpreterInvocationLimit, R0, true);
      __ lwz(invocation_limit, il_offs, invocation_limit);
      assert(4 == sizeof(InvocationCounter::InterpreterInvocationLimit), "unexpected field size");
      __ cmpw(CCR0, Rsum_ivc_bec, invocation_limit);
      __ bge(CCR0, *overflow);
    }

    __ bind(done);
  }
}

// Generate code to initiate compilation on invocation counter overflow.
void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes one argument,
  // which indicates if the counter overflow occurs at a backwards branch (NULL bcp).
  // We pass zero in.
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  //
  // Unlike the C++ interpreter above: Check exceptions!
  // Assumption: Caller must set the flag "do_not_unlock_if_synchronized" if the monitor of a sync'ed
  // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.
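
  // (The zero loaded into R4_ARG2 below is the NULL bcp mentioned above: it
  // tells the runtime this overflow happened at method entry, not at a
  // backedge.)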
  __ li(R4_ARG2, 0);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);

  // Returns verified_entry_point or NULL.
  // We ignore it in any case.
  __ b(continue_entry);
}

void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
  assert_different_registers(Rmem_frame_size, Rscratch1);
  __ generate_stack_overflow_check_with_compare_and_throw(Rmem_frame_size, Rscratch1);
}

void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
  __ unlock_object(R26_monitor, check_exceptions);
}

// Lock the current method, interpreter register window must be set up!
void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
  const Register Robj_to_lock = Rscratch2;

  {
    if (!flags_preloaded) {
      __ lwz(Rflags, method_(access_flags));
    }

#ifdef ASSERT
    // Check if this method needs synchronization.
    {
      Label Lok;
      __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
      __ btrue(CCR0, Lok);
      __ stop("method doesn't need synchronization");
      __ bind(Lok);
    }
#endif // ASSERT
  }

  // Get synchronization object to Rscratch2.
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label Lstatic;
    Label Ldone;

    __ testbitdi(CCR0, R0, Rflags, JVM_ACC_STATIC_BIT);
    __ btrue(CCR0, Lstatic);

    // Non-static case: load receiver obj from stack and we're done.
    __ ld(Robj_to_lock, R18_locals);
    __ b(Ldone);

    __ bind(Lstatic); // Static case: Lock the java mirror.
    __ ld(Robj_to_lock, in_bytes(Method::const_offset()), R19_method);
    __ ld(Robj_to_lock, in_bytes(ConstMethod::constants_offset()), Robj_to_lock);
    __ ld(Robj_to_lock, ConstantPool::pool_holder_offset_in_bytes(), Robj_to_lock);
    __ ld(Robj_to_lock, mirror_offset, Robj_to_lock);

    __ bind(Ldone);
    __ verify_oop(Robj_to_lock);
  }

  // Got the oop to lock => execute!
  __ add_monitor_to_stack(true, Rscratch1, R0);

  __ std(Robj_to_lock, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
  __ lock_object(R26_monitor, Robj_to_lock);
}

// Generate a fixed interpreter frame for pure interpreter
// and I2N native transition frames.
//
// Before (stack grows downwards):
//
//         | ...          |
//         |--------------|
//         | java arg0    |
//         | ...          |
//         | java argn    |
//         |              | <- R15_esp
//         |              |
//         |--------------|
//         | abi_112      |
//         |              | <- R1_SP
//         |==============|
//
//
// After:
//
//         | ...          |
//         | java arg0    | <- R18_locals
//         | ...          |
//         | java argn    |
//         |--------------|
//         |              |
//         | java locals  |
//         |              |
//         |--------------|
//         | abi_48       |
//         |==============|
//         |              |
//         | istate       |
//         |              |
//         |--------------|
//         | monitor      | <- R26_monitor
//         |--------------|
//         |              | <- R15_esp
//         | expression   |
//         | stack        |
//         |              |
//         |--------------|
//         |              |
//         | abi_112      | <- R1_SP
//         |==============|
//
// The topmost frame needs an abi space of 112 bytes. This space is needed,
// since we call to c. The c function may spill their arguments to the caller
// frame. When we call to java, we don't need these spill slots. In order to save
// space on the stack, we resize the caller.
// However, java locals reside in
// the caller frame and the frame has to be increased. The frame_size for the
// current frame was calculated based on max_stack as size for the expression
// stack. At the call, just a part of the expression stack might be used.
// We don't want to waste this space and cut the frame back accordingly.
// The resulting amount for resizing is calculated as follows:
// resize =   (number_of_locals - number_of_arguments) * slot_size
//          + (R1_SP - R15_esp) + 48
//
// The size for the callee frame is calculated:
// framesize = 112 + max_stack + monitor + state_size
//
// max_stack:  Max number of slots on the expression stack, loaded from the method.
// monitor:    We statically reserve room for one monitor object.
// state_size: We save the current state of the interpreter to this area.
//
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals) {
  Register parent_frame_resize = R6_ARG4, // Frame will grow by this number of bytes.
           top_frame_size      = R7_ARG5,
           Rconst_method       = R8_ARG6;

  assert_different_registers(Rsize_of_parameters, Rsize_of_locals, parent_frame_resize, top_frame_size);

  __ ld(Rconst_method, method_(const));
  __ lhz(Rsize_of_parameters /* number of params */,
         in_bytes(ConstMethod::size_of_parameters_offset()), Rconst_method);
  if (native_call) {
    // If we're calling a native method, we reserve space for the worst-case signature
    // handler varargs vector, which is max(Argument::n_register_parameters, parameter_count+2).
    // We add two slots to the parameter_count, one for the jni
    // environment and one for a possible native mirror.
    Label skip_native_calculate_max_stack;
    __ addi(top_frame_size, Rsize_of_parameters, 2);
    __ cmpwi(CCR0, top_frame_size, Argument::n_register_parameters);
    __ bge(CCR0, skip_native_calculate_max_stack);
    __ li(top_frame_size, Argument::n_register_parameters);
    __ bind(skip_native_calculate_max_stack);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    assert(Rsize_of_locals == noreg, "Rsize_of_locals not initialized"); // Only relevant value is Rsize_of_parameters.
  } else {
    __ lhz(Rsize_of_locals /* number of locals */, in_bytes(ConstMethod::size_of_locals_offset()), Rconst_method);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(Rsize_of_locals, Rsize_of_locals, Interpreter::logStackElementSize);
    __ lhz(top_frame_size, in_bytes(ConstMethod::max_stack_offset()), Rconst_method);
    __ sub(R11_scratch1, Rsize_of_locals, Rsize_of_parameters); // >=0
    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
    __ add(parent_frame_resize, parent_frame_resize, R11_scratch1);
  }

  // Compute top frame size.
  __ addi(top_frame_size, top_frame_size, frame::abi_reg_args_size + frame::ijava_state_size);

  // Cut back area between esp and max_stack.
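  // (At this point parent_frame_resize holds (R1_SP - R15_esp), plus
  // locals-minus-parameters in the non-native case. As flagged above it is
  // off by one stackElementSize, since esp points one slot below the last
  // argument; presumably that is why one stackElementSize is subtracted from
  // the abi_minframe_size term added next, matching the resize formula in
  // the comment before this function.)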
  __ addi(parent_frame_resize, parent_frame_resize, frame::abi_minframe_size - Interpreter::stackElementSize);

  __ round_to(top_frame_size, frame::alignment_in_bytes);
  __ round_to(parent_frame_resize, frame::alignment_in_bytes);
  // parent_frame_resize = (locals-parameters) - (ESP-SP-ABI48) Rounded to frame alignment size.
  // Enlarge by locals-parameters (not in case of native_call), shrink by ESP-SP-ABI48.

  {
    // --------------------------------------------------------------------------
    // Stack overflow check

    Label cont;
    __ add(R11_scratch1, parent_frame_resize, top_frame_size);
    generate_stack_overflow_check(R11_scratch1, R12_scratch2);
  }

  // Set up interpreter state registers.

  __ add(R18_locals, R15_esp, Rsize_of_parameters);
  __ ld(R27_constPoolCache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
  __ ld(R27_constPoolCache, ConstantPool::cache_offset_in_bytes(), R27_constPoolCache);

  // Set method data pointer.
  if (ProfileInterpreter) {
    Label zero_continue;
    __ ld(R28_mdx, method_(method_data));
    __ cmpdi(CCR0, R28_mdx, 0);
    __ beq(CCR0, zero_continue);
    __ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
    __ bind(zero_continue);
  }

  if (native_call) {
    __ li(R14_bcp, 0); // Must initialize.
  } else {
    __ add(R14_bcp, in_bytes(ConstMethod::codes_offset()), Rconst_method);
  }

  // Resize parent frame.
  __ mflr(R12_scratch2);
  __ neg(parent_frame_resize, parent_frame_resize);
  __ resize_frame(parent_frame_resize, R11_scratch1);
  __ std(R12_scratch2, _abi(lr), R1_SP);

  __ addi(R26_monitor, R1_SP, -frame::ijava_state_size);
  __ addi(R15_esp, R26_monitor, -Interpreter::stackElementSize);

  // Store values.
  // R15_esp, R14_bcp, R26_monitor, R28_mdx are saved at java calls
  // in InterpreterMacroAssembler::call_from_interpreter.
  __ std(R19_method, _ijava_state_neg(method), R1_SP);
  __ std(R21_sender_SP, _ijava_state_neg(sender_sp), R1_SP);
  __ std(R27_constPoolCache, _ijava_state_neg(cpoolCache), R1_SP);
  __ std(R18_locals, _ijava_state_neg(locals), R1_SP);

  // Note: esp, bcp, monitor, mdx live in registers. Hence, the correct version can only
  // be found in the frame after save_interpreter_state is done. This is always true
  // for non-top frames. But when a signal occurs, dumping the top frame can go wrong,
  // because e.g. frame::interpreter_frame_bcp() will not access the correct value
  // (Enhanced Stack Trace).
  // The signal handler does not save the interpreter state into the frame.
  __ li(R0, 0);
#ifdef ASSERT
  // Fill remaining slots with constants.
  __ load_const_optimized(R11_scratch1, 0x5afe);
  __ load_const_optimized(R12_scratch2, 0xdead);
#endif
  // We have to initialize some frame slots for native calls (accessed by GC).
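  // (Stack walks read monitors, bcp and mdx out of the ijava_state area via
  // frame::interpreter_frame_*, so for an i2n frame these slots must hold
  // valid values even though a native method never executes bytecodes.)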
  if (native_call) {
    __ std(R26_monitor, _ijava_state_neg(monitors), R1_SP);
    __ std(R14_bcp, _ijava_state_neg(bcp), R1_SP);
    if (ProfileInterpreter) { __ std(R28_mdx, _ijava_state_neg(mdx), R1_SP); }
  }
#ifdef ASSERT
  else {
    __ std(R12_scratch2, _ijava_state_neg(monitors), R1_SP);
    __ std(R12_scratch2, _ijava_state_neg(bcp), R1_SP);
    __ std(R12_scratch2, _ijava_state_neg(mdx), R1_SP);
  }
  __ std(R11_scratch1, _ijava_state_neg(ijava_reserved), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(lresult), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(fresult), R1_SP);
#endif
  __ subf(R12_scratch2, top_frame_size, R1_SP);
  __ std(R0, _ijava_state_neg(oop_tmp), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);

  // Push top frame.
  __ push_frame(top_frame_size, R11_scratch1);
}

// End of helpers

// ============================================================================
// Various method entries
//

// Empty method, generate a very fast return. We must skip this entry if
// someone's debugging, indicated by the flag
// "interp_mode" in the Thread obj.
// Note: empty methods are generated mostly for methods that do assertions, which are
// disabled in the "java opt build".
address TemplateInterpreterGenerator::generate_empty_entry(void) {
  if (!UseFastEmptyMethods) {
    NOT_PRODUCT(__ should_not_reach_here();)
    return Interpreter::entry_for_kind(Interpreter::zerolocals);
  }

  Label Lslow_path;
  const Register Rjvmti_mode = R11_scratch1;
  address entry = __ pc();

  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
  __ cmpwi(CCR0, Rjvmti_mode, 0);
  __ bne(CCR0, Lslow_path); // jvmti_mode!=0

  // No one's debugging: Simply return.
  // Pop c2i arguments (if any) off when we return.
#ifdef ASSERT
  __ ld(R9_ARG7, 0, R1_SP);
  __ ld(R10_ARG8, 0, R21_sender_SP);
  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
  __ asm_assert_eq("backlink", 0x545);
#endif // ASSERT
  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.

  // And we're done.
  __ blr();

  __ bind(Lslow_path);
  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
  __ flush();

  return entry;
}

// Support abs and sqrt like in compiler.
// For others we can use a normal (native) entry.

inline bool math_entry_available(AbstractInterpreter::MethodKind kind) {
  // Provide math entry with debugging on demand.
  // Note: Debugging changes which code will get executed:
  // Debugging or disabled InlineIntrinsics: java method will get interpreted and performs a native call.
  // Not debugging and enabled InlineIntrinsics: processor instruction will get used.
  // Result might differ slightly due to rounding etc.
  if (!InlineIntrinsics && (!FLAG_IS_ERGO(InlineIntrinsics))) return false; // Generate a vanilla entry.
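
  // (Only abs and sqrt map onto single PPC instructions here: fabs always,
  // fsqrt only where VM_Version::has_fsqrt() reports hardware support. All
  // other math kinds take the vanilla entry instead.)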
  return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
          (kind==Interpreter::java_lang_math_abs));
}

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  if (!math_entry_available(kind)) {
    NOT_PRODUCT(__ should_not_reach_here();)
    return Interpreter::entry_for_kind(Interpreter::zerolocals);
  }

  Label Lslow_path;
  const Register Rjvmti_mode = R11_scratch1;
  address entry = __ pc();

  // Provide math entry with debugging on demand.
  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
  __ cmpwi(CCR0, Rjvmti_mode, 0);
  __ bne(CCR0, Lslow_path); // jvmti_mode!=0

  __ lfd(F1_RET, Interpreter::stackElementSize, R15_esp);

  // Pop c2i arguments (if any) off when we return.
#ifdef ASSERT
  __ ld(R9_ARG7, 0, R1_SP);
  __ ld(R10_ARG8, 0, R21_sender_SP);
  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
  __ asm_assert_eq("backlink", 0x545);
#endif // ASSERT
  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.

  if (kind == Interpreter::java_lang_math_sqrt) {
    __ fsqrt(F1_RET, F1_RET);
  } else if (kind == Interpreter::java_lang_math_abs) {
    __ fabs(F1_RET, F1_RET);
  } else {
    ShouldNotReachHere();
  }

  // And we're done.
  __ blr();

  // Provide slow path for JVMTI case.
  __ bind(Lslow_path);
  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R12_scratch2);
  __ flush();

  return entry;
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
//
// On entry:
//   R19_method - method
//   R16_thread - JavaThread*
//   R15_esp    - intptr_t* sender tos
//
//   abstract stack (grows up)
//     [  IJava (caller of JNI callee)  ]  <-- ASP
//        ...
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {

  address entry = __ pc();

  const bool inc_counter = UseCompiler || CountCompiledCalls;

  // -----------------------------------------------------------------------------
  // Allocate a new frame that represents the native callee (i2n frame).
  // This is not a full-blown interpreter frame, but in particular, the
  // following registers are valid after this:
  // - R19_method
  // - R18_locals (points to start of arguments to native function)
  //
  //   abstract stack (grows up)
  //     [  IJava (caller of JNI callee)  ]  <-- ASP
  //        ...

  const Register signature_handler_fd = R11_scratch1;
  const Register pending_exception    = R0;
  const Register result_handler_addr  = R31;
  const Register native_method_fd     = R11_scratch1;
  const Register access_flags         = R22_tmp2;
  const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
  const Register sync_state           = R12_scratch2;
  const Register sync_state_addr      = sync_state;   // Address is dead after use.
  const Register suspend_flags        = R11_scratch1;

  //=============================================================================
  // Allocate new frame and initialize interpreter state.

  Label exception_return;
  Label exception_return_sync_check;
  Label stack_overflow_return;

  // Generate new interpreter state and jump to stack_overflow_return in case of
  // a stack overflow.
  //generate_compute_interpreter_state(stack_overflow_return);

  Register size_of_parameters = R22_tmp2;

  generate_fixed_frame(true, size_of_parameters, noreg /* unused */);

  //=============================================================================
  // Increment invocation counter. On overflow, entry to JNI method
  // will be compiled.
  Label invocation_counter_overflow, continue_after_compile;
  if (inc_counter) {
    if (synchronized) {
      // Since at this point in the method invocation the exception handler
      // would try to exit the monitor of a synchronized method which hasn't
      // been entered yet, we set the thread local variable
      // _do_not_unlock_if_synchronized to true. If any exception was thrown by
      // the runtime, exception handling, i.e. unlock_if_synchronized_method,
      // will check this thread local flag.
      // This flag forces an unwind in the topmost interpreter frame without
      // performing an unlock while doing so.
      __ li(R0, 1);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

    __ BIND(continue_after_compile);
    // Reset the _do_not_unlock_if_synchronized flag.
    if (synchronized) {
      __ li(R0, 0);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
  }

  // access_flags = method->access_flags();
  // Load access flags.
  assert(access_flags->is_nonvolatile(),
         "access_flags must be in a non-volatile register");
  // Type check.
  assert(4 == sizeof(AccessFlags), "unexpected field size");
  __ lwz(access_flags, method_(access_flags));

  // We don't want to reload R19_method and access_flags after calls
  // to some helper functions.
  assert(R19_method->is_nonvolatile(),
         "R19_method must be a non-volatile register");

  // Check for synchronized methods. Must happen AFTER invocation counter
  // check, so method is not locked if counter overflows.

  if (synchronized) {
    lock_method(access_flags, R11_scratch1, R12_scratch2, true);

    // Update monitor in state.
    __ ld(R11_scratch1, 0, R1_SP);
    __ std(R26_monitor, _ijava_state_neg(monitors), R11_scratch1);
  }

  // jvmti/jvmpi support
  __ notify_method_entry();

  //=============================================================================
  // Get and call the signature handler.

  __ ld(signature_handler_fd, method_(signature_handler));
  Label call_signature_handler;

  __ cmpdi(CCR0, signature_handler_fd, 0);
  __ bne(CCR0, call_signature_handler);

  // Method has never been called. Either generate a specialized
  // handler or point to the slow one.
  //
  // Pass parameter 'false' to avoid exception check in call_VM.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);

  // Check for an exception while looking up the target method. If we
  // incurred one, bail.
  __ ld(pending_exception, thread_(pending_exception));
  __ cmpdi(CCR0, pending_exception, 0);
  __ bne(CCR0, exception_return_sync_check); // Has pending exception.

  // Reload signature handler, it may have been created/assigned in the meanwhile.
  __ ld(signature_handler_fd, method_(signature_handler));
  __ twi_0(signature_handler_fd); // Order wrt. load of klass mirror and entry point (isync is below).
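  // (twi_0 is a trap-word-immediate that never traps; it merely establishes a
  // data dependency on signature_handler_fd. Together with the isync further
  // down, this forms the usual PPC load-acquire sequence, so the later loads
  // of the native entry point and klass mirror cannot observe values from
  // before the signature handler was published.)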

  __ BIND(call_signature_handler);

  // Before we call the signature handler we push a new frame to
  // protect the interpreter frame volatile registers when we return
  // from jni but before we can get back to Java.

  // First set the frame anchor while the SP/FP registers are
  // convenient and the slow signature handler can use this same frame
  // anchor.

  // We have a TOP_IJAVA_FRAME here, which belongs to us.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);

  // Now the interpreter frame (and its call chain) have been
  // invalidated and flushed. We are now protected against eager
  // being enabled in native code. Even if it goes eager the
  // registers will be reloaded as clean and we will invalidate after
  // the call so no spurious flush should be possible.

  // Call signature handler and pass locals address.
  //
  // Our signature handlers copy required arguments to the C stack
  // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
  __ mr(R3_ARG1, R18_locals);
  __ ld(signature_handler_fd, 0, signature_handler_fd);

  __ call_stub(signature_handler_fd);

  // Remove the register parameter varargs slots we allocated in
  // compute_interpreter_state. SP+16 ends up pointing to the ABI
  // outgoing argument area.
  //
  // Not needed on PPC64.
  //__ add(SP, SP, Argument::n_register_parameters*BytesPerWord);

  assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
  // Save across call to native method.
  __ mr(result_handler_addr, R3_RET);

  __ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.

  // Set up fixed parameters and call the native method.
  // If the method is static, get mirror into R4_ARG2.
  {
    Label method_is_not_static;
    // access_flags is non-volatile and still valid, no need to reload it.

    // Restore access flags.
    __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
    __ bfalse(CCR0, method_is_not_static);

    // constants = method->constants();
    __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
    __ ld(R11_scratch1, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
    // pool_holder = method->constants()->pool_holder();
    __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
          R11_scratch1/*constants*/);

    const int mirror_offset = in_bytes(Klass::java_mirror_offset());

    // mirror = pool_holder->klass_part()->java_mirror();
    __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
    // state->_native_mirror = mirror;

    __ ld(R11_scratch1, 0, R1_SP);
    __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
    // R4_ARG2 = &state->_oop_temp;
    __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
    __ BIND(method_is_not_static);
  }

  // At this point, arguments have been copied off the stack into
  // their JNI positions. Oops are boxed in-place on the stack, with
  // handles copied to arguments. The result handler address is in a
  // register.

  // Pass JNIEnv address as first parameter.
  __ addir(R3_ARG1, thread_(jni_environment));

  // Load the native_method entry before we change the thread state.
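  // (Do this while we are still _thread_in_Java: once the thread state has
  // changed, a safepoint/GC may run and, as noted below, R19_method may no
  // longer be valid.)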
  __ ld(native_method_fd, method_(native_function));

  //=============================================================================
  // Transition from _thread_in_Java to _thread_in_native. As soon as
  // we make this change the safepoint code needs to be certain that
  // the last Java frame we established is good. The pc in that frame
  // just needs to be near here not an actual return address.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0, _thread_in_native);
  __ release();

  // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));

  if (UseMembar) {
    __ fence();
  }

  //=============================================================================
  // Call the native method. Argument registers must not have been
  // overwritten since "__ call_stub(signature_handler);" (except for
  // ARG1 and ARG2 for static methods).
  __ call_c(native_method_fd);

  __ li(R0, 0);
  __ ld(R11_scratch1, 0, R1_SP);
  __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
  __ stfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
  __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); // reset

  // Note: C++ interpreter needs the following here:
  // The frame_manager_lr field, which we use for setting the last
  // java frame, gets overwritten by the signature handler. Restore
  // it now.
  //__ get_PC_trash_LR(R11_scratch1);
  //__ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);

  // Because of GC R19_method may no longer be valid.

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after
  // blocking.

  //=============================================================================
  // Switch thread to "native transition" state before reading the
  // synchronization state. This additional state is necessary
  // because reading and testing the synchronization state is not
  // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
  // in _thread_in_native state, loads _not_synchronized and is
  // preempted. VM thread changes sync state to synchronizing and
  // suspends threads for GC. Thread A is resumed to finish this
  // native method, but doesn't block here since it didn't see any
  // synchronization in progress, and escapes.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0/*thread_state*/, _thread_in_native_trans);
  __ release();
  __ stw(R0/*thread_state*/, thread_(thread_state));
  if (UseMembar) {
    __ fence();
  }
  // Write serialization page so that the VM thread can do a pseudo remote
  // membar. We use the current thread pointer to calculate a thread
  // specific offset to write to within the page. This minimizes bus
  // traffic due to cache line collision.
  else {
    __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
  }

  // Now before we return to java we must look for a current safepoint
  // (a new safepoint cannot start since we entered native_trans).
  // We must check here because a current safepoint could be modifying
  // the caller's registers right this moment.

  // Acquire isn't strictly necessary here because of the fence, but
  // sync_state is declared to be volatile, so we do it anyway
  // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
  int sync_state_offs = __ load_const_optimized(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/R0, true);

  // TODO PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
  __ lwz(sync_state, sync_state_offs, sync_state_addr);

  // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
  __ lwz(suspend_flags, thread_(suspend_flags));

  Label sync_check_done;
  Label do_safepoint;
  // No synchronization in progress nor yet synchronized.
  __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
  // Not suspended.
  __ cmpwi(CCR1, suspend_flags, 0);

  __ bne(CCR0, do_safepoint);
  __ beq(CCR1, sync_check_done);
  __ bind(do_safepoint);
  __ isync();
  // Block. We do the call directly and leave the current
  // last_Java_frame setup undisturbed. We must save any possible
  // native result across the call. No oop is present.

  __ mr(R3_ARG1, R16_thread);
  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
            relocInfo::none);

  __ bind(sync_check_done);

  //=============================================================================
  // <<<<<< Back in Interpreter Frame >>>>>

  // We are in thread_in_native_trans here and back in the normal
  // interpreter frame. We don't have to do anything special about
  // safepoints and we can switch to Java mode anytime we are ready.

  // Note: frame::interpreter_frame_result has a dependency on how the
  // method result is saved across the call to post_method_exit. For
  // native methods it assumes that the non-FPU/non-void result is
  // saved in _native_lresult and a FPU result in _native_fresult. If
  // this changes then the interpreter_frame_result implementation
  // will need to be updated too.

  // On PPC64, we have stored the result directly after the native call.

  //=============================================================================
  // Back in Java

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0/*thread_state*/, _thread_in_Java);
  __ release();
  __ stw(R0/*thread_state*/, thread_(thread_state));
  if (UseMembar) {
    __ fence();
  }

  __ reset_last_Java_frame();

  // Jvmdi/jvmpi support. Whether we've got an exception pending or
  // not, and whether unlocking throws an exception or not, we notify
  // on native method exit. If we do have an exception, we'll end up
  // in the caller's context to handle it, so if we don't do the
  // notify here, we'll drop it on the floor.
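  // (Passing ilgl as the TosState is fine here: for native methods the result
  // was already saved to lresult/fresult right after the native call, see the
  // note on frame::interpreter_frame_result above.)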
  __ notify_method_exit(true/*native method*/,
                        ilgl /*illegal state (not used for native methods)*/,
                        InterpreterMacroAssembler::NotifyJVMTI,
                        false /*check_exceptions*/);

  //=============================================================================
  // Handle exceptions

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
  }

  // Reset active handles after returning from native.
  // thread->active_handles()->clear();
  __ ld(active_handles, thread_(active_handles));
  // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
  __ li(R0, 0);
  __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);

  Label exception_return_sync_check_already_unlocked;
  __ ld(R0/*pending_exception*/, thread_(pending_exception));
  __ cmpdi(CCR0, R0/*pending_exception*/, 0);
  __ bne(CCR0, exception_return_sync_check_already_unlocked);

  //-----------------------------------------------------------------------------
  // No exception pending.

  // Move native method result back into proper registers and return.
  // Invoke result handler (may unbox/promote).
  __ ld(R11_scratch1, 0, R1_SP);
  __ ld(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
  __ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
  __ call_stub(result_handler_addr);

  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

  // Must use the return pc which was loaded from the caller's frame
  // as the VM uses return-pc-patching for deoptimization.
  __ mtlr(R0);
  __ blr();

  //-----------------------------------------------------------------------------
  // An exception is pending. We call into the runtime only if the
  // caller was not interpreted. If it was interpreted the
  // interpreter will do the correct thing. If it isn't interpreted
  // (call stub/compiled code) we will change our return and continue.

  __ BIND(exception_return_sync_check);

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
  }
  __ BIND(exception_return_sync_check_already_unlocked);

  const Register return_pc = R31;

  __ ld(return_pc, 0, R1_SP);
  __ ld(return_pc, _abi(lr), return_pc);

  // Get the address of the exception handler.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                  R16_thread,
                  return_pc /* return pc */);
  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);

  // Load the PC of the exception handler into LR.
  __ mtlr(R3_RET);

  // Load exception into R3_ARG1 and clear pending exception in thread.
  __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
  __ li(R4_ARG2, 0);
  __ std(R4_ARG2, thread_(pending_exception));

  // Load the original return pc into R4_ARG2.
  __ mr(R4_ARG2/*issuing_pc*/, return_pc);

  // Return to exception handler.
  __ blr();

  //=============================================================================
  // Counter overflow.

  if (inc_counter) {
    // Handle invocation counter overflow.
    __ bind(invocation_counter_overflow);

    generate_counter_overflow(continue_after_compile);
  }

  return entry;
}

// Generic interpreted method entry to (asm) interpreter.
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  bool inc_counter = UseCompiler || CountCompiledCalls;
  address entry = __ pc();
  // Generate the code to allocate the interpreter stack frame.
  Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
           Rsize_of_locals     = R5_ARG3; // Written by generate_fixed_frame.

  generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);

#ifdef FAST_DISPATCH
  __ unimplemented("Fast dispatch in generate_normal_entry");
#if 0
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
  // Set bytecode dispatch table base.
#endif
#endif

  // --------------------------------------------------------------------------
  // Zero out non-parameter locals.
  // Note: *Always* zero out non-parameter locals as Sparc does. It's not
  // worth checking the flag, just do it.
  Register Rslot_addr = R6_ARG4,
           Rnum       = R7_ARG5;
  Label Lno_locals, Lzero_loop;

  // Set up the zeroing loop.
  __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
  __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
  __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
  __ beq(CCR0, Lno_locals);
  __ li(R0, 0);
  __ mtctr(Rnum);

  // The zero locals loop.
  __ bind(Lzero_loop);
  __ std(R0, 0, Rslot_addr);
  __ addi(Rslot_addr, Rslot_addr, -Interpreter::stackElementSize);
  __ bdnz(Lzero_loop);

  __ bind(Lno_locals);

  // --------------------------------------------------------------------------
  // Counter increment and overflow check.
  Label invocation_counter_overflow,
        profile_method,
        profile_method_continue;
  if (inc_counter || ProfileInterpreter) {

    Register Rdo_not_unlock_if_synchronized_addr = R11_scratch1;
    if (synchronized) {
      // Since at this point in the method invocation the exception handler
      // would try to exit the monitor of a synchronized method which hasn't
      // been entered yet, we set the thread local variable
      // _do_not_unlock_if_synchronized to true. If any exception was thrown by
      // the runtime, exception handling, i.e. unlock_if_synchronized_method,
      // will check this thread local flag.
      // This flag forces an unwind in the topmost interpreter frame without
      // performing an unlock while doing so.
      __ li(R0, 1);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
    // Increment invocation counter and check for overflow.
    if (inc_counter) {
      generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    }

    __ bind(profile_method_continue);

    // Reset the _do_not_unlock_if_synchronized flag.
    if (synchronized) {
      __ li(R0, 0);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
  }

  // --------------------------------------------------------------------------
  // Locking of synchronized methods. Must happen AFTER invocation_counter
  // check and stack overflow check, so method is not locked if counter overflows.
  if (synchronized) {
    lock_method(R3_ARG1, R4_ARG2, R5_ARG3);
  }
#ifdef ASSERT
  else {
    Label Lok;
    __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
    __ andi_(R0, R0, JVM_ACC_SYNCHRONIZED);
    __ asm_assert_eq("method needs synchronization", 0x8521);
    __ bind(Lok);
  }
#endif // ASSERT

  __ verify_thread();

  // --------------------------------------------------------------------------
  // JVMTI support
  __ notify_method_entry();

  // --------------------------------------------------------------------------
  // Start executing instructions.
  __ dispatch_next(vtos);

  // --------------------------------------------------------------------------
  // Out of line counter overflow and MDO creation code.
  if (ProfileInterpreter) {
    // We have decided to profile this method in the interpreter.
    __ bind(profile_method);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
    __ set_method_data_pointer_for_bcp();
    __ b(profile_method_continue);
  }

  if (inc_counter) {
    // Handle invocation counter overflow.
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(profile_method_continue);
  }
  return entry;
}

// =============================================================================
// Entry points

address AbstractInterpreterGenerator::generate_method_entry(
                                        AbstractInterpreter::MethodKind kind) {
  // Determine code generation flags.
  bool synchronized = false;
  address entry_point = NULL;

  switch (kind) {
  case Interpreter::zerolocals             :                                                                              break;
  case Interpreter::zerolocals_synchronized: synchronized = true;                                                         break;
  case Interpreter::native                 : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);  break;
  case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true);   break;
  case Interpreter::empty                  : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();        break;
  case Interpreter::accessor               : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();     break;
  case Interpreter::abstract               : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();     break;

  case Interpreter::java_lang_math_sin     : // fall thru
  case Interpreter::java_lang_math_cos     : // fall thru
  case Interpreter::java_lang_math_tan     : // fall thru
  case Interpreter::java_lang_math_abs     : // fall thru
  case Interpreter::java_lang_math_log     : // fall thru
  case Interpreter::java_lang_math_log10   : // fall thru
  case Interpreter::java_lang_math_sqrt    : // fall thru
  case Interpreter::java_lang_math_pow     : // fall thru
  case Interpreter::java_lang_math_exp     : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);     break;
  case Interpreter::java_lang_ref_reference_get
                                           : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
  default                                  : ShouldNotReachHere();                                                        break;
  }

  if (entry_point) {
    return entry_point;
  }

  return ((InterpreterGenerator*) this)->generate_normal_entry(synchronized);
}

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
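// (That is, a method whose kind has a math entry here (abs, and sqrt where
// supported) reports itself as not compilable, so callers keep dispatching
// to the single-instruction interpreter entry.)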
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  return !math_entry_available(method_kind(m));
}

// How much stack a method activation needs in stack slots.
// We must calc this exactly like in generate_fixed_frame.
// Note: This returns the conservative size assuming maximum alignment.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int max_alignment_size = 2;
  const int abi_scratch = frame::abi_reg_args_size;
  return method->max_locals() + method->max_stack() +
         frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
}

// Returns number of stackElementWords needed for the interpreter frame with the
// given sections.
// This overestimates the stack by one slot in case of alignments.
int AbstractInterpreter::size_activation(int max_stack,
                                         int temps,
                                         int popframe_args,
                                         int monitors,
                                         int callee_params,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in AbstractInterpreterGenerator::generate_method_entry.
  assert(Interpreter::stackElementWords == 1, "sanity");
  const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
  const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
                                         (frame::abi_minframe_size / Interpreter::stackElementSize);
  const int size =
    max_stack                                          +
    (callee_locals - callee_params)                    +
    monitors * frame::interpreter_frame_monitor_size() +
    max_alignment_space                                +
    abi_scratch                                        +
    frame::ijava_state_size / Interpreter::stackElementSize;

  // Fixed size of an interpreter frame, align to 16 bytes.
  return (size & -2);
}

// Fills a skeletal interpreter frame generated during deoptimizations.
//
// Parameters:
//
// interpreter_frame != NULL:
//   set up the method, locals, and monitors.
//   The frame interpreter_frame, if not NULL, is guaranteed to be the
//   right size, as determined by a previous call to this method.
//   It is also guaranteed to be walkable even though it is in a skeletal state.
//
// is_top_frame == true:
//   We're processing the *oldest* interpreter frame!
//
// popframe_extra_args:
//   If this is != 0 we are returning to a deoptimized frame by popping
//   off the callee frame. We want to re-execute the call that called the
//   callee interpreted, but since returning to the interpreter would pop
//   the arguments off, we advance the esp by dummy popframe_extra_args slots.
//   Popping off those will establish the stack layout as it was before the call.
//
void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_locals_count,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {

  const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
                                         (frame::abi_minframe_size / Interpreter::stackElementSize);

  intptr_t* locals_base = (caller->is_interpreted_frame()) ?
// Fills a skeletal interpreter frame generated during deoptimizations.
//
// Parameters:
//
// interpreter_frame != NULL:
//   set up the method, locals, and monitors.
//   The frame interpreter_frame, if not NULL, is guaranteed to be the
//   right size, as determined by a previous call to this method.
//   It is also guaranteed to be walkable even though it is in a skeletal state.
//
// is_top_frame == true:
//   We're processing the *oldest* interpreter frame!
//
// popframe_extra_args:
//   If this is != 0, we are returning to a deoptimized frame by popping
//   off the callee frame. We want to re-execute the call that called the
//   callee interpreted, but since the return to the interpreter would pop
//   the arguments off, advance the esp by dummy popframe_extra_args slots.
//   Popping off those will establish the stack layout as it was before the call.
//
void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_locals_count,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {

  const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
                                         (frame::abi_minframe_size / Interpreter::stackElementSize);

  intptr_t* locals_base  = (caller->is_interpreted_frame()) ?
    caller->interpreter_frame_esp() + caller_actual_parameters :
    caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize);

  intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize;
  intptr_t* monitor      = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
  intptr_t* esp_base     = monitor - 1;
  intptr_t* esp          = esp_base - tempcount - popframe_extra_args;
  intptr_t* sp           = (intptr_t *) (((intptr_t) (esp_base - callee_locals_count + callee_param_count - method->max_stack() - abi_scratch)) & -StackAlignmentInBytes);
  intptr_t* sender_sp    = caller->sp() + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
  intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;

  interpreter_frame->interpreter_frame_set_method(method);
  interpreter_frame->interpreter_frame_set_locals(locals_base);
  interpreter_frame->interpreter_frame_set_cpcache(method->constants()->cache());
  interpreter_frame->interpreter_frame_set_esp(esp);
  interpreter_frame->interpreter_frame_set_monitor_end((BasicObjectLock *)monitor);
  interpreter_frame->interpreter_frame_set_top_frame_sp(top_frame_sp);
  if (!is_bottom_frame) {
    interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
  }
}
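// For orientation, the skeletal frame computed above looks roughly like this
// (higher addresses on top; a sketch derived from the pointer arithmetic
// above, not an authoritative layout description):
//
//   caller->sp()  ->  +------------------------------+
//                     |  ijava_state                 |
//   monitor_base ->   +------------------------------+
//                     |  moncount monitors           |
//   monitor      ->   +------------------------------+   (esp_base = monitor - 1)
//                     |  tempcount expression slots  |
//                     |  + popframe_extra_args       |
//   esp          ->   +------------------------------+
//                     |  room for max_stack, callee  |
//                     |  locals and abi_scratch      |
//   sp            ->  +------------------------------+   (aligned down)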
// =============================================================================
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  Register Rexception    = R17_tos,
           Rcontinuation = R3_RET;

  // --------------------------------------------------------------------------
  // Entry point if a method returns with a pending exception (rethrow).
  Interpreter::_rethrow_exception_entry = __ pc();
  {
    __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);

    // Compiled code destroys templateTableBase, reload.
    __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
  }

  // Entry point if an interpreted method throws an exception (throw).
  Interpreter::_throw_exception_entry = __ pc();
  {
    __ mr(Rexception, R3_RET);

    __ verify_thread();
    __ verify_oop(Rexception);

    // Expression stack must be empty before entering the VM in case of an exception.
    __ empty_expression_stack();
    // Find exception handler address and preserve exception oop.
    // Call C routine to find handler and jump to it.
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Rexception);
    __ mtctr(Rcontinuation);
    // Push exception for exception handler bytecodes.
    __ push_ptr(Rexception);

    // Jump to exception handler (may be the remove-activation entry!).
    __ bctr();
  }

  // If the exception is not handled in the current frame, the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  //       which caused the exception and the expression stack is
  //       empty. Thus, for any VM calls at this point, GC will find a legal
  //       oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // bcp: exception bcp

  // --------------------------------------------------------------------------
  // JVMTI PopFrame support

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  {
    // Set the popframe_processing bit in popframe_condition indicating that we are
    // currently handling popframe, so that call_VMs that may happen later do not
    // trigger new popframe handling cycles.
    __ lwz(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
    __ ori(R11_scratch1, R11_scratch1, JavaThread::popframe_processing_bit);
    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);

    // Empty the expression stack, as in normal exception handling.
    __ empty_expression_stack();
    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);

    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label Lcaller_not_deoptimized;
    Register return_pc = R3_ARG1;
    __ ld(return_pc, 0, R1_SP);
    __ ld(return_pc, _abi(lr), return_pc);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc);
    __ cmpdi(CCR0, R3_RET, 0);
    __ bne(CCR0, Lcaller_not_deoptimized);

    // The deoptimized case.
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    __ ld(R4_ARG2, in_bytes(Method::const_offset()), R19_method);
    __ lhz(R4_ARG2 /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), R4_ARG2);
    __ slwi(R4_ARG2, R4_ARG2, Interpreter::logStackElementSize);
    __ addi(R5_ARG3, R18_locals, Interpreter::stackElementSize);
    __ subf(R5_ARG3, R4_ARG2, R5_ARG3);
    // Save these arguments.
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R16_thread, R4_ARG2, R5_ARG3);

    // Inform deoptimization that it is responsible for restoring these arguments.
    __ load_const_optimized(R11_scratch1, JavaThread::popframe_force_deopt_reexecution_bit);
    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);

    // Return from the current method into the deoptimization blob. Will eventually
    // end up in the deopt interpreter entry; deoptimization has prepared everything
    // such that we will re-execute the call that called us.
    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*reload return_pc*/ return_pc, R11_scratch1, R12_scratch2);
    __ mtlr(return_pc);
    __ blr();

    // The non-deoptimized case.
    __ bind(Lcaller_not_deoptimized);

    // Clear the popframe condition flag.
    __ li(R0, 0);
    __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);

    // Get out of the current method and re-execute the call that called us.
    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ return_pc, R11_scratch1, R12_scratch2);
    __ restore_interpreter_state(R11_scratch1);
    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
    __ mtlr(return_pc);
    if (ProfileInterpreter) {
      __ set_method_data_pointer_for_bcp();
    }
    __ dispatch_next(vtos);
  }
  // end of JVMTI PopFrame support
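  // Summary of the popframe_condition hand-shake above (a reading aid derived
  // from the code; the bit names are the JavaThread constants used above):
  //  1. On entry, popframe_processing_bit is OR-ed in so that nested call_VMs
  //     do not re-trigger popframe handling.
  //  2. Deoptimized caller: the outgoing arguments are saved via
  //     Deoptimization::popframe_preserve_args, and
  //     popframe_force_deopt_reexecution_bit tells the deoptimization blob to
  //     restore them before re-executing the call.
  //  3. Interpreted caller: the condition word is simply cleared and we
  //     re-enter the caller via dispatch_next(vtos).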
  // --------------------------------------------------------------------------
  // Remove activation exception entry.
  // This is jumped to if an interpreted method can't handle an exception itself
  // (we come from the throw/rethrow exception entry above). We're going to call
  // into the VM to find the exception handler in the caller, pop the current
  // frame and return the handler we calculated.
  Interpreter::_remove_activation_entry = __ pc();
  {
    __ pop_ptr(Rexception);
    __ verify_thread();
    __ verify_oop(Rexception);
    __ std(Rexception, in_bytes(JavaThread::vm_result_offset()), R16_thread);

    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, true);
    __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI, false);

    __ get_vm_result(Rexception);

    // We are done with this activation frame; find out where to go next.
    // The continuation point will be an exception handler, which expects
    // the following registers to be set up:
    //
    // RET:  exception oop
    // ARG2: issuing PC (see generate_exception_blob()), only used if the caller is compiled.

    Register return_pc = R31; // Needs to survive the runtime call.
    __ ld(return_pc, 0, R1_SP);
    __ ld(return_pc, _abi(lr), return_pc);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, return_pc);

    // Remove the current activation.
    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);

    __ mr(R4_ARG2, return_pc);
    __ mtlr(R3_RET);
    __ mr(R3_RET, Rexception);
    __ blr();
  }
}
// JVMTI ForceEarlyReturn support.
// Returns "in the middle" of a method with a "fake" return value.
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {

  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2;

  address entry = __ pc();
  __ empty_expression_stack();

  __ load_earlyret_value(state, Rscratch1);

  __ ld(Rscratch1, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
  // Clear the earlyret state.
  __ li(R0, 0);
  __ stw(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rscratch1);

  __ remove_activation(state, false, false);
  // Copied from TemplateTable::_return.
  // Restoration of lr done by remove_activation.
  switch (state) {
    case ltos:
    case btos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R3_RET, R17_tos); break;
    case ftos:
    case dtos: __ fmr(F1_RET, F15_ftos); break;
    case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
               // to become visible before the reference to the object gets stored anywhere.
               __ membar(Assembler::StoreStore); break;
    default  : ShouldNotReachHere();
  }
  __ blr();

  return entry;
} // end of ForceEarlyReturn support

//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;

  aep = __ pc();  __ push_ptr();  __ b(L);
  fep = __ pc();  __ push_f();    __ b(L);
  dep = __ pc();  __ push_d();    __ b(L);
  lep = __ pc();  __ push_l();    __ b(L);
  __ align(32, 12, 24); // align L
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}
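// Reading aid (derived from the code above, not additional behavior): every
// non-vtos tos state gets an entry that pushes the cached value and then
// joins the common code at L. btos/ctos/sep share the itos entry because
// sub-word values are kept as ints in the tos register, so push_i covers
// them all; iep simply falls through into L, and vep starts directly at L
// since a void tos has nothing to push.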
//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch

InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // Down here so it can be "virtual".
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  //__ flush_bundle();
  address entry = __ pc();

  char *bname = NULL;
  uint tsize = 0;
  switch(state) {
    case ftos:
      bname = "trace_code_ftos {";
      tsize = 2;
      break;
    case btos:
      bname = "trace_code_btos {";
      tsize = 2;
      break;
    case ctos:
      bname = "trace_code_ctos {";
      tsize = 2;
      break;
    case stos:
      bname = "trace_code_stos {";
      tsize = 2;
      break;
    case itos:
      bname = "trace_code_itos {";
      tsize = 2;
      break;
    case ltos:
      bname = "trace_code_ltos {";
      tsize = 3;
      break;
    case atos:
      bname = "trace_code_atos {";
      tsize = 2;
      break;
    case vtos:
      // Note: In case of vtos, the topmost stack value could be an int or a double.
      // In case of a double (2 slots) we won't see the 2nd stack value.
      // Maybe we should simply print the topmost 3 stack slots to cope with the problem.
      bname = "trace_code_vtos {";
      tsize = 2;
      break;
    case dtos:
      bname = "trace_code_dtos {";
      tsize = 3;
      break;
    default:
      ShouldNotReachHere();
  }
  BLOCK_COMMENT(bname);

  // Support short-cut for TraceBytecodesAt.
  // Don't call into the VM if we don't want to trace, to speed things up.
  Label Lskip_vm_call;
  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
    int offs1 = __ load_const_optimized(R11_scratch1, (address) &TraceBytecodesAt, R0, true);
    int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
    __ ld(R11_scratch1, offs1, R11_scratch1);
    __ lwa(R12_scratch2, offs2, R12_scratch2);
    __ cmpd(CCR0, R12_scratch2, R11_scratch1);
    __ blt(CCR0, Lskip_vm_call);
  }

  __ push(state);
  // Load 2 topmost expression stack values.
  __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
  __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
  __ mflr(R31);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
  __ mtlr(R31);
  __ pop(state);

  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
    __ bind(Lskip_vm_call);
  }
  __ blr();
  BLOCK_COMMENT("} trace_code");
  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
  __ lwz(R12_scratch2, offs, R11_scratch1);
  __ addi(R12_scratch2, R12_scratch2, 1);
  __ stw(R12_scratch2, offs, R11_scratch1);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
  __ lwz(R12_scratch2, offs, R11_scratch1);
  __ addi(R12_scratch2, R12_scratch2, 1);
  __ stw(R12_scratch2, offs, R11_scratch1);
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register addr = R11_scratch1,
                 tmp  = R12_scratch2;
  // Get index, shift out old bytecode, bring in new bytecode, and store it.
  // _index = (_index >> log2_number_of_codes) |
  //          (bytecode << log2_number_of_codes);
  // I.e. the new index encodes (previous bytecode) | (current bytecode << log2),
  // so each consecutive bytecode pair selects one histogram bucket.
  int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
  __ lwz(tmp, offs1, addr);
  __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
  __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ stw(tmp, offs1, addr);

  // Bump bucket contents.
  // _counters[_index] ++;
  int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
  __ sldi(tmp, tmp, LogBytesPerInt);
  __ add(addr, tmp, addr);
  __ lwz(tmp, offs2, addr);
  __ addi(tmp, tmp, 1);
  __ stw(tmp, offs2, addr);
}

void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");

  // Note: we destroy LR here.
  __ bl(Interpreter::trace_code(t->tos_in()));
}

void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  int offs1 = __ load_const_optimized(R11_scratch1, (address) &StopInterpreterAt, R0, true);
  int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
  __ ld(R11_scratch1, offs1, R11_scratch1);
  __ lwa(R12_scratch2, offs2, R12_scratch2);
  __ cmpd(CCR0, R12_scratch2, R11_scratch1);
  __ bne(CCR0, L);
  __ illtrap();
  __ bind(L);
}

#endif // !PRODUCT
#endif // !CC_INTERP