/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

// Size of interpreter code.  Increase if too small.  The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI.
// The sethi() instruction generates lots more instructions when the shell
// stack limit is unlimited, so that's why this is much bigger.
int TemplateInterpreter::InterpreterCodeSize = 260 * K;

// Generation of Interpreter
//
// The TemplateInterpreterGenerator generates the interpreter into Interpreter::_code.


#define __ _masm->


//----------------------------------------------------------------------------------------------------

// LP64 passes floating point arguments in F1, F3, F5, etc. instead of
// O0, O1, O2, etc.
// Doubles are passed in D0, D2, D4.
// We store the signature of the first 16 arguments in the first argument
// slot because it will be overwritten prior to calling the native
// function, with the pointer to the JNIEnv.
// If LP64 there can be up to 16 floating point arguments in registers
// or 6 integer registers.
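// A sketch of the encoding consumed below (inferred from the loop, not
// spelled out in the original comments): slow_signature_handler packs two
// bits per argument into the signature word stored in argument slot 0,
// using the enum values defined at the top of the handler (non_float = 0,
// float_sig = 1, double_sig = 2). The dispatch loop masks with sig_mask and
// shifts right by two to consume one argument per iteration; for example,
// a signature (int, float, double) yields, low bits first: 00, 01, 10.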
address TemplateInterpreterGenerator::generate_slow_signature_handler() {

  enum {
    non_float  = 0,
    float_sig  = 1,
    double_sig = 2,
    sig_mask   = 3
  };

  address entry = __ pc();
  Argument argv(0, true);

  // We are in the jni transition frame. Save the last_java_frame corresponding to the
  // outer interpreter frame
  //
  __ set_last_Java_frame(FP, noreg);
  // make sure the interpreter frame we've pushed has a valid return pc
  __ mov(O7, I7);
  __ mov(Lmethod, G3_scratch);
  __ mov(Llocals, G4_scratch);
  __ save_frame(0);
  __ mov(G2_thread, L7_thread_cache);
  __ add(argv.address_in_frame(), O3);
  __ mov(G2_thread, O0);
  __ mov(G3_scratch, O1);
  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
  __ delayed()->mov(G4_scratch, O2);
  __ mov(L7_thread_cache, G2_thread);
  __ reset_last_Java_frame();


  // load the register arguments (the C code packed them as varargs)
  Address Sig = argv.address_in_frame();   // Argument 0 holds the signature
  __ ld_ptr( Sig, G3_scratch );            // Get register argument signature word into G3_scratch
  __ mov( G3_scratch, G4_scratch);
  __ srl( G4_scratch, 2, G4_scratch);      // Skip Arg 0
  Label done;
  for (Argument ldarg = argv.successor(); ldarg.is_float_register(); ldarg = ldarg.successor()) {
    Label NonFloatArg;
    Label LoadFloatArg;
    Label LoadDoubleArg;
    Label NextArg;
    Address a = ldarg.address_in_frame();
    __ andcc(G4_scratch, sig_mask, G3_scratch);
    __ br(Assembler::zero, false, Assembler::pt, NonFloatArg);
    __ delayed()->nop();

    __ cmp(G3_scratch, float_sig );
    __ br(Assembler::equal, false, Assembler::pt, LoadFloatArg);
    __ delayed()->nop();

    __ cmp(G3_scratch, double_sig );
    __ br(Assembler::equal, false, Assembler::pt, LoadDoubleArg);
    __ delayed()->nop();

    __ bind(NonFloatArg);
    // There are only 6 integer register arguments!
    if ( ldarg.is_register() )
      __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
    else {
      // Optimization, see if there are any more args and get out prior to checking
      // all 16 float registers.  My guess is that this is rare.
      // If is_register is false, then we are done with the first six integer args.
      __ br_null_short(G4_scratch, Assembler::pt, done);
    }
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(LoadFloatArg);
    __ ldf( FloatRegisterImpl::S, a, ldarg.as_float_register(), 4);
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(LoadDoubleArg);
    __ ldf( FloatRegisterImpl::D, a, ldarg.as_double_register() );
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(NextArg);
  }

  __ bind(done);
  __ ret();
  __ delayed()->restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler

  return entry;
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {

  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes two arguments,
  // the first indicates if the counter overflow occurs at a backwards branch (NULL bcp),
  // and the second is only used when the first is true.  We pass zero for both.
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ set((int)false, O2);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O2, O2, true);
  // returns verified_entry_point or NULL
  // we ignore it in any case
  __ ba_short(Lcontinue);
}


// End of helpers

// Various method entries

// Abstract method entry
// Attempt to execute abstract method. Throw exception
//
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry = __ pc();
  // abstract method entry
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), G5_method);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
  return entry;
}

void TemplateInterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
  __ stx(O0, l_tmp);
}

void TemplateInterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
  __ ldx(l_tmp, O0);
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  if (state == atos) {
    __ profile_return_type(O0, G3_scratch, G1_scratch);
  }

  // The callee returns with the stack possibly adjusted by adapter transition
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.

  const Register cache = G3_scratch;
  const Register index = G1_scratch;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
  const Register parameter_size = flags;
  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments

  __ check_and_handle_popframe(Gtemp);
  __ check_and_handle_earlyret(Gtemp);

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.  This can
  // only occur on method entry so emit it only for vtos with step 0.
  if (EnableJVMCI && state == vtos && step == 0) {
    Label L;
    Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
    __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
    __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
    // Clear flag.
    __ stbool(G0, pending_monitor_enter_addr);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (EnableJVMCI) {
      Label L;
      Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
      __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
      __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ br_null_short(Gtemp, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  if (continuation == NULL) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
    case T_INT    : __ mov(O0, Itos_i);                           break;
    case T_VOID   : /* nothing to do */                           break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );       break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );       break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                                   // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_int32(0);)              // marker for disassembly
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


//
// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
//   ??: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in MethodCounters* or in
  // MDO depending if we're profiling or not.
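  // (Sketch of increment_mask_and_jump, inferred from its use here rather
  // than from its definition: it performs "counter += increment; if
  // ((counter & mask) == 0) branch to overflow", with the mask loaded from
  // the MethodData*/MethodCounters* so the thresholds stay runtime-tunable.)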
  const Register G3_method_counters = G3_scratch;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(MethodData::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba_short(done);
    }

    // Increment counter in MethodCounters*
    __ bind(no_mdo);
    Address invocation_counter(G3_method_counters,
                               in_bytes(MethodCounters::invocation_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
    __ get_method_counters(Lmethod, G3_method_counters, done);
    Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G4_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    // Update standard invocation counters
    __ get_method_counters(Lmethod, G3_method_counters, done);
    __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
    if (ProfileInterpreter) {
      Address interpreter_invocation_counter(G3_method_counters,
                                             in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G4_scratch);
      __ inc(G4_scratch);
      __ st(G4_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
      __ ld(profile_limit, G1_scratch);
      __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
    __ ld(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
    __ delayed()->nop();
    __ bind(done);
  }
}

// Allocate monitor and lock method (asm interpreter)
// Lmethod - Method*
//
void TemplateInterpreterGenerator::lock_method() {
  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.

#ifdef ASSERT
  { Label ok;
    __ btst(JVM_ACC_SYNCHRONIZED, O0);
    __ br( Assembler::notZero, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    // lock the mirror, not the Klass*
    __ load_mirror(O0, Lmethod, Lscratch);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);                       // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes()); // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch) {
  const int page_size = os::vm_page_size();
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch);

  __ set(page_size, Rscratch);
  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);

  // Get the stack overflow limit, and in debug, verify it is non-zero.
  __ ld_ptr(G2_thread, JavaThread::stack_overflow_limit_offset(), Rscratch);
#ifdef ASSERT
  Label limit_ok;
  __ br_notnull_short(Rscratch, Assembler::pn, limit_ok);
  __ stop("stack overflow limit is zero in generate_stack_overflow_check");
  __ bind(limit_ok);
#endif

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register).
  __ add(Rscratch, Rframe_size, Rscratch);

  // The frame is greater than one page in size, so check against
  // the bottom of the stack.
  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);

  // The stack will overflow, throw an exception.

  // Note that SP is restored to sender's sp (in the delay slot). This
  // is necessary if the sender's frame is an extended compiled frame
  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
  // adaptations.

  // Note also that the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
  __ jump_to(stub, Rscratch);
  __ delayed()->mov(O5_savedSP, SP);

  // If you get to here, then there is enough stack space.
  __ bind(after_frame_check);
}


//
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods hence the shared code.


//----------------------------------------------------------------------------------------------------
// Stack frame layout
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the Method* of the method to call
//    Lesp:      points to the TOS of the caller's expression stack
//               after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
//   pop parameters from the caller's stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//               + vm_local_words
//               + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code

// Try this optimization:  Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations.  It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs    : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP by the amount of extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  int rounded_vm_local_words = align_up((int)frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    Method::extra_stack_entries() +            // extra stack for jsr 292
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register RconstMethod = Glocals_size;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.
  const Address constMethod       (G5_method, Method::const_offset());
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size, extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );

    // Native calls don't need the stack size check since they have no
    // expression stack and the arguments are already on the stack and
    // we only add a handful of words to the stack.
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    const Address size_of_locals(Otmp1, ConstMethod::size_of_locals_offset());
    __ ld_ptr(constMethod, Otmp1);
    __ lduh(size_of_locals, Otmp1);
    __ sub(Otmp1, Glocals_size, Glocals_size);
    __ round_to(Glocals_size, WordsPerLong);
    __ sll(Glocals_size, Interpreter::logStackElementSize, Glocals_size);

    // See if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining.
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ ld_ptr(constMethod, Gframe_size);
    __ lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size);
    __ add(Gframe_size, extra_space, Gframe_size);
    __ round_to(Gframe_size, WordsPerLong);
    __ sll(Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add(Gframe_size, Glocals_size, Gframe_size);

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1);

    __ sub(Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub(SP, Glocals_size, SP);
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use.
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                                   // set Lmethod
  // Get mirror and store it in the frame as GC root for this Method*
  Register mirror = LcpoolCache;
  __ load_mirror(mirror, Lmethod, Lscratch);
  __ st_ptr(mirror, FP, (frame::interpreter_frame_mirror_offset * wordSize) + STACK_BIAS);
  __ get_constant_pool_cache(LcpoolCache);                       // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
  __ add(Lmonitors, STACK_BIAS, Lmonitors);                      // Account for 64 bit stack bias
  __ sub(Lmonitors, BytesPerWord, Lesp);                         // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);                          // set Llocals

  if (ProfileInterpreter) {
    __ set_method_data_pointer();
  }

}

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load.
  //   Thus we can use the regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;

  // In the G1 code we don't check if we need to reach a safepoint. We
  // continue and the thread will safepoint at the next bytecode dispatch.

  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
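  // This entry has parameter size 1, so Gargs, which points at the last
  // argument, addresses local 0 (the Reference receiver) directly.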
  __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
  // check if local 0 == NULL and go to the slow path
  __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);

  __ load_heap_oop(Otos_i, referent_offset, Otos_i, G3_scratch, ON_WEAK_OOP_REF);

  // _areturn
  __ retl();                     // return from leaf routine
  __ delayed()->mov(O5_savedSP, SP);

  // Generate regular method entry
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {

  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    Label L_slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    __ safepoint_poll(L_slow_path, false, G2_thread, O2);
    __ delayed()->nop();

    // Load parameters
    const Register crc   = O0; // initial crc
    const Register val   = O1; // byte to update with
    const Register table = O2; // address of 256-entry lookup table

    __ ldub(Gargs, 3, val);
    __ lduw(Gargs, 8, crc);

    __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);

    __ not1(crc); // ~crc
    __ clruwu(crc);
    __ update_byte_crc32(crc, val, table);
    __ not1(crc); // ~crc

    // result in O0
    __ retl();
    __ delayed()->nop();

    // generate a vanilla native entry as the slow path
    __ bind(L_slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    Label L_slow_path;
    // If we need a safepoint check, generate full interpreter entry.

    __ safepoint_poll(L_slow_path, false, G2_thread, O2);
    __ delayed()->nop();

    // Load parameters from the stack
    const Register crc    = O0; // initial crc
    const Register buf    = O1; // source java byte array address
    const Register len    = O2; // len
    const Register offset = O3; // offset

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ lduw(Gargs, 0,  len);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 32, crc);
      __ add(buf, offset, buf);
    } else {
      __ lduw(Gargs, 0,  len);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 24, crc);
      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
      __ add(buf, offset, buf);
    }

    // Call the crc32 kernel
    __ MacroAssembler::save_thread(L7_thread_cache);
    __ kernel_crc32(crc, buf, len, O3);
    __ MacroAssembler::restore_thread(L7_thread_cache);

    // result in O0
    __ retl();
    __ delayed()->nop();

    // generate a vanilla native entry as the slow path
    __ bind(L_slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native.
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32CIntrinsics) {
    address entry = __ pc();

    // Load parameters from the stack
    const Register crc    = O0; // initial crc
    const Register buf    = O1; // source java byte array address
    const Register offset = O2; // offset
    const Register end    = O3; // index of last element to process
    const Register len    = O2; // len argument to the kernel
    const Register table  = O3; // crc32c lookup table address

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
      __ lduw(Gargs, 0,  end);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 32, crc);
      __ add(buf, offset, buf);
      __ sub(end, offset, len);
    } else {
      __ lduw(Gargs, 0,  end);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 24, crc);
      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
      __ add(buf, offset, buf);
      __ sub(end, offset, len);
    }

    // Call the crc32c kernel
    __ MacroAssembler::save_thread(L7_thread_cache);
    __ kernel_crc32c(crc, buf, len, table);
    __ MacroAssembler::restore_thread(L7_thread_cache);

    // result in O0
    __ retl();
    __ delayed()->nop();

    return entry;
  }
  return NULL;
}

/* Math routines only partially supported.
 *
 * Providing support for fma (float/double) only.
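 *
 * Note on the argument loads below (inferred from the code, not part of the
 * original header): arguments are read straight off the Java expression
 * stack via Gargs. Each stack element is one word and a double occupies two
 * elements, hence the double-precision loads use offsets 0, 16 and 32 while
 * the single-precision loads use 0, 8 and 16.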
 */
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind)
{
  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  address entry = __ pc();

  switch (kind) {
    case Interpreter::java_lang_math_fmaF:
      if (UseFMA) {
        // float .fma(float a, float b, float c)
        const FloatRegister ra = F1;
        const FloatRegister rb = F2;
        const FloatRegister rc = F3;
        const FloatRegister rd = F0; // Result.

        __ ldf(FloatRegisterImpl::S, Gargs,  0, rc);
        __ ldf(FloatRegisterImpl::S, Gargs,  8, rb);
        __ ldf(FloatRegisterImpl::S, Gargs, 16, ra);

        __ fmadd(FloatRegisterImpl::S, ra, rb, rc, rd);
        __ retl(); // Result in F0 (rd).
        __ delayed()->mov(O5_savedSP, SP);

        return entry;
      }
      break;
    case Interpreter::java_lang_math_fmaD:
      if (UseFMA) {
        // double .fma(double a, double b, double c)
        const FloatRegister ra = F2; // D1
        const FloatRegister rb = F4; // D2
        const FloatRegister rc = F6; // D3
        const FloatRegister rd = F0; // D0 Result.

        __ ldf(FloatRegisterImpl::D, Gargs,  0, rc);
        __ ldf(FloatRegisterImpl::D, Gargs, 16, rb);
        __ ldf(FloatRegisterImpl::D, Gargs, 32, ra);

        __ fmadd(FloatRegisterImpl::D, ra, rb, rc, rd);
        __ retl(); // Result in D0 (rd).
        __ delayed()->mov(O5_savedSP, SP);

        return entry;
      }
      break;
    default:
      break;
  }
  return NULL;
}

// TODO: rather than touching all pages, check against stack_overflow_limit and bang yellow page to
// generate exception
void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch ;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  { Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
      JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which hasn't
  // been entered yet, we set the thread-local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, the exception handling code (i.e. unlock_if_synchronized_method)
  // will check this thread-local flag.
  // This flag has two effects: it forces an unwind in the topmost
  // interpreter frame, and suppresses the unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }


  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, Method::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ br_notnull_short(G3_scratch, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in it.
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);    // Calculate negative of current frame size
  __ save(SP, O3, SP);   // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);   // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // call signature handler. It will move the args properly since Llocals in
  // the current frame matches that in the outer frame

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
    // for static methods insert the mirror argument
    __ load_mirror(O1, Lmethod, G3_scratch);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ br_notnull_short(O1, Assembler::pt, L);
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ br_notnull_short(O0, Assembler::pt, L);
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flushw();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
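  // The delayed add below materializes the JNIEnv* (thread pointer plus
  // jni_environment_offset) into O0, the first C argument register, while
  // the call is already in flight.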
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);

  // Back from jni method.  Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }

    Label L;
    __ safepoint_poll(L, false, G2_thread, G3_scratch);
    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
    __ bind(L);

    // Block.  Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
  __ restore(O0, G0, O0);

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ st_ptr(G0, G2_thread, JavaThread::pending_jni_exception_check_fn_offset());
  }

  // reset handle block
  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

  // If we have an oop result, store it where it will be safe for any further gc
  // until we return, now that we've released the handle it might be protected by.

  { Label no_oop;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
    __ resolve_jobject(O0, G3_scratch);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);
  }


  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);
    __ br_null_short(Gtemp, Assembler::pt, L);
    // Note: This could be handled more efficiently since we know that the native
    //       method doesn't have an exception handler. We could directly return
    //       to the exception handler for the caller.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

  // dispose of return address and remove activation
#ifdef ASSERT
  { Label ok;
    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  __ jmp(Lscratch, 0);
  __ delayed()->nop();

  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }

  return entry;
}


// Generic method entry to (asm) interpreter
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch ;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address constMethod (G5_method, Method::const_offset());
  // Seems like G5_method is live at the point this is used. So we could make this look consistent
  // and use it in the asserts.

// Generic method entry to (asm) interpreter
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address constMethod  (G5_method, Method::const_offset());
  // Seems like G5_method is live at the point this is used. So we could make this look consistent
  // and use it in the asserts.
  const Address access_flags (Lmethod,   Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  { Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg; // will be G0 if we must clear locals
  // The way the code was set up before, zerolocals was always true for vanilla java entries.
  // It could only be false for the specialized entries like accessor or empty which have
  // no extra locals, so the testing was a waste of time and the extra locals were always
  // initialized. We removed this extra complication from already over-complicated code.

  init_value = G0;
  Label clear_loop;

  const Register RconstMethod = O1;
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize, O2 );
  __ sll( O1, Interpreter::logStackElementSize, O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );

  const Address do_not_unlock_if_synchronized(G2_thread,
      JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
  // check this thread-local flag.
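  // Illustrative sketch (assumption: simplified from the actual unwind logic):
  // with the flag set below, the exception path behaves roughly like --
  //
  //   if (method->is_synchronized() && !thread->do_not_unlock_if_synchronized()) {
  //     unlock_object(monitor);  // safe: the monitor was actually entered
  //   }
  //
  // so an exception raised before lock_method() runs (e.g. during the stack
  // bang or the counter overflow call) does not try to exit a never-entered monitor.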
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch);
  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);


  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ ba_short(profile_method_continue);
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }

  return entry;
}
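
// Illustrative sketch (descriptive pseudo-code, simplified control flow) of the
// exception entry points produced by generate_throw_exception() below --
//
//   throw_exception_entry:       // exception raised while executing bytecodes
//     empty the expression stack
//     handler = InterpreterRuntime::exception_handler_for_exception(Oexception)
//     push exception oop; jump to handler   // may be remove_activation_entry
//
//   remove_activation_entry:     // no handler in this activation
//     unlock if synchronized; pop the frame
//     jump to SharedRuntime::exception_handler_for_return_address(return_pc)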

//----------------------------------------------------------------------------------------------------
// Exceptions
void TemplateInterpreterGenerator::generate_throw_exception() {

  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // O0: exception

  // entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  __ verify_thread();
  // expression stack is undefined here
  // O0: exception, i.e. Oexception
  // Lbcp: exception bcp
  __ verify_oop(Oexception);


  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  // call C routine to find handler and jump to it
  __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
  __ push_ptr(O1); // push exception for exception handler bytecodes

  __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
  __ delayed()->nop();


  // If the exception is not handled in the current frame, the frame is removed
  // and the exception is rethrown (i.e. exception continuation is
  // _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).

  // in current activation
  // tos: exception
  // Lbcp: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
  // Set the popframe_processing bit in popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.

  __ ld(popframe_condition_addr, G3_scratch);
  __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
  __ stw(G3_scratch, popframe_condition_addr);

  // Empty the expression stack, as in normal exception handling
  __ empty_expression_stack();
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
    __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);

    const Register Gtmp1 = G3_scratch;
    const Register Gtmp2 = G1_scratch;
    const Register RconstMethod = Gtmp1;
    const Address constMethod(Lmethod, Method::const_offset());
    const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

    // Compute size of arguments for saving when returning to deoptimized caller
    __ ld_ptr(constMethod, RconstMethod);
    __ lduh(size_of_parameters, Gtmp1);
    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
    __ sub(Llocals, Gtmp1, Gtmp2);
    __ add(Gtmp2, wordSize, Gtmp2);
    // Save these arguments
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
    // Inform deoptimization that it is responsible for restoring these arguments
    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
    __ st(Gtmp1, popframe_condition_addr);

    // Return from the current method
    // The caller's SP was adjusted upon method entry to accommodate
    // the callee's non-argument locals. Undo that adjustment.
    __ ret();
    __ delayed()->restore(I5_savedSP, G0, SP);

    __ bind(caller_not_deoptimized);
  }
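  // Illustrative sketch (descriptive pseudo-code) of the deoptimized-caller
  // path above --
  //
  //   if (!Interpreter::contains(caller_pc)) {   // caller was deoptimized
  //     // the incoming arguments live in the locals area; stash them aside
  //     Deoptimization::popframe_preserve_args(thread, arg_size_in_bytes, first_arg_addr);
  //     thread->set_popframe_condition(popframe_force_deopt_reexecution_bit);
  //     return to caller;                        // deopt machinery restores the args
  //   }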

  // Clear the popframe condition flag
  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);

  // Get out of the current method (how this is done depends on the particular compiler calling
  // convention that the interpreter currently follows)
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ restore(I5_savedSP, G0, SP);
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

#if INCLUDE_JVMTI
  { Label L_done;

    __ ldub(Address(Lbcp, 0), G1_scratch); // Load current bytecode
    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);

    __ br_null(G1_scratch, false, Assembler::pn, L_done);
    __ delayed()->nop();

    __ st_ptr(G1_scratch, Lesp, wordSize);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Resume bytecode interpretation at the current bcp
  __ dispatch_next(vtos);
  // end of JVMTI PopFrame support
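  // Illustrative sketch (simplified; the exact detection lives in
  // InterpreterRuntime::member_name_arg_or_null) of the JVMTI detail above --
  //
  //   if (*bcp == Bytecodes::_invokestatic) {
  //     oop member = member_name_arg_or_null(thread, return_value, method, bcp);
  //     if (member != NULL) esp[1] = member;  // re-materialize the hidden MemberName argument
  //   }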

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
  __ pop_ptr(Oexception); // get exception

  // Intel has the following comment:
  //// remove the activation (without doing throws on illegalMonitorExceptions)
  // They remove the activation without checking for bad monitor state.
  // %%% We should make sure this is the right semantics before implementing.

  __ set_vm_result(Oexception);
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);

  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);

  __ get_vm_result(Oexception);
  __ verify_oop(Oexception);

  const int return_reg_adjustment = frame::pc_return_offset;
  Address issuing_pc_addr(I7, return_reg_adjustment);

  // We are done with this activation frame; find out where to go next.
  // The continuation point will be an exception handler, which expects
  // the following registers set up:
  //
  // Oexception: exception
  // Oissuing_pc: the local call that threw exception
  // Other On: garbage
  // In/Ln: the contents of the caller's register window
  //
  // We do the required restore at the last possible moment, because we
  // need to preserve some state across a runtime call.
  // (Remember that the caller activation is unknown--it might not be
  // interpreted, so things like Lscratch are useless in the caller.)

  // Although the Intel version uses call_C, we can use the more
  // compact call_VM. (The only real difference on SPARC is a
  // harmlessly ignored [re]set_last_Java_frame, compared with
  // the Intel code which lacks this.)
  __ mov(Oexception, Oexception->after_save());       // get exception in I0 so it will be on O0 after restore
  __ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller
  __ super_call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ JMP(O0, 0); // return exception handler in caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  // (same old exception object is already in Oexception; see above)
  // Note that an "issuing PC" is actually the next PC after the call
}


//
// JVMTI ForceEarlyReturn support
//

address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);

  __ remove_activation(state,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret(); // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  return entry;
} // end of JVMTI ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep,
                                                         address& aep, address& iep, address& lep, address& fep,
                                                         address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr(); __ ba_short(L);
  fep = __ pc(); __ push_f();   __ ba_short(L);
  dep = __ pc(); __ push_d();   __ ba_short(L);
  lep = __ pc(); __ push_l();   __ ba_short(L);
  iep = __ pc(); __ push_i();
  bep = cep = sep = iep;        // there aren't any
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}

// --------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ mov(O7, Lscratch); // protect return address within interpreter

  // Pass a 0 (not used on sparc) and the top of stack to the bytecode tracer
  __ mov( Otos_l2, G3_scratch );
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
  __ mov(Lscratch, O7); // restore return address
  __ pop(state);
  __ retl();
  __ delayed()->nop();

  return entry;
}


// helpers for generate_and_dispatch

void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
}
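
// Worked example (illustrative; assumes BytecodePairHistogram::log2_number_of_codes
// is 8, i.e. 256 codes) for the index update in histogram_bytecode_pair() below:
// after executing bytecode b1 and then b2,
//
//   _index = ((b1 << 8) >> 8) | (b2 << 8) = b1 | (b2 << 8)
//
// so _index always encodes the last two bytecodes executed, and the
// _counters[_index]++ below counts each ordered pair of adjacent bytecodes.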
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  AddressLiteral index   (&BytecodePairHistogram::_index);
  AddressLiteral counters((address) &BytecodePairHistogram::_counters);

  // get index, shift out old bytecode, bring in new bytecode, and store it
  // _index = (_index >> log2_number_of_codes) |
  //          (bytecode << log2_number_of_codes);

  __ load_contents(index, G4_scratch);
  __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
  __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch );
  __ or3( G3_scratch, G4_scratch, G4_scratch );
  __ store_contents(G4_scratch, index, G3_scratch);

  // bump bucket contents
  // _counters[_index] ++;

  __ set(counters, G3_scratch);                      // loads into G3_scratch
  __ sll( G4_scratch, LogBytesPerWord, G4_scratch ); // index is a word address
  __ add( G3_scratch, G4_scratch, G3_scratch );      // add in index
  __ ld (G3_scratch, 0, G4_scratch);
  __ inc (G4_scratch);
  __ st (G4_scratch, 0, G3_scratch);
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  address entry = Interpreter::trace_code(t->tos_in());
  guarantee(entry != NULL, "entry must have been generated");
  __ call(entry, relocInfo::none);
  __ delayed()->nop();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  AddressLiteral counter(&BytecodeCounter::_counter_value);
  __ load_contents(counter, G3_scratch);
  AddressLiteral stop_at(&StopInterpreterAt);
  __ load_ptr_contents(stop_at, G4_scratch);
  __ cmp(G3_scratch, G4_scratch);
  __ breakpoint_trap(Assembler::equal, Assembler::icc);
}
#endif // not PRODUCT