/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
// The sethi() instruction generates lots more instructions when shell
// stack limit is unlimited, so that's why this is much bigger.
int TemplateInterpreter::InterpreterCodeSize = 260 * K;

// Generation of Interpreter
//
// The TemplateInterpreterGenerator generates the interpreter into Interpreter::_code.


#define __ _masm->


//----------------------------------------------------------------------------------------------------

// LP64 passes floating point arguments in F1, F3, F5, etc. instead of
// O0, O1, O2 etc..
// Doubles are passed in D0, D2, D4
// We store the signature of the first 16 arguments in the first argument
// slot because it will be overwritten prior to calling the native
// function, with the pointer to the JNIEnv.
// If LP64 there can be up to 16 floating point arguments in registers
// or 6 integer registers.
address TemplateInterpreterGenerator::generate_slow_signature_handler() {

  enum {
    non_float  = 0,
    float_sig  = 1,
    double_sig = 2,
    sig_mask   = 3
  };

  address entry = __ pc();
  Argument argv(0, true);

  // We are in the jni transition frame. Save the last_java_frame
  // corresponding to the outer interpreter frame
  //
  __ set_last_Java_frame(FP, noreg);
  // make sure the interpreter frame we've pushed has a valid return pc
  __ mov(O7, I7);
  __ mov(Lmethod, G3_scratch);
  __ mov(Llocals, G4_scratch);
  __ save_frame(0);
  __ mov(G2_thread, L7_thread_cache);
  __ add(argv.address_in_frame(), O3);
  __ mov(G2_thread, O0);
  __ mov(G3_scratch, O1);
  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
  __ delayed()->mov(G4_scratch, O2);
  __ mov(L7_thread_cache, G2_thread);
  __ reset_last_Java_frame();


  // load the register arguments (the C code packed them as varargs)
  Address Sig = argv.address_in_frame();  // Argument 0 holds the signature
  __ ld_ptr(Sig, G3_scratch);             // Get register argument signature word into G3_scratch
  __ mov(G3_scratch, G4_scratch);
  __ srl(G4_scratch, 2, G4_scratch);      // Skip Arg 0
  Label done;
  for (Argument ldarg = argv.successor(); ldarg.is_float_register(); ldarg = ldarg.successor()) {
    Label NonFloatArg;
    Label LoadFloatArg;
    Label LoadDoubleArg;
    Label NextArg;
    Address a = ldarg.address_in_frame();
    __ andcc(G4_scratch, sig_mask, G3_scratch);
    __ br(Assembler::zero, false, Assembler::pt, NonFloatArg);
    __ delayed()->nop();

    __ cmp(G3_scratch, float_sig);
    __ br(Assembler::equal, false, Assembler::pt, LoadFloatArg);
    __ delayed()->nop();

    __ cmp(G3_scratch, double_sig);
    __ br(Assembler::equal, false, Assembler::pt, LoadDoubleArg);
    __ delayed()->nop();

    __ bind(NonFloatArg);
    // There are only 6 integer register arguments!
    if (ldarg.is_register()) {
      __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
    } else {
      // Optimization: see if there are any more args and get out prior to checking
      // all 16 float registers. My guess is that this is rare.
      // If is_register is false, then we are done with the first six integer args.
      __ br_null_short(G4_scratch, Assembler::pt, done);
    }
    __ ba(NextArg);
    __ delayed()->srl(G4_scratch, 2, G4_scratch);

    __ bind(LoadFloatArg);
    __ ldf(FloatRegisterImpl::S, a, ldarg.as_float_register(), 4);
    __ ba(NextArg);
    __ delayed()->srl(G4_scratch, 2, G4_scratch);

    __ bind(LoadDoubleArg);
    __ ldf(FloatRegisterImpl::D, a, ldarg.as_double_register());
    __ ba(NextArg);
    __ delayed()->srl(G4_scratch, 2, G4_scratch);

    __ bind(NextArg);
  }

  __ bind(done);
  __ ret();
  __ delayed()->restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler

  return entry;
}
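
// For reference, the signature word consumed above, as implied by the enum
// tags and the two-bit shifts (a sketch, not a full specification): each of
// the first 16 native arguments gets a two-bit tag, argument 0 in the
// low-order bits. If the three arguments after argument 0 were an integer,
// a float and a double, the handler would see, after the initial shift that
// skips argument 0:
//
//   bits 0..1 : non_float  (0b00)  -> loaded into an integer register
//   bits 2..3 : float_sig  (0b01)  -> loaded into a float register
//   bits 4..5 : double_sig (0b10)  -> loaded into a double register pair
//
// with the srl in each delay slot consuming one tag per argument processed.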

void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {

  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes two arguments,
  // the first indicates if the counter overflow occurs at a backwards branch (NULL bcp)
  // and the second is only used when the first is true. We pass zero for both.
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ set((int)false, O2);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O2, O2, true);
  // returns verified_entry_point or NULL
  // we ignore it in any case
  __ ba_short(Lcontinue);
}


// End of helpers

// Various method entries

// Abstract method entry
// Attempt to execute abstract method. Throw exception
//
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry = __ pc();
  // abstract method entry
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
  return entry;
}

void TemplateInterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
  __ stx(O0, l_tmp);
}

void TemplateInterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
  __ ldx(l_tmp, O0);
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address,
                                          InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  if (state == atos) {
    __ profile_return_type(O0, G3_scratch, G1_scratch);
  }

  // The callee returns with the stack possibly adjusted by adapter transition
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);  // Remove any adapter added stack space.

  const Register cache = G3_scratch;
  const Register index = G1_scratch;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
  const Register parameter_size = flags;
  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments

  __ check_and_handle_popframe(Gtemp);
  __ check_and_handle_earlyret(Gtemp);

  __ dispatch_next(state, step);

  return entry;
}
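
// Worked example for the argument pop above (a sketch, assuming the 8-byte
// expression stack slots of this LP64 port): returning from an invokevirtual
// of int m(int, int), the cache entry's parameter size is 3 words (receiver
// plus two ints), so parameter_size becomes 3 << Interpreter::logStackElementSize
// = 24 bytes, and the add bumps Lesp past the three slots the caller pushed.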

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache);  // load LcpoolCache
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method. This can
  // only occur on method entry so emit it only for vtos with step 0.
  if (UseJVMCICompiler && state == vtos && step == 0) {
    Label L;
    Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
    __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
    __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
    // Clear flag.
    __ stbool(G0, pending_monitor_enter_addr);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (UseJVMCICompiler) {
      Label L;
      Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
      __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
      __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ br_null_short(Gtemp, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
    case T_INT    : __ mov(O0, Itos_i);                           break;
    case T_VOID   : /* nothing to do */                           break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code");        break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code");        break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                                   // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_int32(0);)              // marker for disassembly
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


//
// Helpers for commoning out cases in the various type of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
//   ??: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in MethodCounters* or in
  // MDO depending if we're profiling or not.
  const Register G3_method_counters = G3_scratch;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(MethodData::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba_short(done);
    }

    // Increment counter in MethodCounters*
    __ bind(no_mdo);
    Address invocation_counter(G3_method_counters,
                               in_bytes(MethodCounters::invocation_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
    __ get_method_counters(Lmethod, G3_method_counters, done);
    Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G4_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    // Update standard invocation counters
    __ get_method_counters(Lmethod, G3_method_counters, done);
    __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
    if (ProfileInterpreter) {
      Address interpreter_invocation_counter(G3_method_counters,
                                             in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G4_scratch);
      __ inc(G4_scratch);
      __ st(G4_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
      __ ld(profile_limit, G1_scratch);
      __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
    __ ld(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);  // Far distance
    __ delayed()->nop();
    __ bind(done);
  }
}
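
// How the increment above triggers the overflow path, in outline (a sketch;
// the actual mask values are computed by the compilation policy):
// increment_mask_and_jump adds InvocationCounter::count_increment to the
// counter, ands the result with the per-method mask loaded from
// MethodCounters (or the MDO), and branches to 'overflow' whenever the
// masked value is zero, so the slow path is entered periodically rather
// than only on the first crossing.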

// Allocate monitor and lock method (asm interpreter)
// Lmethod - Method*
//
void TemplateInterpreterGenerator::lock_method() {
  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.

#ifdef ASSERT
  { Label ok;
    __ btst(JVM_ACC_SYNCHRONIZED, O0);
    __ br(Assembler::notZero, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    __ btst(JVM_ACC_STATIC, O0);
    __ br(Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0);  // get receiver for not-static case

    // lock the mirror, not the Klass*
    __ load_mirror(O0, Lmethod);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);                       // allocate monitor elem
  __ st_ptr(O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());  // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch) {
  const int page_size = os::vm_page_size();
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch);

  __ set(page_size, Rscratch);
  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);

  // Get the stack overflow limit, and in debug, verify it is non-zero.
  __ ld_ptr(G2_thread, JavaThread::stack_overflow_limit_offset(), Rscratch);
#ifdef ASSERT
  Label limit_ok;
  __ br_notnull_short(Rscratch, Assembler::pn, limit_ok);
  __ stop("stack overflow limit is zero in generate_stack_overflow_check");
  __ bind(limit_ok);
#endif

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register).
  __ add(Rscratch, Rframe_size, Rscratch);

  // The frame is greater than one page in size, so check against
  // the bottom of the stack.
  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);

  // The stack will overflow, throw an exception.

  // Note that SP is restored to sender's sp (in the delay slot). This
  // is necessary if the sender's frame is an extended compiled frame
  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
  // adaptations.

  // Note also that the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
  __ jump_to(stub, Rscratch);
  __ delayed()->mov(O5_savedSP, SP);

  // If you get to here, then there is enough stack space.
  __ bind(after_frame_check);
}
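
// In effect, the check above is (a C-like sketch):
//
//   if (frame_size > page_size &&
//       SP <= stack_overflow_limit + frame_size) {
//     goto StubRoutines::throw_StackOverflowError_entry();  // with caller's SP restored
//   }
//
// Frames of at most one page skip the explicit test; they are covered by the
// shadow zone banged in bang_stack_shadow_pages() below.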

//
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods hence the shared code.


//----------------------------------------------------------------------------------------------------
// Stack frame layout
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the Method* of the method to call
//    Lesp:      points to the TOS of the callers expression stack
//               after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
//   pop parameters from the callers stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//               + vm_local_words
//               + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code

// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs    : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP by the extra local
  //    space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  int rounded_vm_local_words = align_up((int)frame::interpreter_frame_vm_local_words, WordsPerLong);

  const int extra_space =
    rounded_vm_local_words +                  // frame local scratch space
    Method::extra_stack_entries() +           // extra stack for jsr 292
    frame::memory_parameter_word_sp_offset +  // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register RconstMethod = Glocals_size;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.
  const Address constMethod       (G5_method, Method::const_offset());
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

  __ ld_ptr(constMethod, RconstMethod);
  __ lduh(size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words(Glocals_size, Gframe_size);
    __ add(Gframe_size, extra_space, Gframe_size);
    __ round_to(Gframe_size, WordsPerLong);
    __ sll(Gframe_size, LogBytesPerWord, Gframe_size);

    // Native calls don't need the stack size check since they have no
    // expression stack and the arguments are already on the stack and
    // we only add a handful of words to the stack.
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    const Address size_of_locals(Otmp1, ConstMethod::size_of_locals_offset());
    __ ld_ptr(constMethod, Otmp1);
    __ lduh(size_of_locals, Otmp1);
    __ sub(Otmp1, Glocals_size, Glocals_size);
    __ round_to(Glocals_size, WordsPerLong);
    __ sll(Glocals_size, Interpreter::logStackElementSize, Glocals_size);

    // See if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining.
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ ld_ptr(constMethod, Gframe_size);
    __ lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size);
    __ add(Gframe_size, extra_space, Gframe_size);
    __ round_to(Gframe_size, WordsPerLong);
    __ sll(Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add(Gframe_size, Glocals_size, Gframe_size);

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1);

    __ sub(Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub(SP, Glocals_size, SP);
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg(Gframe_size);
  __ save(SP, Gframe_size, SP);

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
  }
  __ mov(G5_method, Lmethod);                                    // set Lmethod
  // Get mirror and store it in the frame as GC root for this Method*
  Register mirror = LcpoolCache;
  __ load_mirror(mirror, Lmethod);
  __ st_ptr(mirror, FP, (frame::interpreter_frame_mirror_offset * wordSize) + STACK_BIAS);
  __ get_constant_pool_cache(LcpoolCache);                       // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors);  // set Lmonitors
  __ add(Lmonitors, STACK_BIAS, Lmonitors);                      // Account for 64 bit stack bias
  __ sub(Lmonitors, BytesPerWord, Lesp);                         // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);                          // set Llocals

  if (ProfileInterpreter) {
    __ set_method_data_pointer();
  }
}
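
// Putting the pieces above together, the frame size for the non-native case
// works out to roughly (a sketch; each term is rounded to WordsPerLong):
//
//   frame_bytes = (max_stack
//                  + rounded_vm_local_words
//                  + Method::extra_stack_entries()
//                  + frame::memory_parameter_word_sp_offset) << logStackElementSize
//
// on top of the caller-frame bump of (max_locals - num_parameters) slots that
// keeps argument and non-argument locals contiguous.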

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // In the G1 code we don't check if we need to reach a safepoint. We
    // continue and the thread will safepoint at the next bytecode dispatch.

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ld_ptr(Gargs, G0, Otos_i);  // get local 0
    // check if local 0 == NULL and go to the slow path
    __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);


    // Load the value of the referent field.
    if (Assembler::is_simm13(referent_offset)) {
      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
    } else {
      __ set(referent_offset, G3_scratch);
      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
    }

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer. Note with
    // these parameters the pre-barrier does not generate
    // the load of the previous value

    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
                            Otos_i /* pre_val */,
                            G3_scratch /* tmp */,
                            true /* preserve_o_regs */);

    // _areturn
    __ retl();  // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {

  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    Label L_slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
    __ set(SafepointSynchronize::_not_synchronized, O3);
    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);

    // Load parameters
    const Register crc   = O0;  // initial crc
    const Register val   = O1;  // byte to update with
    const Register table = O2;  // address of 256-entry lookup table

    __ ldub(Gargs, 3, val);
    __ lduw(Gargs, 8, crc);

    __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);

    __ not1(crc);  // ~crc
    __ clruwu(crc);
    __ update_byte_crc32(crc, val, table);
    __ not1(crc);  // ~crc

    // result in O0
    __ retl();
    __ delayed()->nop();

    // generate a vanilla native entry as the slow path
    __ bind(L_slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    Label L_slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
    __ set(SafepointSynchronize::_not_synchronized, O3);
    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);

    // Load parameters from the stack
    const Register crc    = O0;  // initial crc
    const Register buf    = O1;  // source java byte array address
    const Register len    = O2;  // len
    const Register offset = O3;  // offset

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ lduw(Gargs, 0,  len);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 32, crc);
      __ add(buf, offset, buf);
    } else {
      __ lduw(Gargs, 0,  len);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 24, crc);
      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf);  // account for the header size
      __ add(buf, offset, buf);
    }
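
    // Layout behind the loads above (a sketch, assuming 8-byte expression
    // stack slots): the arguments sit in reverse order, with the rightmost
    // argument at Gargs + 0. For updateBytes(int crc, byte[] b, int off, int len):
    //
    //   Gargs + 0  : len
    //   Gargs + 8  : off
    //   Gargs + 16 : b     (array oop; element 0 follows the array header)
    //   Gargs + 24 : crc
    //
    // For updateByteBuffer, the long 'buf' takes two slots, pushing crc out
    // to Gargs + 32.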

    // Call the crc32 kernel
    __ MacroAssembler::save_thread(L7_thread_cache);
    __ kernel_crc32(crc, buf, len, O3);
    __ MacroAssembler::restore_thread(L7_thread_cache);

    // result in O0
    __ retl();
    __ delayed()->nop();

    // generate a vanilla native entry as the slow path
    __ bind(L_slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32CIntrinsics) {
    address entry = __ pc();

    // Load parameters from the stack
    const Register crc    = O0;  // initial crc
    const Register buf    = O1;  // source java byte array address
    const Register offset = O2;  // offset
    const Register end    = O3;  // index of last element to process
    const Register len    = O2;  // len argument to the kernel
    const Register table  = O3;  // crc32c lookup table address

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
      __ lduw(Gargs, 0,  end);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 32, crc);
      __ add(buf, offset, buf);
      __ sub(end, offset, len);
    } else {
      __ lduw(Gargs, 0,  end);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 24, crc);
      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf);  // account for the header size
      __ add(buf, offset, buf);
      __ sub(end, offset, len);
    }

    // Call the crc32c kernel
    __ MacroAssembler::save_thread(L7_thread_cache);
    __ kernel_crc32c(crc, buf, len, table);
    __ MacroAssembler::restore_thread(L7_thread_cache);

    // result in O0
    __ retl();
    __ delayed()->nop();

    return entry;
  }
  return NULL;
}

/* Math routines only partially supported.
 *
 * Providing support for fma (float/double) only.
 */
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind)
{
  if (!InlineIntrinsics) return NULL;  // Generate a vanilla entry

  address entry = __ pc();

  switch (kind) {
    case Interpreter::java_lang_math_fmaF:
      if (UseFMA) {
        // float .fma(float a, float b, float c)
        const FloatRegister ra = F1;
        const FloatRegister rb = F2;
        const FloatRegister rc = F3;
        const FloatRegister rd = F0;  // Result.

        __ ldf(FloatRegisterImpl::S, Gargs,  0, rc);
        __ ldf(FloatRegisterImpl::S, Gargs,  8, rb);
        __ ldf(FloatRegisterImpl::S, Gargs, 16, ra);

        __ fmadd(FloatRegisterImpl::S, ra, rb, rc, rd);
        __ retl();  // Result in F0 (rd).
        __ delayed()->mov(O5_savedSP, SP);

        return entry;
      }
      break;
    case Interpreter::java_lang_math_fmaD:
      if (UseFMA) {
        // double .fma(double a, double b, double c)
        const FloatRegister ra = F2;  // D1
        const FloatRegister rb = F4;  // D2
        const FloatRegister rc = F6;  // D3
        const FloatRegister rd = F0;  // D0 Result.

        __ ldf(FloatRegisterImpl::D, Gargs,  0, rc);
        __ ldf(FloatRegisterImpl::D, Gargs, 16, rb);
        __ ldf(FloatRegisterImpl::D, Gargs, 32, ra);

        __ fmadd(FloatRegisterImpl::D, ra, rb, rc, rd);
        __ retl();  // Result in D0 (rd).
        __ delayed()->mov(O5_savedSP, SP);

        return entry;
      }
      break;
    default:
      break;
  }
  return NULL;
}
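
// Argument layout behind the fma loads above (a sketch, assuming 8-byte
// expression stack slots): arguments sit in reverse order, so for fma(a, b, c)
// the last argument c is at Gargs + 0. The float variant uses one slot per
// argument (offsets 0, 8, 16), while the double variant's category-2 values
// occupy two slots each (offsets 0, 16, 32).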

// TODO: rather than touching all pages, check against stack_overflow_limit and bang yellow page to
// generate exception
void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  { Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
      JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // runtime, exception handling i.e. unlock_if_synchronized_method will
  // check this thread local flag.
  // This flag has two effects, one is to force an unwind in the topmost
  // interpreter frame and not perform an unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br(Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }


  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, Method::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ br_notnull_short(G3_scratch, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in it.
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);   // Calculate negative of current frame size
  __ save(SP, O3, SP);  // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).
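
  // After the save above, the outs of the interpreter frame are visible as
  // the ins of this protection frame: O1 (the Llocals copy) arrives as I1,
  // and O2 (the mirror handle address) as I2, which is what the two moves
  // below pick up.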

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);  // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // Call the signature handler. It will move the args properly since Llocals
  // in the current frame matches that in the outer frame.

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br(Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
    // for static methods insert the mirror argument
    __ load_mirror(O1, Lmethod);

#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ br_notnull_short(O1, Assembler::pt, L);
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch. O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ br_notnull_short(O0, Assembler::pt, L);
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flushw();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache);  // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);

  // Back from jni method; Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache);  // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
    __ bind(L);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }
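
  // Thread-state sequence around the native call, in outline (a sketch of
  // what the surrounding code implements):
  //
  //   _thread_in_Java
  //     -> _thread_in_native        (stored before calling the JNI entry point)
  //     -> _thread_in_native_trans  (stored above, before the safepoint check)
  //     -> block in the VM if a safepoint is pending or the thread is suspended
  //     -> _thread_in_Java          (stored below, once it is safe to resume)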

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
  __ restore(O0, G0, O0);

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ st_ptr(G0, G2_thread, JavaThread::pending_jni_exception_check_fn_offset());
  }

  // reset handle block
  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

  // If we have an oop result store it where it will be safe for any further gc
  // until we return now that we've released the handle it might be protected by

  { Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
    // Unbox oop result, e.g. JNIHandles::resolve value in O0.
    __ br_null(O0, false, Assembler::pn, store_result);      // Use NULL as-is.
    __ delayed()->andcc(O0, JNIHandles::weak_tag_mask, G0);  // Test for jweak
    __ brx(Assembler::zero, true, Assembler::pt, store_result);
    __ delayed()->ld_ptr(O0, 0, O0);  // Maybe resolve (untagged) jobject.
    // Resolve jweak.
    __ ld_ptr(O0, -JNIHandles::weak_tag_value, O0);
#if INCLUDE_ALL_GCS
    if (UseG1GC) {
      __ g1_write_barrier_pre(noreg /* obj */,
                              noreg /* index */,
                              0 /* offset */,
                              O0 /* pre_val */,
                              G3_scratch /* tmp */,
                              true /* preserve_o_regs */);
    }
#endif // INCLUDE_ALL_GCS
    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);
  }
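
  // The unboxing above, in C-like pseudocode (a sketch of the tagged JNI
  // handle scheme):
  //
  //   if (result != NULL) {
  //     if ((result & weak_tag_mask) == 0) {
  //       result = *result;                     // strong jobject
  //     } else {
  //       result = *(result - weak_tag_value);  // jweak, pre-barrier on the value
  //     }
  //   }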
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

  // dispose of return address and remove activation
#ifdef ASSERT
  { Label ok;
    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  __ jmp(Lscratch, 0);
  __ delayed()->nop();

  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }

  return entry;
}


// Generic method entry to (asm) interpreter
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address constMethod (G5_method, Method::const_offset());
  // G5_method seems to be live at the point this is used, so we could make
  // this look consistent and use it in the asserts.
  const Address access_flags (Lmethod,   Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native and not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  { Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg;    // will be G0 if we must clear locals
  // The way the code was set up before, zerolocals was always true for
  // vanilla java entries. It could only be false for the specialized
  // entries like accessor or empty, which have no extra locals, so the
  // testing was a waste of time and the extra locals were always
  // initialized. We removed this extra complication from already
  // over-complicated code.
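  // A C-level sketch of the clear loop generated below (slot i of the locals
  // lives at Llocals - i*wordSize; parameter slots [0, nparams) are already
  // filled by the caller, extra local slots [nparams, nlocals) must be zeroed):
  //
  //   for (int i = nlocals - 1; i >= nparams; i--)
  //     *(intptr_t*)(Llocals - i*wordSize) = 0;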
  init_value = G0;
  Label clear_loop;

  const Register RconstMethod = O1;
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize, O2 );
  __ sll( O1, Interpreter::logStackElementSize, O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread-local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, exception handling (i.e. unlock_if_synchronized_method)
  // will check this thread-local flag.
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch);
  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // This must happen AFTER the invocation counter check and the stack
  // overflow check, so the method is not locked if an overflow occurs.
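  // In outline (a sketch of the usual interpreter locking convention, not a
  // literal call sequence): for a synchronized method, lock_method() enters
  // the frame's initial monitor slot on
  //
  //   oop lockee = method->is_static() ? method->method_holder()->java_mirror()
  //                                    : (oop)*Llocals;   // the receiver, local 0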
  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br(Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);


  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter.
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ ba_short(profile_method_continue);
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }

  return entry;
}

//----------------------------------------------------------------------------------------------------
// Exceptions
void TemplateInterpreterGenerator::generate_throw_exception() {

  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // O0: exception

  // entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  __ verify_thread();
  // expression stack is undefined here
  // O0: exception, i.e. Oexception
  // Lbcp: exception bcp
  __ verify_oop(Oexception);


  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  // call C routine to find handler and jump to it
  __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
  __ push_ptr(O1); // push exception for exception handler bytecodes

  __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
  __ delayed()->nop();


  // If the exception is not handled in the current frame,
  // the frame is removed and the exception is rethrown
  // (i.e. exception continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).

  // in current activation
  // tos: exception
  // Lbcp: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
  // Set the popframe_processing bit in popframe_condition, indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.
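  // C-level sketch of the flag update generated below:
  //
  //   thread->_popframe_condition |= JavaThread::popframe_processing_bit;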
  __ ld(popframe_condition_addr, G3_scratch);
  __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
  __ stw(G3_scratch, popframe_condition_addr);

  // Empty the expression stack, as in normal exception handling
  __ empty_expression_stack();
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
    __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);

    const Register Gtmp1 = G3_scratch;
    const Register Gtmp2 = G1_scratch;
    const Register RconstMethod = Gtmp1;
    const Address constMethod(Lmethod, Method::const_offset());
    const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

    // Compute size of arguments for saving when returning to deoptimized caller
    __ ld_ptr(constMethod, RconstMethod);
    __ lduh(size_of_parameters, Gtmp1);
    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
    __ sub(Llocals, Gtmp1, Gtmp2);
    __ add(Gtmp2, wordSize, Gtmp2);
    // Save these arguments
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
    // Inform deoptimization that it is responsible for restoring these arguments
    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
    __ st(Gtmp1, popframe_condition_addr);

    // Return from the current method
    // The caller's SP was adjusted upon method entry to accommodate
    // the callee's non-argument locals. Undo that adjustment.
    __ ret();
    __ delayed()->restore(I5_savedSP, G0, SP);

    __ bind(caller_not_deoptimized);
  }

  // Clear the popframe condition flag
  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);

  // Get out of the current method (how this is done depends on the particular compiler calling
  // convention that the interpreter currently follows).
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ restore(I5_savedSP, G0, SP);
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

#if INCLUDE_JVMTI
  { Label L_done;

    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);

    __ br_null(G1_scratch, false, Assembler::pn, L_done);
    __ delayed()->nop();

    __ st_ptr(G1_scratch, Lesp, wordSize);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Resume bytecode interpretation at the current bcp
  __ dispatch_next(vtos);
  // end of JVMTI PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
  __ pop_ptr(Oexception); // get exception

  // Intel has the following comment:
  //// remove the activation (without doing throws on illegalMonitorExceptions)
  // They remove the activation without checking for bad monitor state.
  // %%% We should make sure this is the right semantics before implementing.

  __ set_vm_result(Oexception);
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);

  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);

  __ get_vm_result(Oexception);
  __ verify_oop(Oexception);

  const int return_reg_adjustment = frame::pc_return_offset;
  Address issuing_pc_addr(I7, return_reg_adjustment);

  // We are done with this activation frame; find out where to go next.
  // The continuation point will be an exception handler, which expects
  // the following registers set up:
  //
  // Oexception: exception
  // Oissuing_pc: the local call that threw the exception
  // Other On: garbage
  // In/Ln:  the contents of the caller's register window
  //
  // We do the required restore at the last possible moment, because we
  // need to preserve some state across a runtime call.
  // (Remember that the caller activation is unknown--it might not be
  // interpreted, so things like Lscratch are useless in the caller.)

  // Although the Intel version uses call_C, we can use the more
  // compact call_VM. (The only real difference on SPARC is a
  // harmlessly ignored [re]set_last_Java_frame, compared with
  // the Intel code which lacks this.)
  __ mov(Oexception, Oexception->after_save());  // get exception in I0 so it will be on O0 after restore
  __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
  __ super_call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
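  // At this point the leaf call has returned the continuation address in O0.
  // In C-like terms (sketch only):
  //
  //   address handler =
  //     SharedRuntime::exception_handler_for_return_address(thread, issuing_pc);
  //   goto handler;  // after the restore, the exception is in O0 and the
  //                  // issuing pc in O1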
  __ JMP(O0, 0); // return exception handler in caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  // (same old exception object is already in Oexception; see above)
  // Note that an "issuing PC" is actually the next PC after the call
}


//
// JVMTI ForceEarlyReturn support
//

address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);

  __ remove_activation(state,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret();  // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  return entry;
} // end of JVMTI ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr(); __ ba_short(L);
  fep = __ pc(); __ push_f();   __ ba_short(L);
  dep = __ pc(); __ push_d();   __ ba_short(L);
  lep = __ pc(); __ push_l();   __ ba_short(L);
  iep = __ pc(); __ push_i();
  bep = cep = sep = iep;        // there aren't any
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}

// --------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ mov(O7, Lscratch); // protect return address within interpreter

  // Pass a 0 (not used on sparc) and the top of stack to the bytecode tracer
  __ mov( Otos_l2, G3_scratch );
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
  __ mov(Lscratch, O7); // restore return address
  __ pop(state);
  __ retl();
  __ delayed()->nop();

  return entry;
}


// helpers for generate_and_dispatch

void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  AddressLiteral index   (&BytecodePairHistogram::_index);
  AddressLiteral counters((address) &BytecodePairHistogram::_counters);

  // get index, shift out old bytecode, bring in new bytecode, and store it:
  //   _index = (_index >> log2_number_of_codes) |
  //            (bytecode << log2_number_of_codes);

  __ load_contents(index, G4_scratch);
  __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
  __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch );
  __ or3( G3_scratch, G4_scratch, G4_scratch );
  __ store_contents(G4_scratch, index, G3_scratch);

  // bump bucket contents:
  //   _counters[_index]++;

  __ set(counters, G3_scratch);                      // loads into G3_scratch
  __ sll( G4_scratch, LogBytesPerWord, G4_scratch ); // index is word address
  __ add (G3_scratch, G4_scratch, G3_scratch);       // add in index
  __ ld (G3_scratch, 0, G4_scratch);
  __ inc (G4_scratch);
  __ st (G4_scratch, 0, G3_scratch);
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  address entry = Interpreter::trace_code(t->tos_in());
  guarantee(entry != NULL, "entry must have been generated");
  __ call(entry, relocInfo::none);
  __ delayed()->nop();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  AddressLiteral counter(&BytecodeCounter::_counter_value);
  __ load_contents(counter, G3_scratch);
  AddressLiteral stop_at(&StopInterpreterAt);
  __ load_ptr_contents(stop_at, G4_scratch);
  __ cmp(G3_scratch, G4_scratch);
  __ breakpoint_trap(Assembler::equal, Assembler::icc);
}
#endif // not PRODUCT