/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH

// Size of interpreter code.  Increase if too small.  Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef _LP64
// The sethi() instruction generates lots more instructions when shell
// stack limit is unlimited, so that's why this is much bigger.
int TemplateInterpreter::InterpreterCodeSize = 260 * K;
#else
int TemplateInterpreter::InterpreterCodeSize = 230 * K;
#endif

// Generation of Interpreter
//
// The TemplateInterpreterGenerator generates the interpreter into Interpreter::_code.


#define __ _masm->


//----------------------------------------------------------------------------------------------------

#ifndef _LP64
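// The slow-path signature handler: InterpreterRuntime::slow_signature_handler
// walks the method signature in C code and packs the outgoing arguments into
// the frame as varargs; the register portion is then reloaded below.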
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();
  Argument argv(0, true);

  // We are in the jni transition frame. Save the last_java_frame
  // corresponding to the outer interpreter frame.
  //
  __ set_last_Java_frame(FP, noreg);
  // make sure the interpreter frame we've pushed has a valid return pc
  __ mov(O7, I7);
  __ mov(Lmethod, G3_scratch);
  __ mov(Llocals, G4_scratch);
  __ save_frame(0);
  __ mov(G2_thread, L7_thread_cache);
  __ add(argv.address_in_frame(), O3);
  __ mov(G2_thread, O0);
  __ mov(G3_scratch, O1);
  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
  __ delayed()->mov(G4_scratch, O2);
  __ mov(L7_thread_cache, G2_thread);
  __ reset_last_Java_frame();

  // load the register arguments (the C code packed them as varargs)
  for (Argument ldarg = argv.successor(); ldarg.is_register(); ldarg = ldarg.successor()) {
    __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
  }
  __ ret();
  __ delayed()->
     restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
  return entry;
}


#else
// LP64 passes floating point arguments in F1, F3, F5, etc. instead of
// O0, O1, O2, etc.
// Doubles are passed in D0, D2, D4.
// We store the signature of the first 16 arguments in the first argument
// slot because it will be overwritten prior to calling the native
// function, with the pointer to the JNIEnv.
// In LP64 up to 16 floating point arguments can be passed in registers,
// and up to 6 integer arguments.
address TemplateInterpreterGenerator::generate_slow_signature_handler() {

  enum {
    non_float  = 0,
    float_sig  = 1,
    double_sig = 2,
    sig_mask   = 3
  };

  address entry = __ pc();
  Argument argv(0, true);

  // We are in the jni transition frame. Save the last_java_frame
  // corresponding to the outer interpreter frame.
  //
  __ set_last_Java_frame(FP, noreg);
  // make sure the interpreter frame we've pushed has a valid return pc
  __ mov(O7, I7);
  __ mov(Lmethod, G3_scratch);
  __ mov(Llocals, G4_scratch);
  __ save_frame(0);
  __ mov(G2_thread, L7_thread_cache);
  __ add(argv.address_in_frame(), O3);
  __ mov(G2_thread, O0);
  __ mov(G3_scratch, O1);
  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
  __ delayed()->mov(G4_scratch, O2);
  __ mov(L7_thread_cache, G2_thread);
  __ reset_last_Java_frame();


  // load the register arguments (the C code packed them as varargs)
  Address Sig = argv.address_in_frame();   // Argument 0 holds the signature
  __ ld_ptr( Sig, G3_scratch );            // Get register argument signature word into G3_scratch
  __ mov( G3_scratch, G4_scratch);
  __ srl( G4_scratch, 2, G4_scratch);      // Skip Arg 0
  Label done;
  for (Argument ldarg = argv.successor(); ldarg.is_float_register(); ldarg = ldarg.successor()) {
    Label NonFloatArg;
    Label LoadFloatArg;
    Label LoadDoubleArg;
    Label NextArg;
    Address a = ldarg.address_in_frame();
    __ andcc(G4_scratch, sig_mask, G3_scratch);
    __ br(Assembler::zero, false, Assembler::pt, NonFloatArg);
    __ delayed()->nop();

    __ cmp(G3_scratch, float_sig );
    __ br(Assembler::equal, false, Assembler::pt, LoadFloatArg);
    __ delayed()->nop();

    __ cmp(G3_scratch, double_sig );
    __ br(Assembler::equal, false, Assembler::pt, LoadDoubleArg);
    __ delayed()->nop();

    __ bind(NonFloatArg);
    // There are only 6 integer register arguments!
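    // Integer arguments beyond the sixth were already stored to the stack by
    // the varargs packing above, so when is_register() is false there is
    // nothing left to load for this slot.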
    if ( ldarg.is_register() )
      __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
    else {
      // Optimization, see if there are any more args and get out prior to checking
      // all 16 float registers.  My guess is that this is rare.
      // If is_register is false, then we are done with the first six integer args.
      __ br_null_short(G4_scratch, Assembler::pt, done);
    }
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(LoadFloatArg);
    __ ldf( FloatRegisterImpl::S, a, ldarg.as_float_register(), 4);
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(LoadDoubleArg);
    __ ldf( FloatRegisterImpl::D, a, ldarg.as_double_register() );
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(NextArg);

  }

  __ bind(done);
  __ ret();
  __ delayed()->
     restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
  return entry;
}
#endif

void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {

  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes two arguments,
  // the first indicates if the counter overflow occurs at a backwards branch (NULL bcp)
  // and the second is only used when the first is true.  We pass zero for both.
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ set((int)false, O2);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O2, O2, true);
  // returns verified_entry_point or NULL
  // we ignore it in any case
  __ ba_short(Lcontinue);

}


// End of helpers

// Various method entries

// Abstract method entry
// Attempt to execute abstract method. Throw exception
//
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry = __ pc();
  // abstract method entry
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
  return entry;

}

void TemplateInterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
  __ stx(O0, l_tmp);
#else
  __ std(O0, l_tmp);
#endif
}

void TemplateInterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
  __ ldx(l_tmp, O0);
#else
  __ ldd(l_tmp, O0);
#endif
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}

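// Return entries are where execution resumes when a call bytecode returns.
// 'step' is the length of the invoke bytecode, so dispatch can continue at
// the bytecode following the call site, and 'index_size' is the width of
// the constant pool cache index re-read at the bcp (invokedynamic uses a
// 4-byte index).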
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  if (state == atos) {
    __ profile_return_type(O0, G3_scratch, G1_scratch);
  }

#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for Longs.  C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
  // build even if we are returning from interpreted code, we just do a little
  // shuffling here.
  // Note: I tried to make C2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
  // first which would move G1 -> O0/O1 and destroy the exception we were throwing.

  if (state == ltos) {
    __ srl (G1,  0, O1);
    __ srlx(G1, 32, O0);
  }
#endif // !_LP64 && COMPILER2

  // The callee returns with the stack possibly adjusted by adapter transition
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.

  const Register cache = G3_scratch;
  const Register index = G1_scratch;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
  const Register parameter_size = flags;
  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments

  __ check_and_handle_popframe(Gtemp);
  __ check_and_handle_earlyret(Gtemp);

  __ dispatch_next(state, step);

  return entry;
}

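// Deopt entries are similar, but are entered when the frame has just been
// rebuilt by the deoptimization machinery: the constant pool cache register
// must be re-established and any exception installed during deoptimization
// rethrown before dispatch continues.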
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.  This can
  // only occur on method entry so emit it only for vtos with step 0.
  if (UseJVMCICompiler && state == vtos && step == 0) {
    Label L;
    Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
    __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
    __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
    // Clear flag.
    __ stbool(G0, pending_monitor_enter_addr);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (UseJVMCICompiler) {
      Label L;
      Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
      __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
      __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ br_null_short(Gtemp, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2);  // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i);                         break;
    case T_VOID   : /* nothing to do */                         break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );     break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                                   // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_int32(0);)              // marker for disassembly
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


//
// Helpers for commoning out cases in the various type of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
//   ??: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in MethodCounters* or in
  // MDO depending if we're profiling or not.
  const Register G3_method_counters = G3_scratch;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(MethodData::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba_short(done);
    }

    // Increment counter in MethodCounters*
    __ bind(no_mdo);
    Address invocation_counter(G3_method_counters,
                               in_bytes(MethodCounters::invocation_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
    __ get_method_counters(Lmethod, G3_method_counters, done);
    Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G4_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    // Update standard invocation counters
    __ get_method_counters(Lmethod, G3_method_counters, done);
    __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
    if (ProfileInterpreter) {
      Address interpreter_invocation_counter(G3_method_counters,
                                             in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G4_scratch);
      __ inc(G4_scratch);
      __ st(G4_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
      __ ld(profile_limit, G1_scratch);
      __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
    __ ld(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
    __ delayed()->nop();
    __ bind(done);
  }

}

// Allocate monitor and lock method (asm interpreter)
// Lmethod - Method*
//
void TemplateInterpreterGenerator::lock_method() {
  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.
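  // O0 now holds the access flags: below, the JVM_ACC_STATIC bit selects
  // whether we lock the class mirror (static method) or the receiver in
  // local slot 0 (instance method).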

#ifdef ASSERT
  { Label ok;
    __ btst(JVM_ACC_SYNCHRONIZED, O0);
    __ br( Assembler::notZero, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    // lock the mirror, not the Klass*
    __ load_mirror(O0, Lmethod);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);                      // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes()); // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch) {
  const int page_size = os::vm_page_size();
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch);

  __ set(page_size, Rscratch);
  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);

  // Get the stack overflow limit, and in debug, verify it is non-zero.
  __ ld_ptr(G2_thread, JavaThread::stack_overflow_limit_offset(), Rscratch);
#ifdef ASSERT
  Label limit_ok;
  __ br_notnull_short(Rscratch, Assembler::pn, limit_ok);
  __ stop("stack overflow limit is zero in generate_stack_overflow_check");
  __ bind(limit_ok);
#endif

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register).
  __ add(Rscratch, Rframe_size, Rscratch);

  // The frame is greater than one page in size, so check against
  // the bottom of the stack.
  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);

  // The stack will overflow, throw an exception.

  // Note that SP is restored to sender's sp (in the delay slot). This
  // is necessary if the sender's frame is an extended compiled frame
  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
  // adaptations.

  // Note also that the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
  __ jump_to(stub, Rscratch);
  __ delayed()->mov(O5_savedSP, SP);

  // If you get to here, then there is enough stack space.
  __ bind(after_frame_check);
}


//
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods hence the shared code.
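//
// (The 0x40 and 0x5c offsets in the diagrams below assume 32-bit words:
// a 16-register save area of 16 * 4 = 0x40 bytes, followed by 7 extra
// slots ending at 0x40 + 7 * 4 = 0x5c.)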


//----------------------------------------------------------------------------------------------------
// Stack frame layout
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the Method* of the method to call
//    Lesp:      points to the TOS of the callers expression stack
//               after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
//   pop parameters from the callers stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//             + vm_local_words
//             + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code

// Try this optimization:  Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations.  It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs  : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |
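//
// Note on the fixed overhead computed in generate_fixed_frame below:
// extra_space is the rounded VM-local scratch words, plus
// Method::extra_stack_entries() for JSR 292, plus the register save area
// (frame::memory_parameter_word_sp_offset), plus, for native calls only,
// the extra outgoing argument words; it is added to max_stack and shifted
// from words into bytes before the save instruction.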

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP by the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    Method::extra_stack_entries() +            // extra stack for jsr 292
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register RconstMethod = Glocals_size;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.
  const Address constMethod       (G5_method, Method::const_offset());
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size,  extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );

    // Native calls don't need the stack size check since they have no
    // expression stack and the arguments are already on the stack and
    // we only add a handful of words to the stack.
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    const Address size_of_locals(Otmp1, ConstMethod::size_of_locals_offset());
    __ ld_ptr(constMethod, Otmp1);
    __ lduh(size_of_locals, Otmp1);
    __ sub(Otmp1, Glocals_size, Glocals_size);
    __ round_to(Glocals_size, WordsPerLong);
    __ sll(Glocals_size, Interpreter::logStackElementSize, Glocals_size);

    // See if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining.
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ ld_ptr(constMethod, Gframe_size);
    __ lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size);
    __ add(Gframe_size, extra_space, Gframe_size);
    __ round_to(Gframe_size, WordsPerLong);
    __ sll(Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add(Gframe_size, Glocals_size, Gframe_size);

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1);

    __ sub(Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub(SP, Glocals_size, SP);
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                 // set Lmethod
  // Get mirror and store it in the frame as GC root for this Method*
  Register mirror = LcpoolCache;
  __ load_mirror(mirror, Lmethod);
  __ st_ptr(mirror, FP, (frame::interpreter_frame_mirror_offset * wordSize) + STACK_BIAS);
  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
#ifdef _LP64
  __ add( Lmonitors, STACK_BIAS, Lmonitors );  // Account for 64 bit stack bias
#endif
  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals

  if (ProfileInterpreter) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    __ set_method_data_pointer();
  }

}

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null.
  //   If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // In the G1 code we don't check if we need to reach a safepoint. We
    // continue and the thread will safepoint at the next bytecode dispatch.

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    // check if local 0 == NULL and go the slow path
    __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);


    // Load the value of the referent field.
    if (Assembler::is_simm13(referent_offset)) {
      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
    } else {
      __ set(referent_offset, G3_scratch);
      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
    }

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer. Note with
    // these parameters the pre-barrier does not generate
    // the load of the previous value

    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
                            Otos_i /* pre_val */,
                            G3_scratch /* tmp */,
                            true /* preserve_o_regs */);

    // _areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {

  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    Label L_slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ load_contents(state, O2);  // load the safepoint state word, not its address
    __ set(SafepointSynchronize::_not_synchronized, O3);
    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);

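    // In the loads below, the arguments are reversed on the expression
    // stack: 'b' is in the slot at Gargs + 0 (only its low-order byte is
    // needed, at big-endian offset 3) and 'crc' is in the slot at Gargs + 8.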
    // Load parameters
    const Register crc   = O0;  // initial crc
    const Register val   = O1;  // byte to update with
    const Register table = O2;  // address of 256-entry lookup table

    __ ldub(Gargs, 3, val);
    __ lduw(Gargs, 8, crc);

    __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);

    __ not1(crc);  // ~crc
    __ clruwu(crc);
    __ update_byte_crc32(crc, val, table);
    __ not1(crc);  // ~crc

    // result in O0
    __ retl();
    __ delayed()->nop();

    // generate a vanilla native entry as the slow path
    __ bind(L_slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    Label L_slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ load_contents(state, O2);  // load the safepoint state word, not its address
    __ set(SafepointSynchronize::_not_synchronized, O3);
    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);

    // Load parameters from the stack
    const Register crc    = O0;  // initial crc
    const Register buf    = O1;  // source java byte array address
    const Register len    = O2;  // len
    const Register offset = O3;  // offset

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ lduw(Gargs, 0,  len);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 32, crc);
      __ add(buf, offset, buf);
    } else {
      __ lduw(Gargs, 0,  len);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 24, crc);
      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
      __ add(buf, offset, buf);
    }

    // Call the crc32 kernel
    __ MacroAssembler::save_thread(L7_thread_cache);
    __ kernel_crc32(crc, buf, len, O3);
    __ MacroAssembler::restore_thread(L7_thread_cache);

    // result in O0
    __ retl();
    __ delayed()->nop();

    // generate a vanilla native entry as the slow path
    __ bind(L_slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}
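
// Note on the Gargs offsets above (and in the CRC32C entry below): each
// expression stack slot is 8 bytes here, and a long occupies two slots, so
// in the updateByteBuffer case the long 'buf' sits at Gargs + 16 and 'crc'
// moves out to Gargs + 32, whereas the array reference of updateBytes is a
// single slot, leaving 'crc' at Gargs + 24.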

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native.
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32CIntrinsics) {
    address entry = __ pc();

    // Load parameters from the stack
    const Register crc    = O0;  // initial crc
    const Register buf    = O1;  // source java byte array address
    const Register offset = O2;  // offset
    const Register end    = O3;  // index of last element to process
    const Register len    = O2;  // len argument to the kernel
    const Register table  = O3;  // crc32c lookup table address

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
      __ lduw(Gargs, 0,  end);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 32, crc);
      __ add(buf, offset, buf);
      __ sub(end, offset, len);
    } else {
      __ lduw(Gargs, 0,  end);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 24, crc);
      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
      __ add(buf, offset, buf);
      __ sub(end, offset, len);
    }

    // Call the crc32c kernel
    __ MacroAssembler::save_thread(L7_thread_cache);
    __ kernel_crc32c(crc, buf, len, table);
    __ MacroAssembler::restore_thread(L7_thread_cache);

    // result in O0
    __ retl();
    __ delayed()->nop();

    return entry;
  }
  return NULL;
}

// Not supported
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  return NULL;
}

// TODO: rather than touching all pages, check against stack_overflow_limit and bang yellow page to
// generate exception
void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
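//
// The sequence below is roughly: fixed frame setup, signature handler,
// transition to _thread_in_native, the native call itself, a possible
// safepoint block on return, result unboxing, and activation teardown.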
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch ;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which haven't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // runtime, exception handling (i.e. unlock_if_synchronized_method) will
  // check this thread-local flag.
  // The flag forces an unwind in the topmost interpreter frame without
  // performing an unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }


  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, Method::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ br_notnull_short(G3_scratch, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in it.
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);     // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // call signature handler; it will move the args properly since Llocals in current frame
  // matches that in outer frame

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
    // for static methods insert the mirror argument
    __ load_mirror(O1, Lmethod);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ br_notnull_short(O1, Assembler::pt, L);
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ br_notnull_short(O0, Assembler::pt, L);
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flushw();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);
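
  // From here until the state transition back below, the VM can observe this
  // thread as _thread_in_native; on return it must pass through
  // _thread_in_native_trans so that a pending safepoint can stop it before
  // it re-enters Java code.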

  // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
    __ bind(L);

    // Block.  Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef _LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* _LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // switch to thread_in_Java.
  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ st_ptr(G0, G2_thread, JavaThread::pending_jni_exception_check_fn_offset());
  }

  // reset handle block
  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

  // If we have an oop result, store it where it will be safe for any further gc
  // until we return, now that we've released the handle it might be protected by.

  {
    Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
    // Unbox oop result, e.g. JNIHandles::resolve value in O0.
    __ br_null(O0, false, Assembler::pn, store_result); // Use NULL as-is.
    __ delayed()->andcc(O0, JNIHandles::weak_tag_mask, G0); // Test for jweak
    __ brx(Assembler::zero, true, Assembler::pt, store_result);
    __ delayed()->ld_ptr(O0, 0, O0); // Maybe resolve (untagged) jobject.
    // Resolve jweak.
    __ ld_ptr(O0, -JNIHandles::weak_tag_value, O0);
#if INCLUDE_ALL_GCS
    if (UseG1GC) {
      __ g1_write_barrier_pre(noreg /* obj */,
                              noreg /* index */,
                              0 /* offset */,
                              O0 /* pre_val */,
                              G3_scratch /* tmp */,
                              true /* preserve_o_regs */);
    }
#endif // INCLUDE_ALL_GCS
    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);
  }

  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);
    __ br_null_short(Gtemp, Assembler::pt, L);
    // Note: This could be handled more efficiently since we know that the native
    //       method doesn't have an exception handler. We could directly return
    //       to the exception handler for the caller.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1; we can't tell if we're returning to interpreted
  // or compiled code, so just be safe.
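  // Sketch of the packing below (illustration): the two 32-bit halves of the
  // long result arrive in O0 (high word) and O1 (low word) and are merged as
  //   G1 = ((uint64_t)O0 << 32) | zero_extend_32(O1)
  // which is where C2-compiled callers expect a long result on 32-bit SPARC.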
  __ sllx(O0, 32, G1);  // Shift bits into high G1
  __ srl (O1, 0, O1);   // Zero extend O1
  __ or3 (O1, G1, G1);  // OR 64 bits into G1

#endif /* COMPILER2 && !_LP64 */

  // dispose of return address and remove activation
#ifdef ASSERT
  { Label ok;
    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  __ jmp(Lscratch, 0);
  __ delayed()->nop();

  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }

  return entry;
}


// Generic method entry to (asm) interpreter
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address constMethod (G5_method, Method::const_offset());
  // Seems like G5_method is live at the point this is used. So we could make this look consistent
  // and use in the asserts.
  const Address access_flags (Lmethod, Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  { Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

#ifdef FAST_DISPATCH
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
                                          // set bytecode dispatch table base
#endif

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg;    // will be G0 if we must clear locals
  // The way the code was set up before, zerolocals was always true for vanilla java entries.
  // It could only be false for the specialized entries like accessor or empty, which have
  // no extra locals, so the testing was a waste of time and the extra locals were always
  // initialized. We removed this extra complication from already over-complicated code.

  init_value = G0;
  Label clear_loop;

  const Register RconstMethod = O1;
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
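  // Frame-layout assumption behind the loop below (descriptive sketch):
  // Llocals points at local 0 and locals grow toward lower addresses, so
  // O2 is set below the lowest-addressed local and O1 to the slot of the
  // first non-parameter local; the loop then stores init_value (G0) into
  // exactly the non-parameter locals.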
  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize, O2 );
  __ sll( O1, Interpreter::logStackElementSize, O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which has not
  // been entered yet, we set the thread-local variable
  // _do_not_unlock_if_synchronized to true. If any exception is thrown by
  // the runtime, the exception handling (i.e. unlock_if_synchronized_method)
  // will check this thread-local flag.
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch);
  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER the invocation_counter check and the stack overflow check,
  // so the method is not locked if either of them overflows.
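  // (Descriptive note: lock_method() below enters the monitor for the
  //  receiver, or for the class mirror if the method is static; the matching
  //  unlock happens on the return and exception paths, not in this entry.)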
  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);

  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ ba_short(profile_method_continue);
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }

  return entry;
}

//----------------------------------------------------------------------------------------------------
// Exceptions
void TemplateInterpreterGenerator::generate_throw_exception() {

  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // O0: exception

  // entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  __ verify_thread();
  // expression stack is undefined here
  // O0: exception, i.e. Oexception
  // Lbcp: exception bcp
  __ verify_oop(Oexception);

  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  // call C routine to find handler and jump to it
  __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
  __ push_ptr(O1); // push exception for exception handler bytecodes

  __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
  __ delayed()->nop();

  // if the exception is not handled in the current frame
  // the frame is removed and the exception is rethrown
  // (i.e. exception continuation is _rethrow_exception)
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).

  // in current activation
  // tos: exception
  // Lbcp: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
  // Set the popframe_processing bit in popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.
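  // What the three instructions below amount to (descriptive sketch):
  //   popframe_condition |= popframe_processing_bit
  // i.e. a plain load / or3 / store of the 32-bit condition word in the thread.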
  __ ld(popframe_condition_addr, G3_scratch);
  __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
  __ stw(G3_scratch, popframe_condition_addr);

  // Empty the expression stack, as in normal exception handling
  __ empty_expression_stack();
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
    __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);

    const Register Gtmp1 = G3_scratch;
    const Register Gtmp2 = G1_scratch;
    const Register RconstMethod = Gtmp1;
    const Address constMethod(Lmethod, Method::const_offset());
    const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

    // Compute size of arguments for saving when returning to deoptimized caller
    __ ld_ptr(constMethod, RconstMethod);
    __ lduh(size_of_parameters, Gtmp1);
    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
    __ sub(Llocals, Gtmp1, Gtmp2);
    __ add(Gtmp2, wordSize, Gtmp2);
    // Save these arguments
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
    // Inform deoptimization that it is responsible for restoring these arguments
    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
    __ st(Gtmp1, popframe_condition_addr);

    // Return from the current method
    // The caller's SP was adjusted upon method entry to accommodate
    // the callee's non-argument locals. Undo that adjustment.
    __ ret();
    __ delayed()->restore(I5_savedSP, G0, SP);

    __ bind(caller_not_deoptimized);
  }

  // Clear the popframe condition flag
  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);

  // Get out of the current method (how this is done depends on the particular compiler calling
  // convention that the interpreter currently follows)
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ restore(I5_savedSP, G0, SP);
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

#if INCLUDE_JVMTI
  {
    Label L_done;

    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);

    __ br_null(G1_scratch, false, Assembler::pn, L_done);
    __ delayed()->nop();

    __ st_ptr(G1_scratch, Lesp, wordSize);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Resume bytecode interpretation at the current bcp
  __ dispatch_next(vtos);
  // end of JVMTI PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
  __ pop_ptr(Oexception); // get exception

  // Intel has the following comment:
  //// remove the activation (without doing throws on illegalMonitorExceptions)
  // They remove the activation without checking for bad monitor state.
  // %%% We should make sure this is the right semantics before implementing.

  __ set_vm_result(Oexception);
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);

  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);

  __ get_vm_result(Oexception);
  __ verify_oop(Oexception);

  const int return_reg_adjustment = frame::pc_return_offset;
  Address issuing_pc_addr(I7, return_reg_adjustment);

  // We are done with this activation frame; find out where to go next.
  // The continuation point will be an exception handler, which expects
  // the following registers set up:
  //
  // Oexception: exception
  // Oissuing_pc: the local call that threw exception
  // Other On: garbage
  // In/Ln: the contents of the caller's register window
  //
  // We do the required restore at the last possible moment, because we
  // need to preserve some state across a runtime call.
  // (Remember that the caller activation is unknown--it might not be
  // interpreted, so things like Lscratch are useless in the caller.)

  // Although the Intel version uses call_C, we can use the more
  // compact call_VM. (The only real difference on SPARC is a
  // harmlessly ignored [re]set_last_Java_frame, compared with
  // the Intel code which lacks this.)
  __ mov(Oexception, Oexception->after_save());        // get exception in I0 so it will be on O0 after restore
  __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
  __ super_call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
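  // (Descriptive sketch of the pair below: the JMP transfers control to the
  //  handler address returned in O0, while the delay-slot
  //  restore(I5_savedSP, G0, SP) pops the register window and re-installs the
  //  caller's pre-adjustment SP that was saved in I5_savedSP on entry.)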
  __ JMP(O0, 0); // return exception handler in caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  // (same old exception object is already in Oexception; see above)
  // Note that an "issuing PC" is actually the next PC after the call
}


//
// JVMTI ForceEarlyReturn support
//

address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);

  __ remove_activation(state,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret();  // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  return entry;
} // end of JVMTI ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr(); __ ba_short(L);
  fep = __ pc(); __ push_f();   __ ba_short(L);
  dep = __ pc(); __ push_d();   __ ba_short(L);
  lep = __ pc(); __ push_l();   __ ba_short(L);
  iep = __ pc(); __ push_i();
  bep = cep = sep = iep;        // there aren't any
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}

// --------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ mov(O7, Lscratch); // protect return address within interpreter

  // Pass a 0 (not used on SPARC) and the top of stack to the bytecode tracer
  __ mov( Otos_l2, G3_scratch );
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
  __ mov(Lscratch, O7); // restore return address
  __ pop(state);
  __ retl();
  __ delayed()->nop();

  return entry;
}


// helpers for generate_and_dispatch

void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  AddressLiteral index   (&BytecodePairHistogram::_index);
  AddressLiteral counters((address) &BytecodePairHistogram::_counters);

  // get index, shift out old bytecode, bring in new bytecode, and store it
  // _index = (_index >> log2_number_of_codes) |
  //          (bytecode << log2_number_of_codes);

  __ load_contents(index, G4_scratch);
  __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
  __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch );
  __ or3( G3_scratch, G4_scratch, G4_scratch );
  __ store_contents(G4_scratch, index, G3_scratch);

  // bump bucket contents
  // _counters[_index]++;

  __ set(counters, G3_scratch);                       // loads into G3_scratch
  __ sll( G4_scratch, LogBytesPerWord, G4_scratch );  // index is a word address
  __ add(G3_scratch, G4_scratch, G3_scratch);         // add in index
  __ ld (G3_scratch, 0, G4_scratch);
  __ inc(G4_scratch);
  __ st (G4_scratch, 0, G3_scratch);
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  address entry = Interpreter::trace_code(t->tos_in());
  guarantee(entry != NULL, "entry must have been generated");
  __ call(entry, relocInfo::none);
  __ delayed()->nop();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  AddressLiteral counter(&BytecodeCounter::_counter_value);
  __ load_contents(counter, G3_scratch);
  AddressLiteral stop_at(&StopInterpreterAt);
  __ load_ptr_contents(stop_at, G4_scratch);
  __ cmp(G3_scratch, G4_scratch);
  __ breakpoint_trap(Assembler::equal, Assembler::icc);
}
#endif // not PRODUCT