/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH


// Generation of Interpreter
//
// The InterpreterGenerator generates the interpreter into Interpreter::_code.
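//
// Note: FAST_DISPATCH is #define'd and then immediately #undef'd above, so
// the FAST_DISPATCH code paths in this file are compiled out. The `__`
// macro defined just below is the usual HotSpot shorthand for `_masm->`.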


#define __ _masm->


//----------------------------------------------------------------------------------------------------


void InterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
  __ stx(O0, l_tmp);
#else
  __ std(O0, l_tmp);
#endif
}

void InterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
  __ ldx(l_tmp, O0);
#else
  __ ldd(l_tmp, O0);
#endif
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}

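// The return entry generated below pops the callee's arguments off the
// caller's expression stack using the parameter size recorded in the call
// site's ConstantPoolCacheEntry. As a rough sketch (a paraphrase, not the
// emitted code): for an invoke with 3 argument slots,
//   Lesp += 3 << Interpreter::logStackElementSize
// before the next bytecode is dispatched.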
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  if (state == atos) {
    __ profile_return_type(O0, G3_scratch, G1_scratch);
  }

#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for Longs.  C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter returns longs in both G1 and O0/O1 in the 32-bit
  // build, even when returning from interpreted code, we just do a little
  // shuffling.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.

  if (state == ltos) {
    __ srl (G1,  0, O1);
    __ srlx(G1, 32, O0);
  }
#endif // !_LP64 && COMPILER2

  // The callee returns with the stack possibly adjusted by adapter transition.
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.

  const Register cache = G3_scratch;
  const Register index = G1_scratch;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
  const Register parameter_size = flags;
  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments
  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ br_null_short(Gtemp, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
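// A worked example of the narrowing conversions in the result handler below
// (illustrative arithmetic only): for T_BYTE, a raw O0 of 0x000000F0 becomes
// 0xF0000000 after sll 24 and 0xFFFFFFF0 after sra 24, i.e. sign-extended;
// T_CHAR pairs sll with srl instead, so 0xFFFF8001 narrows to 0x00008001,
// i.e. zero-extended.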
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2);  // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i);                           break;
    case T_VOID   : /* nothing to do */                           break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );       break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );       break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                           // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_int32(0);)      // marker for disassembly
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ dispatch_next(state);
  return entry;
}

//
// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in MethodCounters* or in
  // MDO depending on whether we're profiling or not.
  const Register Rcounters = G3_scratch;
  Label done;
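  // A sketch of the 'sticky'/periodic tests used below (a paraphrase, not
  // emitted code): in the tiered paths, increment_mask_and_jump bumps the
  // counter and branches to *overflow whenever
  //   (counter + increment) & mask == 0,
  // so with Tier0InvokeNotifyFreqLog == 7 the runtime is notified roughly
  // every 128th invocation; the non-tiered path instead compares the raw
  // counter against InterpreterInvocationLimit.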
  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(MethodData::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba_short(done);
    }

    // Increment counter in MethodCounters*
    __ bind(no_mdo);
    Address invocation_counter(Rcounters,
                               in_bytes(MethodCounters::invocation_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
    __ get_method_counters(Lmethod, Rcounters, done);
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G4_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else {
    // Update standard invocation counters
    __ get_method_counters(Lmethod, Rcounters, done);
    __ increment_invocation_counter(Rcounters, O0, G4_scratch);
    if (ProfileInterpreter) {
      Address interpreter_invocation_counter(Rcounters,
                in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G4_scratch);
      __ inc(G4_scratch);
      __ st(G4_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
      __ load_contents(profile_limit, G3_scratch);
      __ cmp_and_br_short(O0, G3_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
    __ load_contents(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
    __ delayed()->nop();
    __ bind(done);
  }

}

// Allocate monitor and lock method (asm interpreter)
// Lmethod - Method*
//
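// A rough outline of lock_method (a paraphrase): load the access flags, pick
// the synchronization object -- the receiver at local 0 for instance
// methods, the holder's java mirror for static ones -- allocate a
// BasicObjectLock on the monitor stack, store the object into it, and enter
// the monitor; roughly the bytecode-level equivalent of
//   synchronized (is_static ? holder.mirror : receiver) { ... }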
void InterpreterGenerator::lock_method(void) {
  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.

#ifdef ASSERT
 { Label ok;
   __ btst(JVM_ACC_SYNCHRONIZED, O0);
   __ br( Assembler::notZero, false, Assembler::pt, ok);
   __ delayed()->nop();
   __ stop("method doesn't need synchronization");
   __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    __ ld_ptr( Lmethod, in_bytes(Method::const_offset()), O0);
    __ ld_ptr( O0, in_bytes(ConstMethod::constants_offset()), O0);
    __ ld_ptr( O0, ConstantPool::pool_holder_offset_in_bytes(), O0);

    // lock the mirror, not the Klass*
    __ ld_ptr( O0, mirror_offset, O0);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());   // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}


void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch,
                                                                 Register Rscratch2) {
  const int page_size = os::vm_page_size();
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch, Rscratch2);

  __ set(page_size, Rscratch);
  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);

  // get the stack base, and in debug, verify it is non-zero
  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
#ifdef ASSERT
  Label base_not_zero;
  __ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);
  __ stop("stack base is zero in generate_stack_overflow_check");
  __ bind(base_not_zero);
#endif

  // get the stack size, and in debug, verify it is non-zero
  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT
  Label size_not_zero;
  __ br_notnull_short(Rscratch2, Assembler::pn, size_not_zero);
  __ stop("stack size is zero in generate_stack_overflow_check");
  __ bind(size_not_zero);
#endif

  // compute the beginning of the protected zone minus the requested frame size
  __ sub( Rscratch, Rscratch2, Rscratch );
  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
  __ add( Rscratch, Rscratch2, Rscratch );

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register)
  __ add( Rscratch, Rframe_size, Rscratch );

  // the frame is greater than one page in size, so check against
  // the bottom of the stack
  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);

  // the stack will overflow, throw an exception

  // Note that SP is restored to sender's sp (in the delay slot). This
  // is necessary if the sender's frame is an extended compiled frame
  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
  // adaptations.

  // Note also that the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
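  // An illustrative instance of the limit computed above (numbers assumed,
  // not fixed): with 8K pages, StackRedPages == 1 and StackYellowPages == 2,
  //   limit = (stack_base - stack_size) + 3*8K + frame_size
  // and the frame only fits if SP is (unsigned) greater than that limit,
  // which the branch above has already checked.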
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
  __ jump_to(stub, Rscratch);
  __ delayed()->mov(O5_savedSP, SP);

  // if you get to here, then there is enough stack space
  __ bind( after_frame_check );
}


//
// Generate a fixed interpreter frame. The setup is identical for interpreted
// methods and for native methods, hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP for the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    Method::extra_stack_entries() +            // extra stack for jsr 292
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register RconstMethod = Glocals_size;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.
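  // An illustrative walk-through of the sizing below, assuming a method with
  // 2 parameter words and 5 max locals: Glocals_size starts as 2 (loaded from
  // ConstMethod), Gargs is advanced past the 2 parameter slots, and the
  // 3 non-parameter locals are later allocated by bumping SP in the caller's
  // frame so that all 5 locals stay contiguous.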
  const Address constMethod       (G5_method, Method::const_offset());
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size,  extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    const Address size_of_locals(Otmp1, ConstMethod::size_of_locals_offset());
    __ ld_ptr( constMethod, Otmp1 );
    __ lduh( size_of_locals, Otmp1 );
    __ sub( Otmp1, Glocals_size, Glocals_size );
    __ round_to( Glocals_size, WordsPerLong );
    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );

    // see if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ ld_ptr( constMethod, Gframe_size );
    __ lduh( Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size );
    __ add( Gframe_size, extra_space, Gframe_size );
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add( Gframe_size, Glocals_size, Gframe_size );

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);

    __ sub( Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub( SP, Glocals_size, SP );
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                 // set Lmethod
  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
#ifdef _LP64
  __ add( Lmonitors, STACK_BIAS, Lmonitors );  // Account for 64 bit stack bias
#endif
  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals

  if (ProfileInterpreter) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    __ set_method_data_pointer();
  }

}

// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {

  // A method that does nothing but return...

  address entry = __ pc();
  Label slow_path;

  // do nothing for empty methods (do not even increment invocation counter)
  if ( UseFastEmptyMethods) {
    // If we need a safepoint check, generate full interpreter entry.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);

    // Code: _return
    __ retl();
    __ delayed()->mov(O5_savedSP, SP);

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
  return NULL;
}

// Call an accessor method (assuming it is resolved; otherwise drop into
// the vanilla (slow path) entry)

// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {

  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
  // parameter size = 1
  // Note: We can only use this code if the getfield has been resolved
  //       and if we don't have a null-pointer exception => check for
  //       these conditions first and use slow path if necessary.
  address entry = __ pc();
  Label slow_path;


  // XXX: for compressed oops pointer loading and decoding doesn't fit in
  // delay slot and damages G1
  if ( UseFastAccessorMethods && !UseCompressedOops ) {
    // Check if we need to reach a safepoint and generate full interpreter
    // frame if so.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);

    // Check if local 0 != NULL
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    // check if local 0 == NULL and go the slow path
    __ br_null_short(Otos_i, Assembler::pn, slow_path);


    // read first instruction word and extract bytecode @ 1 and index @ 2
    // get first 4 bytes of the bytecodes (big endian!)
    __ ld_ptr(G5_method, Method::const_offset(), G1_scratch);
    __ ld(G1_scratch, ConstMethod::codes_offset(), G1_scratch);

    // move index @ 2 far left then to the rightmost two bytes.
    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
                       ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);

    // get constant pool cache
    __ ld_ptr(G5_method, Method::const_offset(), G3_scratch);
    __ ld_ptr(G3_scratch, ConstMethod::constants_offset(), G3_scratch);
    __ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);

    // get specific constant pool cache entry
    __ add(G3_scratch, G1_scratch, G3_scratch);

    // Check the constant pool cache entry to see if it has been resolved.
    // If not, need the slow path.
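    // A paraphrase of the resolution check below: the cache entry's indices
    // field records the resolved bytecode in bits 16..23, so the code
    // computes (indices >> 16) & 0xFF and requires Bytecodes::_getfield;
    // an unresolved entry takes the slow path instead.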
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ and3(G1_scratch, 0xFF, G1_scratch);
    __ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);

    // Get the type and return field offset from the constant pool cache
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);

    Label xreturn_path;
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
    __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
    // Make sure we don't need to mask G1_scratch after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmp(G1_scratch, atos );
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, itos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, stos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, ctos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
    __ cmp(G1_scratch, btos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
    __ should_not_reach_here();
#endif
    __ ldsb(Otos_i, G3_scratch, Otos_i);
    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
  return NULL;
}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // In the G1 code we don't check if we need to reach a safepoint. We
    // continue and the thread will safepoint at the next bytecode dispatch.

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    // check if local 0 == NULL and go the slow path
    __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);


    // Load the value of the referent field.
    if (Assembler::is_simm13(referent_offset)) {
      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
    } else {
      __ set(referent_offset, G3_scratch);
      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
    }

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer. Note with
    // these parameters the pre-barrier does not generate
    // the load of the previous value

    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
                            Otos_i /* pre_val */,
                            G3_scratch /* tmp */,
                            true /* preserve_o_regs */);

    // _areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch ;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception is thrown by
  // the runtime, the exception handling (i.e. unlock_if_synchronized_method)
  // will check this thread local flag.
  // This flag has two effects: it forces an unwind in the topmost
  // interpreter frame, and no unlock is performed while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }


  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, Method::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ br_notnull_short(G3_scratch, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored there.
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);     // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // call signature handler; it will move the args properly since Llocals in
  // the current frame matches that in the outer frame

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());

    __ ld_ptr(Lmethod, Method::const_offset(), O1);
    __ ld_ptr(O1, ConstMethod::constants_offset(), O1);
    __ ld_ptr(O1, ConstantPool::pool_holder_offset_in_bytes(), O1);
    __ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ br_notnull_short(O1, Assembler::pt, L);
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.
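  // In C terms the upcoming call is roughly (a sketch, not the exact stub):
  //   result = (*native_fn)(JNIEnv*, jobject recv_or_mirror_handle, ...);
  // where the JNIEnv* lands in O0 via the delay slot of the callr below and
  // the remaining handles sit in O1..O5 and on the stack.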

#ifdef ASSERT
  { Label L;
    __ br_notnull_short(O0, Assembler::pt, L);
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flushw();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);

  // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
    __ bind(L);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef _LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* _LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // reset handle block
  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

  // If we have an oop result store it where it will be safe for any further gc
  // until we return now that we've released the handle it might be protected by

  {
    Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
    __ mov(G0, O0);

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);

  }


  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);
    __ br_null_short(Gtemp, Assembler::pt, L);
    // Note: This could be handled more efficiently since we know that the native
    //       method doesn't have an exception handler. We could directly return
    //       to the exception handler for the caller.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1. We can't tell whether we're returning to
  // interpreted or compiled code, so just be safe.

  __ sllx(O0, 32, G1);          // Shift bits into high G1
  __ srl (O1, 0, O1);           // Zero extend O1
  __ or3 (O1, G1, G1);          // OR 64 bits into G1

#endif /* COMPILER2 && !_LP64 */

  // dispose of return address and remove activation
#ifdef ASSERT
  {
    Label ok;
    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();


  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }



  return entry;
}


// Generic method entry to (asm) interpreter
//------------------------------------------------------------------------------------------------------------------------
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter = UseCompiler || CountCompiledCalls;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch ;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address constMethod       (G5_method, Method::const_offset());
  // Seems like G5_method is live at the point this is used. So we could make this look consistent
  // and use it in the asserts.
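  // A rough outline of this entry in order of emission (a paraphrase):
  // assert the method is neither native nor abstract, build the fixed frame,
  // zero the non-parameter locals, set _do_not_unlock_if_synchronized, bump
  // the invocation counter, bang the shadow pages, lock if synchronized,
  // notify JVMTI, then dispatch the first bytecode.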
  const Address access_flags      (Lmethod,   Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

#ifdef FAST_DISPATCH
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
                                          // set bytecode dispatch table base
#endif

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg;    // will be G0 if we must clear locals
  // The way the code was set up before, zerolocals was always true for
  // vanilla java entries. It could only be false for the specialized
  // entries like accessor or empty, which have no extra locals, so the
  // testing was a waste of time and the extra locals were always
  // initialized. We removed this extra complication from already
  // over-complicated code.

  init_value = G0;
  Label clear_loop;

  const Register RconstMethod = O1;
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize, O2);
  __ sll( O1, Interpreter::logStackElementSize, O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception is thrown by
  // the runtime, the exception handling (i.e. unlock_if_synchronized_method)
  // will check this thread local flag.
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch);
  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);


  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ ba_short(profile_method_continue);
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}


//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call
// methods. These both come in synchronized and non-synchronized versions but
// the frame layout they create is very similar. The other method entry
// types are really just special purpose entries that are really entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the Method* of the method to call
//    Lesp:      points to the TOS of the caller's expression stack
//               after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
//   pop parameters from the caller's stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//             + vm_local_words
//             + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code

// Try this optimization:  Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations.  It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs    : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |
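
// Paraphrasing size_activation_helper below (all counts in words):
//   size = round_to(max_stack * Interpreter::stackElementWords
//                   + rounded_vm_local_words
//                   + frame::memory_parameter_word_sp_offset, WordsPerLong)
//          + round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong)
//          + monitor_size
// which is the static mirror of what generate_fixed_frame emits at run time.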

// How much stack a method top interpreter activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {

  // See call_stub code
  int call_stub_size = round_to(7 + frame::memory_parameter_word_sp_offset,
                                WordsPerLong);    // 7 + register save area

  // Save space for one monitor to get into the interpreted method in case
  // the method is synchronized
  int monitor_size = method->is_synchronized() ?
                                1*frame::interpreter_frame_monitor_size() : 0;
  return size_activation_helper(method->max_locals(), method->max_stack(),
                                monitor_size) + call_stub_size;
}

int AbstractInterpreter::size_activation(int max_stack,
                                         int temps,
                                         int extra_args,
                                         int monitors,
                                         int callee_params,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in InterpreterGenerator::generate_fixed_frame.

  int monitor_size = monitors * frame::interpreter_frame_monitor_size();

  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");

  //
  // Note: if you look closely this appears to be doing something much different
  // than generate_fixed_frame. What is happening is this. On sparc we have to do
  // this dance with interpreter_sp_adjustment because the window save area would
  // appear just below the bottom (tos) of the caller's java expression stack. Because
  // the interpreter wants to have the locals completely contiguous, generate_fixed_frame
  // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
  // Now in generate_fixed_frame the extension of the caller's sp happens in the callee.
  // In this code the opposite occurs: the caller adjusts its own stack based on the callee.
  // This is mostly ok but it does cause a problem when we get to the initial frame (the
  // oldest), because the oldest frame would have adjusted its caller's frame, and yet that
  // frame already exists and isn't part of this array of frames we are unpacking. So at
  // first glance this would seem to mess up that frame. However,
  // Deoptimization::fetch_unroll_info_helper(), after it calculates all of the frames'
  // on_stack_size()s, will figure out the amount to adjust the caller of the initial
  // (oldest) frame, and the calculation will all add up. It does seem simpler to account
  // for the adjustment here (and remove the callee... parameters here). However, this
  // would mean that this routine would have to take the caller frame as input so we could
  // adjust its sp (and set its interpreter_sp_adjustment) and run the calling loop in the
  // reverse order. It would also appear to mean making this code aware of what the
  // interactions are when that initial caller frame was an osr or other adapter frame.
  // Deoptimization is complicated enough, and hard enough to debug, that there is no sense
  // in messing with working code.
  //

  int rounded_cls = round_to((callee_locals - callee_params), WordsPerLong);
  assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");

  int raw_frame_size = size_activation_helper(rounded_cls, max_stack, monitor_size);

  return raw_frame_size;
}
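
// [Editor's sketch] round_to() rounds its first argument up to a multiple of
// its second (a power of two). A plain-C++ model, not used by the code above;
// the sample numbers in the comment are hypothetical:
static int round_to_sketch(int x, int align) {
  // e.g. round_to_sketch(3, 2) == 4 and round_to_sketch(4, 2) == 4; so with
  // callee_locals == 5, callee_params == 2 and WordsPerLong == 2, rounded_cls
  // above would be round_to(5 - 2, 2) == 4 words of non-argument locals.
  return (x + align - 1) & ~(align - 1);
}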

void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_local_count,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {
  // Set up the following variables:
  //   - Lmethod
  //   - Llocals
  //   - Lmonitors (to the indicated number of monitors)
  //   - Lesp (to the indicated number of temps)
  // The frame caller on entry is a description of the caller of the
  // frame we are about to lay out. We are guaranteed that we will be
  // able to fill in a new interpreter frame as its callee (i.e. the
  // stack space is allocated and the amount was determined by an
  // earlier call to the size_activation() method). On return caller
  // will describe the interpreter frame we just laid out.

  // The skeleton frame must already look like an interpreter frame
  // even if not fully filled out.
  assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");

  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
  int monitor_size = moncount * frame::interpreter_frame_monitor_size();
  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");

  intptr_t* fp = interpreter_frame->fp();

  JavaThread* thread = JavaThread::current();
  RegisterMap map(thread, false);
  // More verification that skeleton frame is properly walkable
  assert(fp == caller->sp(), "fp must match");

  intptr_t* montop = fp - rounded_vm_local_words;

  // preallocate monitors (cf. __ add_monitor_to_stack)
  intptr_t* monitors = montop - monitor_size;

  // preallocate stack space
  intptr_t* esp = monitors - 1 -
                  (tempcount * Interpreter::stackElementWords) -
                  popframe_extra_args;

  int local_words = method->max_locals() * Interpreter::stackElementWords;
  NEEDS_CLEANUP;
  intptr_t* locals;
  if (caller->is_interpreted_frame()) {
    // Can force the locals area to end up properly overlapping the top of the
    // expression stack.
    intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
    // Note that this computation means we replace size_of_parameters() values from the caller
    // interpreter frame's expression stack with our argument locals
    int parm_words = caller_actual_parameters * Interpreter::stackElementWords;
    locals = Lesp_ptr + parm_words;
    int delta = local_words - parm_words;
    int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
    *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(fp + computed_sp_adjustment) - STACK_BIAS;
    if (!is_bottom_frame) {
      // Llast_SP is set below for the current frame to SP (with the
      // extra space for the callee's locals). Here we adjust
      // Llast_SP for the caller's frame, removing the extra space
      // for the current method's locals.
      *caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
    } else {
      assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
    }
  } else {
    assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
    // Don't have Lesp available; lay out locals block in the caller
    // adjacent to the register window save area.
    //
    // Compiled frames do not allocate a varargs area, which is why this if
    // statement is needed.
    //
    if (caller->is_compiled_frame()) {
      locals = fp + frame::register_save_words + local_words - 1;
    } else {
      locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
    }
    if (!caller->is_entry_frame()) {
      // Caller wants its own SP back
      int caller_frame_size = caller->cb()->frame_size();
      *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
    }
  }
  if (TraceDeoptimization) {
    if (caller->is_entry_frame()) {
      // make sure I5_savedSP and the entry frame's notion of saved SP
      // agree.  This assertion duplicates a check in entry frame code
      // but catches the failure earlier.
      assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
             "would change caller's SP");
    }
    if (caller->is_entry_frame()) {
      tty->print("entry ");
    }
    if (caller->is_compiled_frame()) {
      tty->print("compiled ");
      if (caller->is_deoptimized_frame()) {
        tty->print("(deopt) ");
      }
    }
    if (caller->is_interpreted_frame()) {
      tty->print("interpreted ");
    }
    tty->print_cr("caller fp=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(caller->fp()), p2i(caller->sp()));
    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(caller->sp()), p2i(caller->sp() + 16));
    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(caller->fp()), p2i(caller->fp() + 16));
    tty->print_cr("interpreter fp=" INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->fp()), p2i(interpreter_frame->sp()));
    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->sp()), p2i(interpreter_frame->sp() + 16));
    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->fp()), p2i(interpreter_frame->fp() + 16));
    tty->print_cr("Llocals = " INTPTR_FORMAT, p2i(locals));
    tty->print_cr("Lesp = " INTPTR_FORMAT, p2i(esp));
    tty->print_cr("Lmonitors = " INTPTR_FORMAT, p2i(monitors));
  }

  if (method->max_locals() > 0) {
    assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
    assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
    assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
    assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
  }
#ifdef _LP64
  assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
#endif

  *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
  *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
  *interpreter_frame->register_addr(Lmonitors)   = (intptr_t) monitors;
  *interpreter_frame->register_addr(Lesp)        = (intptr_t) esp;
  // Llast_SP will be same as SP as there is no adapter space
  *interpreter_frame->register_addr(Llast_SP)    = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
  *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
#ifdef FAST_DISPATCH
  *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
#endif


#ifdef ASSERT
  BasicObjectLock* mp = (BasicObjectLock*)monitors;

  assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
  assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
  assert(interpreter_frame->interpreter_frame_monitor_end() == mp, "monitor_end matches");
  assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp) + monitor_size, "monitor_begin matches");
  assert(interpreter_frame->interpreter_frame_tos_address() - 1 == esp, "esp matches");

  // check bounds
  intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
  intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
  assert(lo < monitors && montop <= hi, "monitors in bounds");
  assert(lo <= esp && esp < monitors, "esp in bounds");
"esp in bounds"); 1772 #endif // ASSERT 1773 } 1774 1775 //---------------------------------------------------------------------------------------------------- 1776 // Exceptions 1777 void TemplateInterpreterGenerator::generate_throw_exception() { 1778 1779 // Entry point in previous activation (i.e., if the caller was interpreted) 1780 Interpreter::_rethrow_exception_entry = __ pc(); 1781 // O0: exception 1782 1783 // entry point for exceptions thrown within interpreter code 1784 Interpreter::_throw_exception_entry = __ pc(); 1785 __ verify_thread(); 1786 // expression stack is undefined here 1787 // O0: exception, i.e. Oexception 1788 // Lbcp: exception bcx 1789 __ verify_oop(Oexception); 1790 1791 1792 // expression stack must be empty before entering the VM in case of an exception 1793 __ empty_expression_stack(); 1794 // find exception handler address and preserve exception oop 1795 // call C routine to find handler and jump to it 1796 __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception); 1797 __ push_ptr(O1); // push exception for exception handler bytecodes 1798 1799 __ JMP(O0, 0); // jump to exception handler (may be remove activation entry!) 1800 __ delayed()->nop(); 1801 1802 1803 // if the exception is not handled in the current frame 1804 // the frame is removed and the exception is rethrown 1805 // (i.e. exception continuation is _rethrow_exception) 1806 // 1807 // Note: At this point the bci is still the bxi for the instruction which caused 1808 // the exception and the expression stack is empty. Thus, for any VM calls 1809 // at this point, GC will find a legal oop map (with empty expression stack). 1810 1811 // in current activation 1812 // tos: exception 1813 // Lbcp: exception bcp 1814 1815 // 1816 // JVMTI PopFrame support 1817 // 1818 1819 Interpreter::_remove_activation_preserving_args_entry = __ pc(); 1820 Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset()); 1821 // Set the popframe_processing bit in popframe_condition indicating that we are 1822 // currently handling popframe, so that call_VMs that may happen later do not trigger new 1823 // popframe handling cycles. 1824 1825 __ ld(popframe_condition_addr, G3_scratch); 1826 __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch); 1827 __ stw(G3_scratch, popframe_condition_addr); 1828 1829 // Empty the expression stack, as in normal exception handling 1830 __ empty_expression_stack(); 1831 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false); 1832 1833 { 1834 // Check to see whether we are returning to a deoptimized frame. 1835 // (The PopFrame call ensures that the caller of the popped frame is 1836 // either interpreted or compiled and deoptimizes it if compiled.) 1837 // In this case, we can't call dispatch_next() after the frame is 1838 // popped, but instead must save the incoming arguments and restore 1839 // them after deoptimization has occurred. 1840 // 1841 // Note that we don't compare the return PC against the 1842 // deoptimization blob's unpack entry because of the presence of 1843 // adapter frames in C2. 

//----------------------------------------------------------------------------------------------------
// Exceptions
void TemplateInterpreterGenerator::generate_throw_exception() {

  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // O0: exception

  // entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  __ verify_thread();
  // expression stack is undefined here
  // O0: exception, i.e. Oexception
  // Lbcp: exception bcp
  __ verify_oop(Oexception);


  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  // call C routine to find handler and jump to it
  __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
  __ push_ptr(O1); // push exception for exception handler bytecodes

  __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
  __ delayed()->nop();


  // if the exception is not handled in the current frame
  // the frame is removed and the exception is rethrown
  // (i.e. exception continuation is _rethrow_exception)
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).

  // in current activation
  // tos: exception
  // Lbcp: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
  // Set the popframe_processing bit in popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.

  __ ld(popframe_condition_addr, G3_scratch);
  __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
  __ stw(G3_scratch, popframe_condition_addr);

  // Empty the expression stack, as in normal exception handling
  __ empty_expression_stack();
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
    __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);

    const Register Gtmp1 = G3_scratch;
    const Register Gtmp2 = G1_scratch;
    const Register RconstMethod = Gtmp1;
    const Address constMethod(Lmethod, Method::const_offset());
    const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

    // Compute size of arguments for saving when returning to deoptimized caller
    __ ld_ptr(constMethod, RconstMethod);
    __ lduh(size_of_parameters, Gtmp1);
    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
    __ sub(Llocals, Gtmp1, Gtmp2);
    __ add(Gtmp2, wordSize, Gtmp2);
    // Save these arguments
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
    // Inform deoptimization that it is responsible for restoring these arguments
    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
    __ st(Gtmp1, popframe_condition_addr);

    // Return from the current method
    // The caller's SP was adjusted upon method entry to accommodate
    // the callee's non-argument locals. Undo that adjustment.
    __ ret();
    __ delayed()->restore(I5_savedSP, G0, SP);

    __ bind(caller_not_deoptimized);
  }

  // Clear the popframe condition flag
  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);

  // Get out of the current method (how this is done depends on the particular compiler calling
  // convention that the interpreter currently follows)
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ restore(I5_savedSP, G0, SP);
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

#if INCLUDE_JVMTI
  {
    Label L_done;

    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);

    __ br_null(G1_scratch, false, Assembler::pn, L_done);
    __ delayed()->nop();

    __ st_ptr(G1_scratch, Lesp, wordSize);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Resume bytecode interpretation at the current bcp
  __ dispatch_next(vtos);
  // end of JVMTI PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
  __ pop_ptr(Oexception);  // get exception

  // Intel has the following comment:
  //// remove the activation (without doing throws on illegalMonitorExceptions)
  // They remove the activation without checking for bad monitor state.
  // %%% We should make sure this is the right semantics before implementing.

  __ set_vm_result(Oexception);
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);

  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);

  __ get_vm_result(Oexception);
  __ verify_oop(Oexception);

  const int return_reg_adjustment = frame::pc_return_offset;
  Address issuing_pc_addr(I7, return_reg_adjustment);

  // We are done with this activation frame; find out where to go next.
  // The continuation point will be an exception handler, which expects
  // the following registers set up:
  //
  // Oexception: exception
  // Oissuing_pc: the local call that threw exception
  // Other On: garbage
  // In/Ln:  the contents of the caller's register window
  //
  // We do the required restore at the last possible moment, because we
  // need to preserve some state across a runtime call.
  // (Remember that the caller activation is unknown--it might not be
  // interpreted, so things like Lscratch are useless in the caller.)

  // Although the Intel version uses call_C, we can use the more
  // compact call_VM.  (The only real difference on SPARC is a
  // harmlessly ignored [re]set_last_Java_frame, compared with
  // the Intel code which lacks this.)
  __ mov(Oexception, Oexception->after_save());        // get exception in I0 so it will be on O0 after restore
  __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
  __ super_call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ JMP(O0, 0);  // return exception handler in caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  // (same old exception object is already in Oexception; see above)
  // Note that an "issuing PC" is actually the next PC after the call
}
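
// [Editor's sketch] The PopFrame path above computes, in Gtmp1/Gtmp2, the byte
// size and base address of the arguments it asks
// Deoptimization::popframe_preserve_args to save. A plain-C++ model with
// hypothetical names (Llocals points at the first argument local, and locals
// sit at decreasing addresses below it):
static void popframe_args_sketch(char* llocals, int size_of_parameters,
                                 int log_stack_element_size, int word_size,
                                 int* arg_bytes, char** arg_base) {
  *arg_bytes = size_of_parameters << log_stack_element_size;  // Gtmp1 above
  *arg_base  = llocals - *arg_bytes + word_size;              // Gtmp2 above
}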


//
// JVMTI ForceEarlyReturn support
//

address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);

  __ remove_activation(state,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret();  // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  return entry;
} // end of JVMTI ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr(); __ ba_short(L);
  fep = __ pc(); __ push_f();   __ ba_short(L);
  dep = __ pc(); __ push_d();   __ ba_short(L);
  lep = __ pc(); __ push_l();   __ ba_short(L);
  iep = __ pc(); __ push_i();
  bep = cep = sep = iep;        // there aren't any
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}

// --------------------------------------------------------------------------------


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
 : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

// --------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ mov(O7, Lscratch);  // protect return address within interpreter

  // Pass a 0 (not used on sparc) and the top of stack to the bytecode tracer
  __ mov(Otos_l2, G3_scratch);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
  __ mov(Lscratch, O7);  // restore return address
  __ pop(state);
  __ retl();
  __ delayed()->nop();

  return entry;
}


// helpers for generate_and_dispatch

void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  AddressLiteral index   (&BytecodePairHistogram::_index);
  AddressLiteral counters((address) &BytecodePairHistogram::_counters);

  // get index, shift out old bytecode, bring in new bytecode, and store it
  // _index = (_index >> log2_number_of_codes) |
  //          (bytecode << log2_number_of_codes);

  __ load_contents(index, G4_scratch);
  __ srl(G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch);
  __ set(((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch);
  __ or3(G3_scratch, G4_scratch, G4_scratch);
  __ store_contents(G4_scratch, index, G3_scratch);

  // bump bucket contents
  // _counters[_index] ++;

  __ set(counters, G3_scratch);                      // loads into G3_scratch
  __ sll(G4_scratch, LogBytesPerWord, G4_scratch);   // index is word address
  __ add(G3_scratch, G4_scratch, G3_scratch);        // add in index
  __ ld(G3_scratch, 0, G4_scratch);
  __ inc(G4_scratch);
  __ st(G4_scratch, 0, G3_scratch);
}
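
// [Editor's sketch] A plain-C++ model of the _index update the generated code
// above performs; it restates the commented formula and the names are
// hypothetical, not used by the histogram itself:
static int pair_histogram_index_sketch(int index, int bytecode, int log2_number_of_codes) {
  // shift the older bytecode out of the low bits, bring the new one in high
  return (index >> log2_number_of_codes) | (bytecode << log2_number_of_codes);
}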


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  address entry = Interpreter::trace_code(t->tos_in());
  guarantee(entry != NULL, "entry must have been generated");
  __ call(entry, relocInfo::none);
  __ delayed()->nop();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  AddressLiteral counter(&BytecodeCounter::_counter_value);
  __ load_contents(counter, G3_scratch);
  AddressLiteral stop_at(&StopInterpreterAt);
  __ load_ptr_contents(stop_at, G4_scratch);
  __ cmp(G3_scratch, G4_scratch);
  __ breakpoint_trap(Assembler::equal, Assembler::icc);
}
#endif // not PRODUCT
#endif // !CC_INTERP