/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH


// Generation of Interpreter
//
// The InterpreterGenerator generates the interpreter into Interpreter::_code.
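// Note: the `__` shorthand defined below routes every pseudo-assembly
// statement through the current InterpreterMacroAssembler (_masm). Also,
// because this is SPARC, every branch in this file is followed by an
// explicit delay-slot instruction: delayed()->nop() leaves the slot unused,
// while forms like delayed()->mov(...) fold useful work into it.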
#define __ _masm->


//----------------------------------------------------------------------------------------------------


void InterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
  __ stx(O0, l_tmp);
#else
  __ std(O0, l_tmp);
#endif
}

void InterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
  __ ldx(l_tmp, O0);
#else
  __ ldd(l_tmp, O0);
#endif
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  TosState incoming_state = state;

  Label cont;
  address compiled_entry = __ pc();

  address entry = __ pc();
#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for Longs. C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
  // build even if we are returning from interpreted we just do a little
  // stupid shuffling.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a mach epilog node
  // first which would move G1 -> O0/O1 and destroy the exception we were throwing.

  if (incoming_state == ltos) {
    __ srl (G1,  0, O1);
    __ srlx(G1, 32, O0);
  }
#endif // !_LP64 && COMPILER2

  __ bind(cont);

  // The callee returns with the stack possibly adjusted by adapter transition.
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.

  Label L_got_cache, L_giant_index;
  const Register cache = G3_scratch;
  const Register size  = G1_scratch;
  if (EnableInvokeDynamic) {
    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
    __ cmp(G1_scratch, Bytecodes::_invokedynamic);
    __ br(Assembler::equal, false, Assembler::pn, L_giant_index);
    __ delayed()->nop();
  }
  __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
  __ bind(L_got_cache);
  __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
                   ConstantPoolCacheEntry::flags_offset(), size);
  __ and3(size, 0xFF, size);                            // argument size in words
  __ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes
  __ add(Lesp, size, Lesp);                             // pop arguments
  __ dispatch_next(state, step);

  // out of the main line of code...
  if (EnableInvokeDynamic) {
    __ bind(L_giant_index);
    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
    __ ba(false, L_got_cache);
    __ delayed()->nop();
  }

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}
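// Illustrative note (not part of the generated code): the low byte of the
// cp-cache flags word holds the call's parameter size in words, so the pop
// in generate_return_entry_for is, in effect,
//   Lesp += (flags & 0xFF) << Interpreter::logStackElementSize;
// e.g. a call described with 3 parameter words pops 3 stack elements.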
// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2);  // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i);                           break;
    case T_VOID   : /* nothing to do */                           break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );       break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );       break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                                   // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_long(0);)               // marker for disassembly
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ dispatch_next(state);
  return entry;
}

//
// Helpers for commoning out cases in the various type of method entries.
//
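// Illustrative note: increment_mask_and_jump below implements, roughly,
//   counter += increment; if ((counter & mask) == 0) goto overflow;
// With mask built from Tier0InvokeNotifyFreqLog, the overflow path is
// therefore taken periodically (every 2^log increments of the count field)
// without comparing against a separate limit.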
// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not.
  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo, done;
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
      __ br_null(G4_scratch, false, Assembler::pn, no_mdo);
      __ delayed()->nop();
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(methodDataOopDesc::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba(false, done);
      __ delayed()->nop();
    }

    // Increment counter in methodOop
    __ bind(no_mdo);
    Address invocation_counter(Lmethod,
                               in_bytes(methodOopDesc::invocation_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G3_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else {
    // Update standard invocation counters
    __ increment_invocation_counter(O0, G3_scratch);
    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop
      Address interpreter_invocation_counter(Lmethod, in_bytes(methodOopDesc::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G3_scratch);
      __ inc(G3_scratch);
      __ st(G3_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
      __ load_contents(profile_limit, G3_scratch);
      __ cmp(O0, G3_scratch);
      __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
      __ delayed()->nop();

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
    __ load_contents(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
    __ delayed()->nop();
  }
}
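// Descriptive note: for a synchronized instance method the object locked is
// the receiver (local 0); for a synchronized static method it is the class
// mirror, reached via constants -> pool holder -> java_mirror. lock_method
// below allocates one monitor slot on the stack and locks that object.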
// Allocate monitor and lock method (asm interpreter)
// Lmethod - methodOop
//
void InterpreterGenerator::lock_method(void) {
  __ ld(Lmethod, in_bytes(methodOopDesc::access_flags_offset()), O0);  // Load access flags.

#ifdef ASSERT
  { Label ok;
    __ btst(JVM_ACC_SYNCHRONIZED, O0);
    __ br( Assembler::notZero, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    __ ld_ptr( Lmethod, in_bytes(methodOopDesc::constants_offset()), O0);
    __ ld_ptr( O0, constantPoolOopDesc::pool_holder_offset_in_bytes(), O0);

    // lock the mirror, not the klassOop
    __ ld_ptr( O0, mirror_offset, O0);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes()); // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}
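// Descriptive note: generate_stack_overflow_check only fires for frames
// larger than one page. It computes, in effect,
//   limit = stack_base - stack_size
//           + (StackRedPages + StackYellowPages) * page_size + frame_size
// and takes the throw path unless SP > limit, i.e. unless the new frame
// still fits above the red/yellow guard zone.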
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch,
                                                                 Register Rscratch2) {
  const int page_size = os::vm_page_size();
  Address saved_exception_pc(G2_thread, JavaThread::saved_exception_pc_offset());
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch, Rscratch2);

  __ set( page_size,   Rscratch );
  __ cmp( Rframe_size, Rscratch );

  __ br( Assembler::lessEqual, false, Assembler::pt, after_frame_check );
  __ delayed()->nop();

  // get the stack base, and in debug, verify it is non-zero
  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
#ifdef ASSERT
  Label base_not_zero;
  __ cmp( Rscratch, G0 );
  __ brx( Assembler::notEqual, false, Assembler::pn, base_not_zero );
  __ delayed()->nop();
  __ stop("stack base is zero in generate_stack_overflow_check");
  __ bind(base_not_zero);
#endif

  // get the stack size, and in debug, verify it is non-zero
  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT
  Label size_not_zero;
  __ cmp( Rscratch2, G0 );
  __ brx( Assembler::notEqual, false, Assembler::pn, size_not_zero );
  __ delayed()->nop();
  __ stop("stack size is zero in generate_stack_overflow_check");
  __ bind(size_not_zero);
#endif

  // compute the beginning of the protected zone minus the requested frame size
  __ sub( Rscratch, Rscratch2, Rscratch );
  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
  __ add( Rscratch, Rscratch2, Rscratch );

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register)
  __ add( Rscratch, Rframe_size, Rscratch );

  // the frame is greater than one page in size, so check against
  // the bottom of the stack
  __ cmp( SP, Rscratch );
  __ brx( Assembler::greater, false, Assembler::pt, after_frame_check );
  __ delayed()->nop();

  // Save the return address as the exception pc
  __ st_ptr(O7, saved_exception_pc);

  // the stack will overflow, throw an exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  // if you get to here, then there is enough stack space
  __ bind( after_frame_check );
}
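// Rough sketch (glossing over the exact rounding order): for non-native
// methods the frame size checked by generate_fixed_frame below is
//   round_to(max_stack + vm_local_scratch + register_save_area, WordsPerLong)
//   * wordSize + round_to(max_locals - parameters, WordsPerLong) * wordSize
// i.e. expression stack + VM-local scratch + register save area, plus the
// extra (non-parameter) locals that are allocated in the caller's frame.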
//
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP for the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (G5_method, methodOopDesc::size_of_locals_offset());
  const Address max_stack         (G5_method, methodOopDesc::max_stack_offset());
  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                  // frame local scratch space
    //6815692//methodOopDesc::extra_stack_words() +  // extra push slots for MH adapters
    frame::memory_parameter_word_sp_offset +  // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.

  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size, extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    __ lduh( size_of_locals, Otmp1 );
    __ sub( Otmp1, Glocals_size, Glocals_size );
    __ round_to( Glocals_size, WordsPerLong );
    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );

    // see if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ lduh( max_stack, Gframe_size );
    __ add( Gframe_size, extra_space, Gframe_size );
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add( Gframe_size, Glocals_size, Gframe_size );

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);

    __ sub( Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub( SP, Glocals_size, SP );
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, methodOopDesc::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                 // set Lmethod
  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
#ifdef _LP64
  __ add( Lmonitors, STACK_BIAS, Lmonitors );  // Account for 64 bit stack bias
#endif
  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals

  if (ProfileInterpreter) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    __ set_method_data_pointer();
  }
}
// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {

  // A method that does nothing but return...

  address entry = __ pc();
  Label slow_path;

  __ verify_oop(G5_method);

  // do nothing for empty methods (do not even increment invocation counter)
  if ( UseFastEmptyMethods) {
    // If we need a safepoint check, generate full interpreter entry.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ set(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Code: _return
    __ retl();
    __ delayed()->mov(O5_savedSP, SP);

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
  return NULL;
}
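// Descriptive note for the fast accessor path below: the method's first
// instruction word is loaded big-endian, so after the sll/srl pair
// G1_scratch holds bytes 2-3 (the getfield's constant-pool index) already
// scaled by the byte size of a cp-cache entry, ready to be added to the
// cache base address.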
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry

// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {

  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
  //       parameter size = 1
  // Note: We can only use this code if the getfield has been resolved
  //       and if we don't have a null-pointer exception => check for
  //       these conditions first and use slow path if necessary.
  address entry = __ pc();
  Label slow_path;


  // XXX: for compressed oops pointer loading and decoding doesn't fit in
  // delay slot and damages G1
  if ( UseFastAccessorMethods && !UseCompressedOops ) {
    // Check if we need to reach a safepoint and generate full interpreter
    // frame if so.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Check if local 0 != NULL
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    __ tst(Otos_i);  // check if local 0 == NULL and go to the slow path
    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
    __ delayed()->nop();


    // read first instruction word and extract bytecode @ 1 and index @ 2
    // get first 4 bytes of the bytecodes (big endian!)
    __ ld_ptr(G5_method, methodOopDesc::const_offset(), G1_scratch);
    __ ld(G1_scratch, constMethodOopDesc::codes_offset(), G1_scratch);

    // move index @ 2 far left then to the right most two bytes.
    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
                       ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);

    // get constant pool cache
    __ ld_ptr(G5_method, methodOopDesc::constants_offset(), G3_scratch);
    __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);

    // get specific constant pool cache entry
    __ add(G3_scratch, G1_scratch, G3_scratch);

    // Check the constant Pool cache entry to see if it has been resolved.
    // If not, need the slow path.
    ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ and3(G1_scratch, 0xFF, G1_scratch);
    __ cmp(G1_scratch, Bytecodes::_getfield);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Get the type and return field offset from the constant pool cache
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);

    Label xreturn_path;
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
    __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch);
    // Make sure we don't need to mask G1_scratch for tosBits after the above shift
    ConstantPoolCacheEntry::verify_tosBits();
    __ cmp(G1_scratch, atos );
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, itos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, stos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, ctos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
    __ cmp(G1_scratch, btos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
    __ should_not_reach_here();
#endif
    __ ldsb(Otos_i, G3_scratch, Otos_i);
    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
  return NULL;
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // In the G1 code we don't check if we need to reach a safepoint. We
    // continue and the thread will safepoint at the next bytecode dispatch.

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    __ tst(Otos_i);  // check if local 0 == NULL and go to the slow path
    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
    __ delayed()->nop();


    // Load the value of the referent field.
    if (Assembler::is_simm13(referent_offset)) {
      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
    } else {
      __ set(referent_offset, G3_scratch);
      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
    }

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer. Note with
    // these parameters the pre-barrier does not generate
    // the load of the previous value

    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
                            Otos_i /* pre_val */,
                            G3_scratch /* tmp */,
                            true /* preserve_o_regs */);

    // _areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
#endif // SERIALGC

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//
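// Descriptive roadmap for the native entry below: set up the fixed frame,
// run the signature handler to marshal arguments into O1..O5/stack, push a
// protection frame, transition the thread _thread_in_Java ->
// _thread_in_native, call the native function with the JNIEnv* in O0,
// transition back (blocking for a safepoint if required), unbox any oop
// result, check for pending exceptions, unlock if synchronized, and return
// through the result handler.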
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, methodOopDesc::access_flags_offset());

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, the exception handling (i.e. unlock_if_synchronized_method)
  // will check this thread-local flag.
  // The flag forces an unwind in the topmost interpreter frame and
  // suppresses the unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }


  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.
  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, methodOopDesc::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ tst(G3_scratch);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in it.
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);     // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // call signature handler. It will move the args properly since Llocals in
  // the current frame matches that in the outer frame

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

    __ ld_ptr(Lmethod, methodOopDesc::constants_offset(), O1);
    __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
    __ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ tst(O1);
      __ brx(Assembler::notZero, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }
  // At this point, arguments have been copied off the stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch. O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ tst(O0);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flush_windows();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp(G3_scratch, _thread_in_Java);
    __ br(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);

  // Back from the jni method. Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
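    // Descriptive note: the thread-state protocol around the native call is
    //   _thread_in_Java -> _thread_in_native         (before the callr above)
    //   _thread_in_native -> _thread_in_native_trans (stores just below)
    //   _thread_in_native_trans -> _thread_in_Java   (after any blocking)
    // The extra _trans state closes the race described in the scenario above.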
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
    __ cmp(G3_scratch, 0);
    __ br(Assembler::equal, false, Assembler::pt, no_block);
    __ delayed()->nop();
    __ bind(L);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef __LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* __LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // so switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // reset handle block
  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
  __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

  // If we have an oop result store it where it will be safe for any further gc
  // until we return now that we've released the handle it might be protected by

  { Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp(G3_scratch, Lscratch);
    __ brx(Assembler::notEqual, false, Assembler::pt, no_oop);
    __ delayed()->nop();
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);  // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                // unbox it
    __ mov(G0, O0);

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);
  }


  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    // Note: This could be handled more efficiently since we know that the native
    //       method doesn't have an exception handler. We could directly return
    //       to the exception handler for the caller.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1; we can't tell if we're returning to interpreted
  // or compiled so just be safe.

  __ sllx(O0, 32, G1);    // Shift bits into high G1
  __ srl (O1, 0, O1);     // Zero extend O1
  __ or3 (O1, G1, G1);    // OR 64 bits into G1

#endif /* COMPILER2 && !_LP64 */

  // dispose of return address and remove activation
#ifdef ASSERT
  {
    Label ok;
    __ cmp(I5_savedSP, FP);
    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();


  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}


// Generic method entry to (asm) interpreter
//------------------------------------------------------------------------------------------------------------------------
//
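// Descriptive roadmap for the normal entry below: assert sanity on the
// method, build the fixed frame, zero the non-parameter locals, bump the
// invocation counter (possibly branching to the overflow/profile paths),
// bang the shadow pages, lock the method if synchronized, notify JVMTI,
// and finally dispatch the first bytecode.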
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter = UseCompiler || CountCompiledCalls;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (G5_method, methodOopDesc::size_of_locals_offset());
  // Seems like G5_method is live at the point this is used. So we could make this look consistent
  // and use it in the asserts.
  const Address access_flags      (Lmethod, methodOopDesc::access_flags_offset());

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

#ifdef FAST_DISPATCH
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
                                          // set bytecode dispatch table base
#endif

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg;    // will be G0 if we must clear locals
  // The way the code was setup before, zerolocals was always true for vanilla java entries.
  // It could only be false for the specialized entries like accessor or empty which have
  // no extra locals, so the testing was a waste of time and the extra locals were always
  // initialized. We removed this extra complication from the already over-complicated code.

  init_value = G0;
  Label clear_loop;

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize, O2);
  __ sll( O1, Interpreter::logStackElementSize, O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, the exception handling (i.e. unlock_if_synchronized_method)
  // will check this thread-local flag.
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);


  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ ba(false, profile_method_continue);
      __ delayed()->nop();
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}


//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call
// methods. These both come in synchronized and non-synchronized versions, but
// the frame layout they create is very similar. The other method entry
// types are really just special-purpose entries that combine entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the methodOop of the method to call
//    Lesp:      points to the TOS of the caller's expression stack
//               after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
//   pop parameters from the caller's stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//               + vm_local_words
//               + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code

// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs    : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |
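// Descriptive note: size_activation_helper below computes, in effect,
//   round_to(max_stack_words + rounded_vm_local_words
//            + frame::memory_parameter_word_sp_offset, WordsPerLong)
//   + locals_size + monitor_size
// where locals_size and monitor_size are already WordsPerLong-aligned, so
// the whole frame stays properly aligned.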

static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {

  // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
  // expression stack, the callee will have callee_extra_locals (so we can account for
  // frame extension) and monitor_size for monitors. Basically we need to calculate
  // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
  //
  //
  // The big complicating thing here is that we must ensure that the stack stays properly
  // aligned. This would be even uglier if the monitor size weren't a multiple of what the
  // stack needs to be aligned to. We are given that the sp (fp) is already aligned by
  // the caller, so we must ensure that it is properly aligned for our callee.
  //
  const int rounded_vm_local_words =
       round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
  // callee_locals and max_stack are counts, not the size in frame.
  const int locals_size =
       round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
  const int max_stack_words = max_stack * Interpreter::stackElementWords;
  return (round_to((max_stack_words
                   //6815692//+ methodOopDesc::extra_stack_words()
                   + rounded_vm_local_words
                   + frame::memory_parameter_word_sp_offset), WordsPerLong)
                   // already rounded
                   + locals_size + monitor_size);
}

// How much stack a method's top interpreter activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {

  // See call_stub code
  int call_stub_size = round_to(7 + frame::memory_parameter_word_sp_offset,
                                WordsPerLong);    // 7 + register save area

  // Save space for one monitor to get into the interpreted method in case
  // the method is synchronized
  int monitor_size   = method->is_synchronized() ?
                               1*frame::interpreter_frame_monitor_size() : 0;
  return size_activation_helper(method->max_locals(), method->max_stack(),
                                monitor_size) + call_stub_size;
}
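
// A small usage sketch (not compiled): how a caller such as the call stub
// might budget stack space with the helper above. 'm' is hypothetical.
#if 0
static void sketch_budget_stack(methodOop m) {
  int words    = AbstractInterpreter::size_top_interpreter_activation(m);
  size_t bytes = words * wordSize;   // compare against the remaining stack
}
#endif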

int AbstractInterpreter::layout_activation(methodOop method,
                                           int tempcount,
                                           int popframe_extra_args,
                                           int moncount,
                                           int caller_actual_parameters,
                                           int callee_param_count,
                                           int callee_local_count,
                                           frame* caller,
                                           frame* interpreter_frame,
                                           bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in InterpreterGenerator::generate_fixed_frame.
  // If f != NULL, set up the following variables:
  //   - Lmethod
  //   - Llocals
  //   - Lmonitors (to the indicated number of monitors)
  //   - Lesp (to the indicated number of temps)
  // The frame f (if not NULL) on entry is a description of the caller of the frame
  // we are about to lay out. We are guaranteed that we will be able to fill in a
  // new interpreter frame as its callee (i.e. the stack space is allocated and
  // the amount was determined by an earlier call to this method with f == NULL).
  // On return, f (if not NULL) will describe the interpreter frame we just laid out.

  int monitor_size           = moncount * frame::interpreter_frame_monitor_size();
  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);

  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
  //
  // Note: if you look closely this appears to be doing something much different
  // than generate_fixed_frame. What is happening is this. On sparc we have to do
  // this dance with interpreter_sp_adjustment because the window save area would
  // appear just below the bottom (tos) of the caller's java expression stack. Because
  // the interpreter wants to have the locals completely contiguous, generate_fixed_frame
  // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
  // Now in generate_fixed_frame the extension of the caller's sp happens in the callee.
  // In this code the opposite occurs: the caller adjusts its own stack based on the callee.
  // This is mostly ok, but it does cause a problem when we get to the initial frame (the oldest),
  // because the oldest frame would have to adjust its caller's frame, and yet that frame
  // already exists and isn't part of this array of frames we are unpacking. So at first
  // glance this would seem to mess up that frame. However, Deoptimization::fetch_unroll_info_helper(),
  // after it calculates all of the frames' on_stack_size()'s, will figure out the
  // amount to adjust the caller of the initial (oldest) frame, and the calculation will all
  // add up. It does seem like it would be simpler to account for the adjustment here (and remove
  // the callee... parameters). However, this would mean that this routine would have to take
  // the caller frame as input so we could adjust its sp (and set its interpreter_sp_adjustment)
  // and run the calling loop in the reverse order. This would also appear to mean making
  // this code aware of what the interactions are when that initial caller frame was an osr or
  // other adapter frame. Deoptimization is complicated enough and hard enough to debug that
  // there is no sense in messing with working code.
  //

  int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
  assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");

  int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
                                              monitor_size);

  if (interpreter_frame != NULL) {
    // The skeleton frame must already look like an interpreter frame
    // even if not fully filled out.
    assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");

    intptr_t* fp = interpreter_frame->fp();

    JavaThread* thread = JavaThread::current();
    RegisterMap map(thread, false);
    // More verification that skeleton frame is properly walkable
    assert(fp == caller->sp(), "fp must match");

    intptr_t* montop = fp - rounded_vm_local_words;

    // preallocate monitors (cf. __ add_monitor_to_stack)
    intptr_t* monitors = montop - monitor_size;

    // preallocate stack space
    intptr_t* esp = monitors - 1 -
                    (tempcount * Interpreter::stackElementWords) -
                    popframe_extra_args;

    int local_words = method->max_locals() * Interpreter::stackElementWords;
    NEEDS_CLEANUP;
    intptr_t* locals;
    if (caller->is_interpreted_frame()) {
      // Can force the locals area to end up properly overlapping the top of the expression stack.
      intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
      // Note that this computation means we replace size_of_parameters() values from the caller
      // interpreter frame's expression stack with our argument locals
      int parm_words = caller_actual_parameters * Interpreter::stackElementWords;
      locals = Lesp_ptr + parm_words;
      int delta = local_words - parm_words;
      int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
      *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
    } else {
      assert(caller->is_compiled_frame() || caller->is_entry_frame() || caller->is_ricochet_frame(), "only possible cases");
      // Don't have Lesp available; lay out locals block in the caller
      // adjacent to the register window save area.
      //
      // Compiled frames do not allocate a varargs area, which is why this if
      // statement is needed.
      //
      if (caller->is_compiled_frame()) {
        locals = fp + frame::register_save_words + local_words - 1;
      } else {
        locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
      }
      if (!caller->is_entry_frame()) {
        // Caller wants his own SP back
        int caller_frame_size = caller->cb()->frame_size();
        *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
      }
    }
    if (TraceDeoptimization) {
      if (caller->is_entry_frame()) {
        // make sure I5_savedSP and the entry frame's notion of saved SP
        // agree. This assertion duplicates a check in entry frame code
        // but catches the failure earlier.
        assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
               "would change caller's SP");
      }
      if (caller->is_entry_frame()) {
        tty->print("entry ");
      }
      if (caller->is_compiled_frame()) {
        tty->print("compiled ");
        if (caller->is_deoptimized_frame()) {
          tty->print("(deopt) ");
        }
      }
      if (caller->is_interpreted_frame()) {
        tty->print("interpreted ");
      }
      tty->print_cr("caller fp=0x%x sp=0x%x", caller->fp(), caller->sp());
      tty->print_cr("save area = 0x%x, 0x%x", caller->sp(), caller->sp() + 16);
      tty->print_cr("save area = 0x%x, 0x%x", caller->fp(), caller->fp() + 16);
      tty->print_cr("interpreter fp=0x%x sp=0x%x", interpreter_frame->fp(), interpreter_frame->sp());
      tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->sp(), interpreter_frame->sp() + 16);
      tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->fp(), interpreter_frame->fp() + 16);
      tty->print_cr("Llocals = 0x%x", locals);
      tty->print_cr("Lesp = 0x%x", esp);
      tty->print_cr("Lmonitors = 0x%x", monitors);
    }

    if (method->max_locals() > 0) {
      assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
      assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
      assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
      assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
    }
#ifdef _LP64
    assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
#endif

    *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
    *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
    *interpreter_frame->register_addr(Lmonitors)   = (intptr_t) monitors;
    *interpreter_frame->register_addr(Lesp)        = (intptr_t) esp;
    // Llast_SP will be same as SP as there is no adapter space
    *interpreter_frame->register_addr(Llast_SP)    = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
    *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
#ifdef FAST_DISPATCH
    *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
#endif


#ifdef ASSERT
    BasicObjectLock* mp = (BasicObjectLock*)monitors;

    assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
    assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
    assert(interpreter_frame->interpreter_frame_monitor_end() == mp, "monitor_end matches");
    assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
    assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");

    // check bounds
    intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
    intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
    assert(lo < monitors && montop <= hi, "monitors in bounds");
    assert(lo <= esp && esp < monitors, "esp in bounds");
#endif // ASSERT
  }

  return raw_frame_size;
}

//----------------------------------------------------------------------------------------------------
// Exceptions
void TemplateInterpreterGenerator::generate_throw_exception() {

  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // O0: exception

  // entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  __ verify_thread();
  // expression stack is undefined here
  // O0: exception, i.e. Oexception
  // Lbcp: exception bcp
  __ verify_oop(Oexception);


  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  // call C routine to find handler and jump to it
  __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
  __ push_ptr(O1); // push exception for exception handler bytecodes

  __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
  __ delayed()->nop();


  // If the exception is not handled in the current frame,
  // the frame is removed and the exception is rethrown
  // (i.e. exception continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).

  // in current activation
  // tos: exception
  // Lbcp: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
  // Set the popframe_processing bit in popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.
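  // A minimal C-level sketch (not compiled) of what the next three
  // instructions do; it assumes JavaThread accessors of this shape exist.
#if 0
  int cond = thread->popframe_condition();
  cond |= JavaThread::popframe_processing_bit;   // mark "popframe in progress"
  thread->set_popframe_condition(cond);
#endif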

  __ ld(popframe_condition_addr, G3_scratch);
  __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
  __ stw(G3_scratch, popframe_condition_addr);

  // Empty the expression stack, as in normal exception handling
  __ empty_expression_stack();
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
    __ tst(O0);
    __ brx(Assembler::notEqual, false, Assembler::pt, caller_not_deoptimized);
    __ delayed()->nop();

    const Register Gtmp1 = G3_scratch;
    const Register Gtmp2 = G1_scratch;

    // Compute size of arguments for saving when returning to deoptimized caller
    __ lduh(Lmethod, in_bytes(methodOopDesc::size_of_parameters_offset()), Gtmp1);
    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
    __ sub(Llocals, Gtmp1, Gtmp2);
    __ add(Gtmp2, wordSize, Gtmp2);
    // Save these arguments
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
    // Inform deoptimization that it is responsible for restoring these arguments
    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
    __ st(Gtmp1, popframe_condition_addr);

    // Return from the current method
    // The caller's SP was adjusted upon method entry to accommodate
    // the callee's non-argument locals. Undo that adjustment.
    __ ret();
    __ delayed()->restore(I5_savedSP, G0, SP);

    __ bind(caller_not_deoptimized);
  }

  // Clear the popframe condition flag
  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);

  // Get out of the current method (how this is done depends on the particular compiler calling
  // convention that the interpreter currently follows)
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ restore(I5_savedSP, G0, SP);
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
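  // (Clarifying note, added: after a pop-frame we re-execute the current
  // bytecode, so the mdp must be recomputed from Lbcp rather than left where
  // call profiling advanced it; set_method_data_pointer_for_bcp does that.)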
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }
  // Resume bytecode interpretation at the current bcp
  __ dispatch_next(vtos);
  // end of JVMTI PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
  __ pop_ptr(Oexception); // get exception

  // Intel has the following comment:
  //// remove the activation (without doing throws on illegalMonitorExceptions)
  // They remove the activation without checking for bad monitor state.
  // %%% We should make sure this is the right semantics before implementing.

  // %%% changed set_vm_result_2 to set_vm_result and get_vm_result_2 to get_vm_result. Is there a bug here?
  __ set_vm_result(Oexception);
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);

  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);

  __ get_vm_result(Oexception);
  __ verify_oop(Oexception);

  const int return_reg_adjustment = frame::pc_return_offset;
  Address issuing_pc_addr(I7, return_reg_adjustment);

  // We are done with this activation frame; find out where to go next.
  // The continuation point will be an exception handler, which expects
  // the following registers set up:
  //
  // Oexception: exception
  // Oissuing_pc: the local call that threw the exception
  // Other On: garbage
  // In/Ln:  the contents of the caller's register window
  //
  // We do the required restore at the last possible moment, because we
  // need to preserve some state across a runtime call.
  // (Remember that the caller activation is unknown--it might not be
  // interpreted, so things like Lscratch are useless in the caller.)

  // Although the Intel version uses call_C, we can use the more
  // compact call_VM. (The only real difference on SPARC is a
  // harmlessly ignored [re]set_last_Java_frame, compared with
  // the Intel code which lacks this.)
  __ mov(Oexception, Oexception->after_save());        // get exception in I0 so it will be on O0 after restore
  __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
  __ super_call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
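  // (Sketch of the delayed restore below, for readers less familiar with
  // SPARC: 'restore %i5, %g0, %sp' both pops the register window and sets
  // the caller's SP to I5_savedSP, i.e. the value saved at method entry
  // before the extra-locals bump, rather than the bumped SP that a plain
  // 'restore' would reinstate.)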
  __ JMP(O0, 0); // return exception handler in caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  // (same old exception object is already in Oexception; see above)
  // Note that an "issuing PC" is actually the next PC after the call
}


//
// JVMTI ForceEarlyReturn support
//

address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);

  __ remove_activation(state,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret(); // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  return entry;
} // end of JVMTI ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr(); __ ba(false, L); __ delayed()->nop();
  fep = __ pc(); __ push_f();   __ ba(false, L); __ delayed()->nop();
  dep = __ pc(); __ push_d();   __ ba(false, L); __ delayed()->nop();
  lep = __ pc(); __ push_l();   __ ba(false, L); __ delayed()->nop();
  iep = __ pc(); __ push_i();
  bep = cep = sep = iep;        // there aren't any
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}

// --------------------------------------------------------------------------------


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
 : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

// --------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ mov(O7, Lscratch); // protect return address within interpreter

  // Pass a 0 (not used on sparc) and the top of stack to the bytecode tracer
  __ mov(Otos_l2, G3_scratch);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
  __ mov(Lscratch, O7); // restore return address
  __ pop(state);
  __ retl();
  __ delayed()->nop();

  return entry;
}


// helpers for generate_and_dispatch

void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
}

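// For reference, a C-level sketch (not compiled) of the two counter updates
// the stubs above emit as SPARC code:
#if 0
BytecodeCounter::_counter_value++;              // count_bytecode
BytecodeHistogram::_counters[t->bytecode()]++;  // histogram_bytecode
#endif
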
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  AddressLiteral index   (&BytecodePairHistogram::_index);
  AddressLiteral counters((address) &BytecodePairHistogram::_counters);

  // get index, shift out old bytecode, bring in new bytecode, and store it
  // _index = (_index >> log2_number_of_codes) |
  //          (bytecode << log2_number_of_codes);

  __ load_contents(index, G4_scratch);
  __ srl(G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch);
  __ set(((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch);
  __ or3(G3_scratch, G4_scratch, G4_scratch);
  __ store_contents(G4_scratch, index, G3_scratch);

  // bump bucket contents
  // _counters[_index] ++;

  __ set(counters, G3_scratch);                     // loads into G3_scratch
  __ sll(G4_scratch, LogBytesPerWord, G4_scratch);  // index is a word address
  __ add(G3_scratch, G4_scratch, G3_scratch);       // add in index
  __ ld(G3_scratch, 0, G4_scratch);
  __ inc(G4_scratch);
  __ st(G4_scratch, 0, G3_scratch);
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  address entry = Interpreter::trace_code(t->tos_in());
  guarantee(entry != NULL, "entry must have been generated");
  __ call(entry, relocInfo::none);
  __ delayed()->nop();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  AddressLiteral counter(&BytecodeCounter::_counter_value);
  __ load_contents(counter, G3_scratch);
  AddressLiteral stop_at(&StopInterpreterAt);
  __ load_ptr_contents(stop_at, G4_scratch);
  __ cmp(G3_scratch, G4_scratch);
  __ breakpoint_trap(Assembler::equal);
}
#endif // not PRODUCT
#endif // !CC_INTERP