/*
 * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_templateInterpreter_sparc.cpp.incl"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH


// Generation of Interpreter
//
// The InterpreterGenerator generates the interpreter into Interpreter::_code.


#define __ _masm->


//----------------------------------------------------------------------------------------------------


void InterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
  __ stx(O0, l_tmp);
#else
  __ std(O0, l_tmp);
#endif
}

void InterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
  __ ldx(l_tmp, O0);
#else
  __ ldd(l_tmp, O0);
#endif
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


// Arguments are: required type in G5_method_type, and
// failing object (or NULL) in G3_method_handle.
address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_WrongMethodTypeException),
             G5_method_type,    // required
             G3_method_handle); // actual
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  address compiled_entry = __ pc();
  Label cont;

  address entry = __ pc();
#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for Longs.  C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
  // build even if we are returning from interpreted we just do a little
  // shuffling.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a machepilog node
  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.

  if (state == ltos) {
    __ srl (G1,  0, O1);
    __ srlx(G1, 32, O0);
  }
#endif /* !_LP64 && COMPILER2 */


  __ bind(cont);

  // The callee returns with the stack possibly adjusted by adapter transition
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.
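
  // In C-like terms, the argument pop that follows amounts to (a sketch;
  // the low byte of the cache entry's flags word holds the parameter size):
  //   size_in_words = cache_entry->flags & 0xFF;
  //   Lesp += size_in_words << Interpreter::logStackElementSize();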

  const Register cache = G3_scratch;
  const Register size  = G1_scratch;
  __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
  __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
                   ConstantPoolCacheEntry::flags_offset(), size);
  __ and3(size, 0xFF, size);                          // argument size in words
  __ sll(size, Interpreter::logStackElementSize(), size); // each argument size in bytes
  __ add(Lesp, size, Lesp);                           // pop arguments
  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2);  // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i);                           break;
    case T_VOID   : /* nothing to do */                           break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code");        break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code");        break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                           // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_long(0);)       // marker for disassembly
  return entry;
}
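
// The T_BOOLEAN case above, spelled out (a sketch): subcc(G0, O0, G0)
// computes 0 - O0 and sets the carry flag iff O0 != 0, so the following
// addc(G0, 0, Itos_i) leaves Itos_i = (O0 != 0) ? 1 : 0.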

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ dispatch_next(state);
  return entry;
}

//
// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Update standard invocation counters
  __ increment_invocation_counter(O0, G3_scratch);
  if (ProfileInterpreter) { // %%% Merge this into methodDataOop
    Address interpreter_invocation_counter(Lmethod, methodOopDesc::interpreter_invocation_counter_offset());
    __ ld(interpreter_invocation_counter, G3_scratch);
    __ inc(G3_scratch);
    __ st(G3_scratch, interpreter_invocation_counter);
  }

  if (ProfileInterpreter && profile_method != NULL) {
    // Test to see if we should create a method data oop
    AddressLiteral profile_limit(&InvocationCounter::InterpreterProfileLimit);
    __ sethi(profile_limit, G3_scratch);
    __ ld(G3_scratch, profile_limit.low10(), G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
    __ delayed()->nop();

    // if no method data exists, go to profile_method
    __ test_method_data_pointer(*profile_method);
  }

  AddressLiteral invocation_limit(&InvocationCounter::InterpreterInvocationLimit);
  __ sethi(invocation_limit, G3_scratch);
  __ ld(G3_scratch, invocation_limit.low10(), G3_scratch);
  __ cmp(O0, G3_scratch);
  __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
  __ delayed()->nop();

}
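
// The unsigned compare above is what makes the test 'sticky'. In C terms
// (a sketch):
//   if ((juint)counter >= (juint)InterpreterInvocationLimit) goto overflow;
// Once the counter crosses the limit the overflow path stays taken until
// the compilation policy resets the counter.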

// Allocate monitor and lock method (asm interpreter)
// Lmethod - methodOop
//
void InterpreterGenerator::lock_method(void) {
  __ ld(Lmethod, in_bytes(methodOopDesc::access_flags_offset()), O0);  // Load access flags.

#ifdef ASSERT
  { Label ok;
    __ btst(JVM_ACC_SYNCHRONIZED, O0);
    __ br( Assembler::notZero, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    __ ld_ptr( Lmethod, in_bytes(methodOopDesc::constants_offset()), O0);
    __ ld_ptr( O0, constantPoolOopDesc::pool_holder_offset_in_bytes(), O0);

    // lock the mirror, not the klassOop
    __ ld_ptr( O0, mirror_offset, O0);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());   // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}


void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch,
                                                                 Register Rscratch2) {
  const int page_size = os::vm_page_size();
  Address saved_exception_pc(G2_thread, JavaThread::saved_exception_pc_offset());
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch, Rscratch2);

  __ set( page_size,   Rscratch );
  __ cmp( Rframe_size, Rscratch );

  __ br( Assembler::lessEqual, false, Assembler::pt, after_frame_check );
  __ delayed()->nop();

  // get the stack base, and in debug, verify it is non-zero
  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
#ifdef ASSERT
  Label base_not_zero;
  __ cmp( Rscratch, G0 );
  __ brx( Assembler::notEqual, false, Assembler::pn, base_not_zero );
  __ delayed()->nop();
  __ stop("stack base is zero in generate_stack_overflow_check");
  __ bind(base_not_zero);
#endif

  // get the stack size, and in debug, verify it is non-zero
  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT
  Label size_not_zero;
  __ cmp( Rscratch2, G0 );
  __ brx( Assembler::notEqual, false, Assembler::pn, size_not_zero );
  __ delayed()->nop();
  __ stop("stack size is zero in generate_stack_overflow_check");
  __ bind(size_not_zero);
#endif

  // compute the beginning of the protected zone minus the requested frame size
  __ sub( Rscratch, Rscratch2,   Rscratch );
  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
  __ add( Rscratch,  Rscratch2,  Rscratch );

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register)
  __ add( Rscratch, Rframe_size, Rscratch );

  // the frame is greater than one page in size, so check against
  // the bottom of the stack
  __ cmp( SP, Rscratch );
  __ brx( Assembler::greater, false, Assembler::pt, after_frame_check );
  __ delayed()->nop();

  // Save the return address as the exception pc
  __ st_ptr(O7, saved_exception_pc);

  // the stack will overflow, throw an exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  // if you get to here, then there is enough stack space
  __ bind( after_frame_check );
}
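
// As plain arithmetic, the limit computed above is (a sketch; the stack
// grows toward lower addresses):
//   limit = (stack_base - stack_size)                       // lowest stack address
//         + (StackRedPages + StackYellowPages) * page_size  // guard zone
//         + frame_size;
//   if (SP <= limit) throw StackOverflowError;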


//
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP by the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be set up:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (G5_method, methodOopDesc::size_of_locals_offset());
  const Address max_stack         (G5_method, methodOopDesc::max_stack_offset());
  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    //6815692//methodOopDesc::extra_stack_words() +  // extra push slots for MH adapters
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was set up correctly.
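
  // For the interpreted (non-native) path below, the sizes work out to,
  // roughly (a sketch):
  //   frame_bytes  = round_to(max_stack + extra_space, WordsPerLong)
  //                    << Interpreter::logStackElementSize();
  //   locals_bytes = round_to(size_of_locals - size_of_parameters, WordsPerLong)
  //                    << Interpreter::logStackElementSize();
  // The stack overflow check is done on frame_bytes + locals_bytes; the
  // caller's SP is then bumped by locals_bytes and the save uses frame_bytes.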

  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize(), Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size,  extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    __ lduh( size_of_locals, Otmp1 );
    __ sub( Otmp1, Glocals_size, Glocals_size );
    __ round_to( Glocals_size, WordsPerLong );
    __ sll( Glocals_size, Interpreter::logStackElementSize(), Glocals_size );

    // see if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ lduh( max_stack, Gframe_size );
    __ add( Gframe_size, extra_space, Gframe_size );
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, Interpreter::logStackElementSize(), Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add( Gframe_size, Glocals_size, Gframe_size );

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);

    __ sub( Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub( SP, Glocals_size, SP );
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, methodOopDesc::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                 // set Lmethod
  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
#ifdef _LP64
  __ add( Lmonitors, STACK_BIAS, Lmonitors );  // Account for 64 bit stack bias
#endif
  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals

  if (ProfileInterpreter) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    __ set_method_data_pointer();
  }

}
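
// The frame itself is created by the single 'save' above; in SPARC terms
// (a sketch):
//   save %sp, -frame_bytes, %sp   // new register window; caller's %sp -> our %fp
// which is why Gframe_size is negated immediately before the __ save(...).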

// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {

  // A method that does nothing but return...

  address entry = __ pc();
  Label slow_path;

  __ verify_oop(G5_method);

  // do nothing for empty methods (do not even increment invocation counter)
  if ( UseFastEmptyMethods) {
    // If we need a safepoint check, generate full interpreter entry.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ set(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Code: _return
    __ retl();
    __ delayed()->mov(O5_savedSP, SP);

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
  return NULL;
}

// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry)

// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {

  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
  // parameter size = 1
  // Note: We can only use this code if the getfield has been resolved
  //       and if we don't have a null-pointer exception => check for
  //       these conditions first and use slow path if necessary.
  address entry = __ pc();
  Label slow_path;


  // XXX: for compressed oops pointer loading and decoding doesn't fit in
  // delay slot and damages G1
  if ( UseFastAccessorMethods && !UseCompressedOops ) {
    // Check if we need to reach a safepoint and generate full interpreter
    // frame if so.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Check if local 0 != NULL
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    __ tst(Otos_i);  // check if local 0 == NULL and go the slow path
    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
    __ delayed()->nop();


    // read first instruction word and extract bytecode @ 1 and index @ 2
    // get first 4 bytes of the bytecodes (big endian!)
    __ ld_ptr(G5_method, methodOopDesc::const_offset(), G1_scratch);
    __ ld(G1_scratch, constMethodOopDesc::codes_offset(), G1_scratch);

    // move index @ 2 far left then to the right most two bytes.
    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
                       ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);

    // get constant pool cache
    __ ld_ptr(G5_method, methodOopDesc::constants_offset(), G3_scratch);
    __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);

    // get specific constant pool cache entry
    __ add(G3_scratch, G1_scratch, G3_scratch);

    // Check the constant Pool cache entry to see if it has been resolved.
    // If not, need the slow path.
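    // In C-like terms (a sketch), the resolution check below reads:
    //   resolved_bc = (entry->indices >> 16) & 0xFF;   // bytecode-1 field
    //   if (resolved_bc != Bytecodes::_getfield) goto slow_path;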
    ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ and3(G1_scratch, 0xFF, G1_scratch);
    __ cmp(G1_scratch, Bytecodes::_getfield);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Get the type and return field offset from the constant pool cache
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);

    Label xreturn_path;
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
    __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch);
    // Make sure we don't need to mask G1_scratch for tosBits after the above shift
    ConstantPoolCacheEntry::verify_tosBits();
    __ cmp(G1_scratch, atos );
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, itos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, stos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, ctos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
    __ cmp(G1_scratch, btos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
    __ should_not_reach_here();
#endif
    __ ldsb(Otos_i, G3_scratch, Otos_i);
    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
  return NULL;
}
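
// The compare/branch chain in generate_accessor_entry above is, in effect,
// a type switch (a sketch):
//   switch (flags >> tosBits) {
//     case atos: result = *(oop*)   (obj + offset); break;  // ld_ptr
//     case itos: result = *(jint*)  (obj + offset); break;  // ld
//     case stos: result = *(jshort*)(obj + offset); break;  // ldsh
//     case ctos: result = *(jchar*) (obj + offset); break;  // lduh
//     case btos: result = *(jbyte*) (obj + offset); break;  // ldsb
//   }
// with the loads placed in annulled delay slots so only the matching one
// executes.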

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch ;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, methodOopDesc::access_flags_offset());

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which has not
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
  // check this thread local flag.
  // This flag has two effects: one is to force an unwind in the topmost
  // interpreter frame, and the other is not to perform an unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }


  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, methodOopDesc::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ tst(G3_scratch);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in it.
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);     // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // call signature handler; it will move the args properly since Llocals in
  // the current frame matches that in the outer frame

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

    __ ld_ptr(Lmethod, methodOopDesc::constants_offset(), O1);
    __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
    __ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ tst(O1);
      __ brx(Assembler::notZero, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }
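
  // For reference, the calling shape this sets up (a sketch of the standard
  // JNI convention): native methods are invoked as
  //   jtype Java_pkg_Class_name(JNIEnv* env, jobject receiver_or_class, ...);
  // so for static methods the class mirror handle is stored into the second
  // argument slot (O1), where the receiver handle would otherwise go.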

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ tst(O0);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flush_windows();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp(G3_scratch, _thread_in_Java);
    __ br(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);

  // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //   Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //   VM thread changes sync state to synchronizing and suspends threads for GC.
    //   Thread A is resumed to finish this native method, but doesn't block here since it
    //   didn't see any synchronization in progress, and escapes.
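    // In outline, the transition protocol below is (a sketch):
    //   thread->state = _thread_in_native_trans;
    //   <StoreLoad barrier or serialization page write>
    //   if (SafepointSynchronize::state != _not_synchronized
    //       || thread->suspend_flags != 0)
    //     check_special_condition_for_native_trans(thread);   // may block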
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
    __ cmp(G3_scratch, 0);
    __ br(Assembler::equal, false, Assembler::pt, no_block);
    __ delayed()->nop();
    __ bind(L);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef _LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* _LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // reset handle block
  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
  __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

  // If we have an oop result store it where it will be safe for any further gc
  // until we return now that we've released the handle it might be protected by

  {
    Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp(G3_scratch, Lscratch);
    __ brx(Assembler::notEqual, false, Assembler::pt, no_oop);
    __ delayed()->nop();
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
    __ mov(G0, O0);

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);

  }
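
  // The unboxing above, in C terms (a sketch):
  //   oop result = (O0 != NULL) ? *(oop*)O0 : NULL;   // deref the JNI handle
  // The result is then spilled to the frame's oop_temp slot so GC can find
  // and update it until the result handler picks it up.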


  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    // Note: This could be handled more efficiently since we know that the native
    //       method doesn't have an exception handler. We could directly return
    //       to the exception handler for the caller.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1; we can't tell if we're returning to interpreted
  // or compiled so just be safe.

  __ sllx(O0, 32, G1);          // Shift bits into high G1
  __ srl (O1, 0, O1);           // Zero extend O1
  __ or3 (O1, G1, G1);          // OR 64 bits into G1

#endif /* COMPILER2 && !_LP64 */

  // dispose of return address and remove activation
#ifdef ASSERT
  {
    Label ok;
    __ cmp(I5_savedSP, FP);
    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();


  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }



  return entry;
}


// Generic method entry to (asm) interpreter
//------------------------------------------------------------------------------------------------------------------------
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch ;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (G5_method, methodOopDesc::size_of_locals_offset());
  // Seems like G5_method is live at the point this is used. So we could make this look consistent
  // and use it in the asserts.
  const Address access_flags      (Lmethod,   methodOopDesc::access_flags_offset());

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

#ifdef FAST_DISPATCH
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
                                          // set bytecode dispatch table base
#endif

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg;    // will be G0 if we must clear locals
  // The way the code was set up before, zerolocals was always true for vanilla java entries.
  // It could only be false for the specialized entries like accessor or empty which have
  // no extra locals so the testing was a waste of time and the extra locals were always
  // initialized. We removed this extra complication from already over-complicated code.

  init_value = G0;
  Label clear_loop;

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize(), O2);
  __ sll( O1, Interpreter::logStackElementSize(), O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );
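
  // The loop above, in C terms (a sketch; locals sit at decreasing addresses
  // below Llocals):
  //   for (p = Llocals - locals_bytes + wordSize; p <= Llocals - parm_bytes; p += wordSize)
  //     *(intptr_t*)p = 0;   // clear each non-parameter local slot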

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which has not
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
  // check this thread local flag.
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);


  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), Lbcp, true);

#ifdef ASSERT
      __ tst(O0);
      __ breakpoint_trap(Assembler::notEqual);
#endif

      __ set_method_data_pointer();

      __ ba(false, profile_method_continue);
      __ delayed()->nop();
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}


//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call methods.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special purpose entries that are really entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the methodOop of the method to call
//    Lesp:      points to the TOS of the caller's expression stack
//               after having pushed all the parameters
//
// The entry code does the following to set up an interpreter frame
//   pop parameters from the caller's stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//               + vm_local_words
//               + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code

// Try this optimization:  Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations.  It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs  : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |

static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {

  // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
  // expression stack, the callee will have callee_extra_locals (so we can account for
  // frame extension) and monitor_size for monitors. Basically we need to calculate
  // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
  //
  //
  // The big complicating thing here is that we must ensure that the stack stays properly
  // aligned. This would be even uglier if monitor size wasn't modulo what the stack
  // needs to be aligned for. We are given that the sp (fp) is already aligned by
  // the caller so we must ensure that it is properly aligned for our callee.
  //
  const int rounded_vm_local_words =
       round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
  // callee_locals and max_stack are counts, not the size in frame.
  const int locals_size =
       round_to(callee_extra_locals * Interpreter::stackElementWords(), WordsPerLong);
  const int max_stack_words = max_stack * Interpreter::stackElementWords();
  return (round_to((max_stack_words
                   //6815692//+ methodOopDesc::extra_stack_words()
                   + rounded_vm_local_words
                   + frame::memory_parameter_word_sp_offset), WordsPerLong)
                   // already rounded
                   + locals_size + monitor_size);
}
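
// In other words (a sketch), the activation size in words is
//   round_to(max_stack * elem_words + rounded_vm_local_words
//            + frame::memory_parameter_word_sp_offset, WordsPerLong)
//   + round_to(callee_extra_locals * elem_words, WordsPerLong)
//   + monitor_size;
// so the locals block and the rest of the frame each keep WordsPerLong
// alignment independently.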
int AbstractInterpreter::layout_activation(methodOop method,
                                           int tempcount,
                                           int popframe_extra_args,
                                           int moncount,
                                           int callee_param_count,
                                           int callee_local_count,
                                           frame* caller,
                                           frame* interpreter_frame,
                                           bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in InterpreterGenerator::generate_fixed_frame.
  // If interpreter_frame != NULL, set up the following variables:
  //   - Lmethod
  //   - Llocals
  //   - Lmonitors (to the indicated number of monitors)
  //   - Lesp (to the indicated number of temps)
  // The frame caller (if not NULL) on entry is a description of the caller of the frame
  // we are about to lay out. We are guaranteed that we will be able to fill in a
  // new interpreter frame as its callee (i.e. the stack space is allocated and
  // the amount was determined by an earlier call to this method with interpreter_frame == NULL).
  // On return, interpreter_frame (if not NULL) will describe the interpreter frame we just laid out.

  int monitor_size           = moncount * frame::interpreter_frame_monitor_size();
  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);

  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
  //
  // Note: if you look closely this appears to be doing something much different
  // than generate_fixed_frame. What is happening is this. On sparc we have to do
  // this dance with interpreter_sp_adjustment because the window save area would
  // appear just below the bottom (tos) of the caller's java expression stack. Because
  // the interpreter wants the locals completely contiguous, generate_fixed_frame
  // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
  // In generate_fixed_frame the extension of the caller's sp happens in the callee.
  // In this code the opposite occurs: the caller adjusts its own stack based on the callee.
  // This is mostly ok but it causes a problem when we get to the initial frame (the oldest),
  // because the oldest frame would have adjusted its caller's frame, and yet that frame
  // already exists and isn't part of this array of frames we are unpacking. So at first
  // glance this would seem to mess up that frame. However, Deoptimization::fetch_unroll_info_helper(),
  // after it calculates all of the frames' on_stack_size()s, will figure out the
  // amount to adjust the caller of the initial (oldest) frame, and the calculation will all
  // add up. It might seem simpler to account for the adjustment here (and remove the
  // callee... parameters). However, this would mean that this routine would have to take
  // the caller frame as input so we could adjust its sp (and set its interpreter_sp_adjustment)
  // and run the calling loop in the reverse order. It would also appear to mean making
  // this code aware of the interactions when that initial caller frame was an OSR or
  // other adapter frame. Deoptimization is complicated enough, and hard enough to debug,
  // that there is no sense in messing with working code.
  //
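  // For example (hypothetical numbers): if the callee declares
  // callee_local_count = 7 and callee_param_count = 3, the caller's sp must be
  // extended by round_to(7 - 3, WordsPerLong) words so that the callee's
  // non-argument locals sit contiguously below the arguments already on the
  // caller's expression stack; that extension is exactly the
  // interpreter_sp_adjustment discussed above.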
  int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
  assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");

  int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
                                              monitor_size);

  if (interpreter_frame != NULL) {
    // The skeleton frame must already look like an interpreter frame
    // even if not fully filled out.
    assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");

    intptr_t* fp = interpreter_frame->fp();

    JavaThread* thread = JavaThread::current();
    RegisterMap map(thread, false);
    // More verification that skeleton frame is properly walkable
    assert(fp == caller->sp(), "fp must match");

    intptr_t* montop = fp - rounded_vm_local_words;

    // preallocate monitors (cf. __ add_monitor_to_stack)
    intptr_t* monitors = montop - monitor_size;

    // preallocate stack space
    intptr_t* esp = monitors - 1 -
                    (tempcount * Interpreter::stackElementWords()) -
                    popframe_extra_args;

    int local_words = method->max_locals() * Interpreter::stackElementWords();
    int parm_words  = method->size_of_parameters() * Interpreter::stackElementWords();
    NEEDS_CLEANUP;
    intptr_t* locals;
    if (caller->is_interpreted_frame()) {
      // Can force the locals area to end up properly overlapping the top of the expression stack.
      intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
      // Note that this computation means we replace size_of_parameters() values from the caller
      // interpreter frame's expression stack with our argument locals
      locals = Lesp_ptr + parm_words;
      int delta = local_words - parm_words;
      int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
      *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
    } else {
      assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
      // Don't have Lesp available; lay out locals block in the caller
      // adjacent to the register window save area.
      //
      // Compiled frames do not allocate a varargs area, which is why this if
      // statement is needed.
      //
      if (caller->is_compiled_frame()) {
        locals = fp + frame::register_save_words + local_words - 1;
      } else {
        locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
      }
      if (!caller->is_entry_frame()) {
        // Caller wants its own SP back
        int caller_frame_size = caller->cb()->frame_size();
        *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
      }
    }
    if (TraceDeoptimization) {
      if (caller->is_entry_frame()) {
        // make sure I5_savedSP and the entry frame's notion of saved SP
        // agree.  This assertion duplicates a check in entry frame code
        // but catches the failure earlier.
        assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
               "would change callers SP");
      }
      if (caller->is_entry_frame()) {
        tty->print("entry ");
      }
      if (caller->is_compiled_frame()) {
        tty->print("compiled ");
        if (caller->is_deoptimized_frame()) {
          tty->print("(deopt) ");
        }
      }
      if (caller->is_interpreted_frame()) {
        tty->print("interpreted ");
      }
      tty->print_cr("caller fp=0x%x sp=0x%x", caller->fp(), caller->sp());
      tty->print_cr("save area = 0x%x, 0x%x", caller->sp(), caller->sp() + 16);
      tty->print_cr("save area = 0x%x, 0x%x", caller->fp(), caller->fp() + 16);
      tty->print_cr("interpreter fp=0x%x sp=0x%x", interpreter_frame->fp(), interpreter_frame->sp());
      tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->sp(), interpreter_frame->sp() + 16);
      tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->fp(), interpreter_frame->fp() + 16);
      tty->print_cr("Llocals = 0x%x", locals);
      tty->print_cr("Lesp = 0x%x", esp);
      tty->print_cr("Lmonitors = 0x%x", monitors);
    }

    if (method->max_locals() > 0) {
      assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
      assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
      assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
      assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
    }
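    // Note: on 64-bit SPARC the ABI biases stack pointers by STACK_BIAS (2047),
    // so any correctly biased SP/FP value is odd; the assert below uses that
    // to catch a missing or doubled bias in the I5_savedSP value stored above.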
#ifdef _LP64
    assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
#endif

    *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
    *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
    *interpreter_frame->register_addr(Lmonitors)   = (intptr_t) monitors;
    *interpreter_frame->register_addr(Lesp)        = (intptr_t) esp;
    // Llast_SP will be same as SP as there is no adapter space
    *interpreter_frame->register_addr(Llast_SP)    = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
    *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
#ifdef FAST_DISPATCH
    *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
#endif


#ifdef ASSERT
    BasicObjectLock* mp = (BasicObjectLock*)monitors;

    assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
    assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize())+Interpreter::value_offset_in_bytes()), "locals match");
    assert(interpreter_frame->interpreter_frame_monitor_end() == mp, "monitor_end matches");
    assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
    assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");

    // check bounds
    intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
    intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
    assert(lo < monitors && montop <= hi, "monitors in bounds");
    assert(lo <= esp && esp < monitors, "esp in bounds");
#endif // ASSERT
  }

  return raw_frame_size;
}

//----------------------------------------------------------------------------------------------------
// Exceptions
void TemplateInterpreterGenerator::generate_throw_exception() {

  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // O0: exception

  // entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  __ verify_thread();
  // expression stack is undefined here
  // O0: exception, i.e. Oexception
  // Lbcp: exception bcp
  __ verify_oop(Oexception);


  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  // call C routine to find handler and jump to it
  __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
  __ push_ptr(O1); // push exception for exception handler bytecodes

  __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
  __ delayed()->nop();


  // if the exception is not handled in the current frame
  // the frame is removed and the exception is rethrown
  // (i.e. exception continuation is _rethrow_exception)
  //
  // Note: At this point the bcp still refers to the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).

  // in current activation
  // tos: exception
  // Lbcp: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
  // Set the popframe_processing bit in popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.
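  // In C-like pseudocode (field name as in JavaThread; a sketch of the
  // read-modify-write performed by the three instructions below):
  //   thread->_popframe_condition |= JavaThread::popframe_processing_bit;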
  __ ld(popframe_condition_addr, G3_scratch);
  __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
  __ stw(G3_scratch, popframe_condition_addr);

  // Empty the expression stack, as in normal exception handling
  __ empty_expression_stack();
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
    __ tst(O0);
    __ brx(Assembler::notEqual, false, Assembler::pt, caller_not_deoptimized);
    __ delayed()->nop();

    const Register Gtmp1 = G3_scratch;
    const Register Gtmp2 = G1_scratch;

    // Compute size of arguments for saving when returning to deoptimized caller
    __ lduh(Lmethod, in_bytes(methodOopDesc::size_of_parameters_offset()), Gtmp1);
    __ sll(Gtmp1, Interpreter::logStackElementSize(), Gtmp1);
    __ sub(Llocals, Gtmp1, Gtmp2);
    __ add(Gtmp2, wordSize, Gtmp2);
    // Save these arguments
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
    // Inform deoptimization that it is responsible for restoring these arguments
    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
    __ st(Gtmp1, popframe_condition_addr);

    // Return from the current method
    // The caller's SP was adjusted upon method entry to accommodate
    // the callee's non-argument locals. Undo that adjustment.
    __ ret();
    __ delayed()->restore(I5_savedSP, G0, SP);

    __ bind(caller_not_deoptimized);
  }

  // Clear the popframe condition flag
  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);

  // Get out of the current method (how this is done depends on the particular compiler calling
  // convention that the interpreter currently follows)
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ restore(I5_savedSP, G0, SP);
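  // Note on the restore just above (and on the ret()/delayed()->restore(...)
  // pair in the deoptimized-caller path earlier): on SPARC the delay-slot
  // instruction executes before the control transfer completes, so a delayed
  // restore both pops the register window and reinstates the caller's SP in
  // one step. I5_savedSP is used instead of the frame's plain saved SP because
  // the entry code may have extended the caller's stack to make room for the
  // callee's extra locals.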
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }
  // Resume bytecode interpretation at the current bcp
  __ dispatch_next(vtos);
  // end of JVMTI PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
  __ pop_ptr(Oexception); // get exception

  // Intel has the following comment:
  //// remove the activation (without doing throws on illegalMonitorExceptions)
  // They remove the activation without checking for bad monitor state.
  // %%% We should make sure this is the right semantics before implementing.

  // %%% changed set_vm_result_2 to set_vm_result and get_vm_result_2 to get_vm_result. Is there a bug here?
  __ set_vm_result(Oexception);
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);

  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);

  __ get_vm_result(Oexception);
  __ verify_oop(Oexception);

  const int return_reg_adjustment = frame::pc_return_offset;
  Address issuing_pc_addr(I7, return_reg_adjustment);

  // We are done with this activation frame; find out where to go next.
  // The continuation point will be an exception handler, which expects
  // the following registers set up:
  //
  // Oexception: exception
  // Oissuing_pc: the local call that threw exception
  // Other On: garbage
  // In/Ln:  the contents of the caller's register window
  //
  // We do the required restore at the last possible moment, because we
  // need to preserve some state across a runtime call.
  // (Remember that the caller activation is unknown--it might not be
  // interpreted, so things like Lscratch are useless in the caller.)

  // Although the Intel version uses call_C, we can use the more
  // compact call_VM. (The only real difference on SPARC is a
  // harmlessly ignored [re]set_last_Java_frame, compared with
  // the Intel code which lacks this.)
  __ mov(Oexception, Oexception->after_save());  // get exception in I0 so it will be on O0 after restore
  __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
  __ super_call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        Oissuing_pc->after_save());
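  // after_save() names the in register that an out register becomes once the
  // register window is popped: writing Oexception->after_save() (I0) and
  // Oissuing_pc->after_save() (I1) above means the caller will see the
  // exception in O0 and the issuing PC in O1 after the delayed restore below
  // exits this window.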
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ JMP(O0, 0); // return exception handler in caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  // (same old exception object is already in Oexception; see above)
  // Note that an "issuing PC" is actually the next PC after the call
}


//
// JVMTI ForceEarlyReturn support
//

address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);

  __ remove_activation(state,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret();                             // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  return entry;
} // end of JVMTI ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr(); __ ba(false, L); __ delayed()->nop();
  fep = __ pc(); __ push_f();   __ ba(false, L); __ delayed()->nop();
  dep = __ pc(); __ push_d();   __ ba(false, L); __ delayed()->nop();
  lep = __ pc(); __ push_l();   __ ba(false, L); __ delayed()->nop();
  iep = __ pc(); __ push_i();
  bep = cep = sep = iep;                // there aren't any separate byte/char/short entries; they reuse the int entry
  vep = __ pc(); __ bind(L);            // fall through
  generate_and_dispatch(t);
}

// --------------------------------------------------------------------------------


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
 : TemplateInterpreterGenerator(code) {
   generate_all(); // down here so it can be "virtual"
}

// --------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ mov(O7, Lscratch); // protect return address within interpreter

  // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
  __ mov( Otos_l2, G3_scratch );
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
  __ mov(Lscratch, O7); // restore return address
  __ pop(state);
  __ retl();
  __ delayed()->nop();

  return entry;
}


// helpers for generate_and_dispatch

void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
}

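// An illustrative walk-through of the pair-index update coded below
// (assuming BytecodePairHistogram::log2_number_of_codes == 8, i.e. up to
// 256 distinct codes): if _index currently encodes the pair (a, b) as
// (b << 8) | a, then after executing bytecode c the update
//     _index = (_index >> 8) | (c << 8)
// discards a and encodes the new pair (b, c); _counters[_index] then counts
// occurrences of that ordered bytecode pair.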
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  AddressLiteral index   (&BytecodePairHistogram::_index);
  AddressLiteral counters((address) &BytecodePairHistogram::_counters);

  // get index, shift out old bytecode, bring in new bytecode, and store it
  // _index = (_index >> log2_number_of_codes) |
  //          (bytecode << log2_number_of_codes);

  __ load_contents(index, G4_scratch);
  __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
  __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes,  G3_scratch );
  __ or3( G3_scratch, G4_scratch, G4_scratch );
  __ store_contents(G4_scratch, index, G3_scratch);

  // bump bucket contents
  // _counters[_index] ++;

  __ set(counters, G3_scratch);                       // loads into G3_scratch
  __ sll( G4_scratch, LogBytesPerWord, G4_scratch );  // Index is word address
  __ add (G3_scratch, G4_scratch, G3_scratch);        // Add in index
  __ ld (G3_scratch, 0, G4_scratch);
  __ inc (G4_scratch);
  __ st (G4_scratch, 0, G3_scratch);
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  address entry = Interpreter::trace_code(t->tos_in());
  guarantee(entry != NULL, "entry must have been generated");
  __ call(entry, relocInfo::none);
  __ delayed()->nop();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  AddressLiteral counter(&BytecodeCounter::_counter_value);
  __ load_contents(counter, G3_scratch);
  AddressLiteral stop_at(&StopInterpreterAt);
  __ load_ptr_contents(stop_at, G4_scratch);
  __ cmp(G3_scratch, G4_scratch);
  __ breakpoint_trap(Assembler::equal);
}
#endif // not PRODUCT
#endif // !CC_INTERP