/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH


// Generation of Interpreter
//
// The InterpreterGenerator generates the interpreter into Interpreter::_code.
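//
// Note: the "__" shorthand defined below expands to "_masm->", so every
// "__ op(...)" line in this file emits one SPARC instruction (or pseudo-op)
// into the interpreter's code buffer through the current macro assembler.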


#define __ _masm->


//----------------------------------------------------------------------------------------------------


void InterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
  __ stx(O0, l_tmp);
#else
  __ std(O0, l_tmp);
#endif
}

void InterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
  __ ldx(l_tmp, O0);
#else
  __ ldd(l_tmp, O0);
#endif
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}


address
TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  if (state == atos) {
    __ profile_return_type(O0, G3_scratch, G1_scratch);
  }

#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for Longs. C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
  // build even if we are returning from interpreted code, we just do a little
  // shuffling here.
  // Note: I tried to make C2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately, if we did a rethrow we'd see a MachEpilog node
  // first, which would move G1 -> O0/O1 and destroy the exception we were throwing.

  if (state == ltos) {
    __ srl (G1,  0, O1);
    __ srlx(G1, 32, O0);
  }
#endif // !_LP64 && COMPILER2

  // The callee returns with the stack possibly adjusted by adapter transition.
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.

  const Register cache = G3_scratch;
  const Register index = G1_scratch;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
  const Register parameter_size = flags;
  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments
  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ br_null_short(Gtemp, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
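//
// Note on the T_BOOLEAN case below: subcc(G0, O0, G0) computes 0 - O0 and
// sets the carry flag exactly when O0 != 0; addc(G0, 0, Itos_i) then adds
// that carry into a zero, normalizing any nonzero value to 1 without a branch.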
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2);  // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i);                           break;
    case T_VOID   : /* nothing to do */                           break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code");        break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code");        break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                                   // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_int32(0);)              // marker for disassembly
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ dispatch_next(state);
  return entry;
}

//
// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in MethodCounters* or in
  // MDO depending on whether we're profiling or not.
  const Register G3_method_counters = G3_scratch;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(MethodData::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba_short(done);
    }

    // Increment counter in MethodCounters*
    __ bind(no_mdo);
    Address invocation_counter(G3_method_counters,
            in_bytes(MethodCounters::invocation_counter_offset()) +
            in_bytes(InvocationCounter::counter_offset()));
    __ get_method_counters(Lmethod, G3_method_counters, done);
    Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G4_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    // Update standard invocation counters
    __ get_method_counters(Lmethod, G3_method_counters, done);
    __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
    if (ProfileInterpreter) {
      Address interpreter_invocation_counter(G3_method_counters,
              in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G4_scratch);
      __ inc(G4_scratch);
      __ st(G4_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
      __ ld(profile_limit, G1_scratch);
      __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
    __ ld(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
    __ delayed()->nop();
    __ bind(done);
  }

}

// Allocate monitor and lock method (asm interpreter)
// Lmethod - Method*
//
void InterpreterGenerator::lock_method(void) {
  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.
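  // O0 now holds the access flags; they are tested below both to assert that
  // the method really is synchronized and to pick the lock object (the
  // receiver for instance methods, the class mirror for static methods).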

#ifdef ASSERT
 { Label ok;
   __ btst(JVM_ACC_SYNCHRONIZED, O0);
   __ br( Assembler::notZero, false, Assembler::pt, ok);
   __ delayed()->nop();
   __ stop("method doesn't need synchronization");
   __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    __ ld_ptr( Lmethod, in_bytes(Method::const_offset()), O0);
    __ ld_ptr( O0, in_bytes(ConstMethod::constants_offset()), O0);
    __ ld_ptr( O0, ConstantPool::pool_holder_offset_in_bytes(), O0);

    // lock the mirror, not the Klass*
    __ ld_ptr( O0, mirror_offset, O0);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());   // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}


void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch,
                                                                 Register Rscratch2) {
  const int page_size = os::vm_page_size();
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch, Rscratch2);

  __ set(page_size, Rscratch);
  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);

  // get the stack base, and in debug, verify it is non-zero
  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
#ifdef ASSERT
  Label base_not_zero;
  __ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);
  __ stop("stack base is zero in generate_stack_overflow_check");
  __ bind(base_not_zero);
#endif

  // get the stack size, and in debug, verify it is non-zero
  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT
  Label size_not_zero;
  __ br_notnull_short(Rscratch2, Assembler::pn, size_not_zero);
  __ stop("stack size is zero in generate_stack_overflow_check");
  __ bind(size_not_zero);
#endif

  // compute the beginning of the protected zone minus the requested frame size
  __ sub( Rscratch, Rscratch2, Rscratch );
  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
  __ add( Rscratch, Rscratch2, Rscratch );

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register)
  __ add( Rscratch, Rframe_size, Rscratch );

  // the frame is greater than one page in size, so check against
  // the bottom of the stack
  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);

  // the stack will overflow, throw an exception

  // Note that SP is restored to sender's sp (in the delay slot). This
  // is necessary if the sender's frame is an extended compiled frame
  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
  // adaptations.

  // Note also that the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
  __ jump_to(stub, Rscratch);
  __ delayed()->mov(O5_savedSP, SP);

  // if you get to here, then there is enough stack space
  __ bind( after_frame_check );
}


//
// Generate a fixed interpreter frame. The setup is identical for interpreted
// methods and for native methods, hence the shared code.


//----------------------------------------------------------------------------------------------------
// Stack frame layout
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the Method* of the method to call
//    Lesp:      points to the TOS of the caller's expression stack
//               after having pushed all the parameters
//
// The entry code does the following to set up an interpreter frame:
//   pop parameters from the caller's stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//               + vm_local_words
//               + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
// The frame has now been set up to do the rest of the entry code

// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.
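
// Background note on the "save sp, -X, sp" idiom above: on SPARC, save both
// allocates X bytes of stack (SP moves down) and slides the register window,
// so the caller's out registers (o0-o7) become the callee's in registers
// (i0-i7). That is how the entry code can still see caller values after the
// new frame exists (e.g. "set Llocals to i0" in the recipe above).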

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are set up:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs  : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SET UP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP by the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be set up:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    Method::extra_stack_entries() +            // extra stack for jsr 292
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ?
      frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register RconstMethod = Glocals_size;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.
  const Address constMethod       (G5_method, Method::const_offset());
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size,  extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    const Address size_of_locals(Otmp1, ConstMethod::size_of_locals_offset());
    __ ld_ptr( constMethod, Otmp1 );
    __ lduh( size_of_locals, Otmp1 );
    __ sub( Otmp1, Glocals_size, Glocals_size );
    __ round_to( Glocals_size, WordsPerLong );
    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );

    // see if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ ld_ptr( constMethod, Gframe_size );
    __ lduh( Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size );
    __ add( Gframe_size, extra_space, Gframe_size );
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add( Gframe_size, Glocals_size, Gframe_size );

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);

    __ sub( Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub( SP, Glocals_size, SP );
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                   // set Lmethod
  __ get_constant_pool_cache( LcpoolCache );     // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
#ifdef _LP64
  __ add( Lmonitors, STACK_BIAS, Lmonitors );    // Account for 64 bit stack bias
#endif
  __ sub(Lmonitors, BytesPerWord, Lesp);         // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);          // set Llocals

  if (ProfileInterpreter) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    __ set_method_data_pointer();
  }

}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // In the G1 code we don't check if we need to reach a safepoint. We
    // continue and the thread will safepoint at the next bytecode dispatch.

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    // check if local 0 == NULL and go the slow path
    __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);


    // Load the value of the referent field.
    if (Assembler::is_simm13(referent_offset)) {
      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
    } else {
      __ set(referent_offset, G3_scratch);
      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
    }

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer. Note with
    // these parameters the pre-barrier does not generate
    // the load of the previous value

    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
                            Otos_i /* pre_val */,
                            G3_scratch /* tmp */,
                            true /* preserve_o_regs */);

    // _areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point;
  // Reference.get is an accessor.
  return generate_jump_to_normal_entry();
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // runtime, exception handling i.e. unlock_if_synchronized_method will
  // check this thread local flag.
  // This flag has two effects: it forces an unwind in the topmost
  // interpreter frame and suppresses the unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }


  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, Method::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ br_notnull_short(G3_scratch, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in it.
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);     // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!
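
  // Note on the identical-size frame pushed above: the native args get built
  // in the new frame's outgoing-parameter area while the interpreter state
  // stays safe in the (now older) frame's register save area. Lmethod and
  // Llocals were copied across because everything else in this window is
  // stale at this point.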

  // Call the signature handler. It will move the args properly, since Llocals
  // in the current frame matches that in the outer frame.

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());

    __ ld_ptr(Lmethod, Method::const_offset(), O1);
    __ ld_ptr(O1, ConstMethod::constants_offset(), O1);
    __ ld_ptr(O1, ConstantPool::pool_holder_offset_in_bytes(), O1);
    __ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ br_notnull_short(O1, Assembler::pt, L);
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch. O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ br_notnull_short(O0, Assembler::pt, L);
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flushw();

  // mark windows as flushed
  Address flags(G2_thread,
                JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
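  // Thread-state walkthrough for the native call below: the state goes
  // _thread_in_Java -> _thread_in_native (around the call itself), then
  // _thread_in_native_trans on the way back, which is where the safepoint
  // and suspend checks happen, and finally back to _thread_in_Java.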

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);

  // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
    __ bind(L);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
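    // check_special_condition_for_native_trans may block for a safepoint or
    // handle a pending suspend request; the result in O0/O1 and F0 survives
    // via the l_tmp/d_tmp slots (see save_native_result above).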
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef _LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* _LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // reset handle block
  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

  // If we have an oop result, store it where it will be safe for any further GC
  // until we return, now that we've released the handle it might have been protected by.

  {
    Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
    __ mov(G0, O0);

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);

  }


  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);
    __ br_null_short(Gtemp, Assembler::pt, L);
    // Note: This could be handled more efficiently since we know that the native
    //       method doesn't have an exception handler. We could directly return
    //       to the exception handler for the caller.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1; we can't tell if we're returning to interpreted
  // or compiled code, so just be safe.
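  // The three instructions below assemble the 64-bit long from its 32-bit
  // halves: O0 holds the high word and O1 the low word, so G1 ends up as
  // (O0 << 32) | zero-extend(O1).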

  __ sllx(O0, 32, G1);          // Shift bits into high G1
  __ srl (O1, 0, O1);           // Zero extend O1
  __ or3 (O1, G1, G1);          // OR 64 bits into G1

#endif /* COMPILER2 && !_LP64 */

  // dispose of return address and remove activation
#ifdef ASSERT
  {
    Label ok;
    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();


  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }



  return entry;
}


// Generic method entry to (asm) interpreter
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address constMethod (G5_method, Method::const_offset());
  // Seems like G5_method is live at the point this is used. So we could make this look consistent
  // and use it in the asserts below.
  const Address access_flags (Lmethod, Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

#ifdef FAST_DISPATCH
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
                                          // set bytecode dispatch table base
#endif

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg;    // will be G0 if we must clear locals
  // The way the code was set up before, zerolocals was always true for vanilla java entries.
  // It could only be false for the specialized entries like accessor or empty which have
  // no extra locals so the testing was a waste of time and the extra locals were always
  // initialized. We removed this extra complication from already over-complicated code.
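
  // The loop below zeroes exactly the non-parameter locals (local slots
  // size_of_parameters .. size_of_locals-1, which sit below the parameters
  // in memory, since locals grow toward lower addresses from Llocals), so
  // GC never sees stale stack data where an oop is expected.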

  init_value = G0;
  Label clear_loop;

  const Register RconstMethod = O1;
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize, O2);
  __ sll( O1, Interpreter::logStackElementSize, O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // runtime, exception handling i.e. unlock_if_synchronized_method will
  // check this thread local flag.
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch);
  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);


  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ ba_short(profile_method_continue);
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}

static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {

  // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
  // expression stack, the callee will have callee_extra_locals (so we can account for
  // frame extension) and monitor_size for monitors. Basically we need to calculate
  // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
  //
  //
  // The big complicating thing here is that we must ensure that the stack stays properly
  // aligned. This would be even uglier if monitor size weren't a multiple of what the stack
  // needs to be aligned to. We are given that the sp (fp) is already aligned by
  // the caller so we must ensure that it is properly aligned for our callee.
  //
  const int rounded_vm_local_words =
       round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
  // callee_locals and max_stack are counts, not the size in frame.
  const int locals_size =
       round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
  const int max_stack_words = max_stack * Interpreter::stackElementWords;
  return (round_to((max_stack_words
                   + rounded_vm_local_words
                   + frame::memory_parameter_word_sp_offset), WordsPerLong)
                   // already rounded
                   + locals_size + monitor_size);
}

// How much stack a method's top interpreter activation needs, in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {

  // See call_stub code
  int call_stub_size  = round_to(7 + frame::memory_parameter_word_sp_offset,
                                 WordsPerLong);    // 7 + register save area

  // Save space for one monitor to get into the interpreted method in case
  // the method is synchronized
  int monitor_size    = method->is_synchronized() ?
                                1*frame::interpreter_frame_monitor_size() : 0;
  return size_activation_helper(method->max_locals(), method->max_stack(),
                                monitor_size) + call_stub_size;
}

int AbstractInterpreter::size_activation(int max_stack,
                                         int temps,
                                         int extra_args,
                                         int monitors,
                                         int callee_params,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in InterpreterGenerator::generate_fixed_frame.

  int monitor_size = monitors * frame::interpreter_frame_monitor_size();

  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");

  //
  // Note: if you look closely this appears to be doing something much different
  // than generate_fixed_frame. What is happening is this. On sparc we have to do
  // this dance with interpreter_sp_adjustment because the window save area would
  // appear just below the bottom (tos) of the caller's java expression stack. Because
  // the interpreter wants to have the locals completely contiguous, generate_fixed_frame
  // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
  // Now in generate_fixed_frame the extension of the caller's sp happens in the callee.
  // In this code the opposite occurs: the caller adjusts its own stack based on the callee.
  // This is mostly ok but it does cause a problem when we get to the initial frame (the oldest),
  // because the oldest frame would have adjusted its caller's frame and yet that frame
  // already exists and isn't part of this array of frames we are unpacking. So at first
  // glance this would seem to mess up that frame. However, Deoptimization::fetch_unroll_info_helper(),
  // after it calculates all of the frames' on_stack_size()'s, will figure out the
  // amount to adjust the caller of the initial (oldest) frame, and the calculation will all
  // add up. It does seem like it would be simpler to account for the adjustment here (and remove
  // the callee... parameters here). However, this would mean that this routine would have to take
  // the caller frame as input so we could adjust its sp (and set its interpreter_sp_adjustment),
  // and run the calling loop in the reverse order. This would also appear to mean making
  // this code aware of what the interactions are when that initial caller frame was an osr or
  // other adapter frame. Deoptimization is complicated enough and hard enough to debug that
  // there is no sense in messing with working code.
  //

  int rounded_cls = round_to((callee_locals - callee_params), WordsPerLong);
  assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");

  int raw_frame_size = size_activation_helper(rounded_cls, max_stack, monitor_size);

  return raw_frame_size;
}

void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_local_count,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {
  // Set up the following variables:
  //   - Lmethod
  //   - Llocals
  //   - Lmonitors (to the indicated number of monitors)
  //   - Lesp (to the indicated number of temps)
  // The frame caller on entry is a description of the caller of the
  // frame we are about to lay out. We are guaranteed that we will be
  // able to fill in a new interpreter frame as its callee (i.e. the
  // stack space is allocated and the amount was determined by an
  // earlier call to the size_activation() method). On return caller
  // will describe the interpreter frame we just laid out.

  // The skeleton frame must already look like an interpreter frame
  // even if not fully filled out.
void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_local_count,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {
  // Set up the following variables:
  //   - Lmethod
  //   - Llocals
  //   - Lmonitors (to the indicated number of monitors)
  //   - Lesp (to the indicated number of temps)
  // The frame caller on entry is a description of the caller of the
  // frame we are about to layout. We are guaranteed that we will be
  // able to fill in a new interpreter frame as its callee (i.e. the
  // stack space is allocated and the amount was determined by an
  // earlier call to the size_activation() method). On return caller
  // will describe the interpreter frame we just laid out.

  // The skeleton frame must already look like an interpreter frame
  // even if not fully filled out.
  assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");

  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
  int monitor_size = moncount * frame::interpreter_frame_monitor_size();
  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");

  intptr_t* fp = interpreter_frame->fp();

  JavaThread* thread = JavaThread::current();
  RegisterMap map(thread, false);
  // More verification that skeleton frame is properly walkable
  assert(fp == caller->sp(), "fp must match");

  intptr_t* montop = fp - rounded_vm_local_words;

  // preallocate monitors (cf. __ add_monitor_to_stack)
  intptr_t* monitors = montop - monitor_size;

  // preallocate stack space
  intptr_t* esp = monitors - 1 -
                  (tempcount * Interpreter::stackElementWords) -
                  popframe_extra_args;

  int local_words = method->max_locals() * Interpreter::stackElementWords;
  NEEDS_CLEANUP;
  intptr_t* locals;
  if (caller->is_interpreted_frame()) {
    // Can force the locals area to end up properly overlapping the top of the expression stack.
    intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
    // Note that this computation means we replace size_of_parameters() values from the caller
    // interpreter frame's expression stack with our argument locals
    int parm_words = caller_actual_parameters * Interpreter::stackElementWords;
    locals = Lesp_ptr + parm_words;
    int delta = local_words - parm_words;
    int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
    *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
    if (!is_bottom_frame) {
      // Llast_SP is set below for the current frame to SP (with the
      // extra space for the callee's locals). Here we adjust
      // Llast_SP for the caller's frame, removing the extra space
      // for the current method's locals.
      *caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
    } else {
      assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
    }
  } else {
    assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
    // Don't have Lesp available; lay out locals block in the caller
    // adjacent to the register window save area.
    //
    // Compiled frames do not allocate a varargs area which is why this if
    // statement is needed.
    //
    if (caller->is_compiled_frame()) {
      locals = fp + frame::register_save_words + local_words - 1;
    } else {
      locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
    }
    if (!caller->is_entry_frame()) {
      // Caller wants his own SP back
      int caller_frame_size = caller->cb()->frame_size();
      *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
    }
  }
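
  // Illustrative only (hypothetical values, 64-bit, stackElementWords == 1,
  // WordsPerLong == 2): for an interpreted caller passing
  // caller_actual_parameters == 2 to a method with max_locals() == 5:
  //
  //   parm_words = 2, local_words = 5
  //   locals     = Lesp_ptr + 2   // local[0] and local[1] overlay the two
  //                               // argument words on the caller's stack
  //   delta      = 5 - 2 = 3      // non-argument locals still needed
  //   computed_sp_adjustment = round_to(3, 2) = 4
  //
  // I5_savedSP then holds fp + 4 words (re-biased): the sp the caller gets
  // back once the words borrowed for the extra locals are returned.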
  if (TraceDeoptimization) {
    if (caller->is_entry_frame()) {
      // make sure I5_savedSP and the entry frame's notion of saved SP
      // agree. This assertion duplicates a check in entry frame code
      // but catches the failure earlier.
      assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
             "would change callers SP");
    }
    if (caller->is_entry_frame()) {
      tty->print("entry ");
    }
    if (caller->is_compiled_frame()) {
      tty->print("compiled ");
      if (caller->is_deoptimized_frame()) {
        tty->print("(deopt) ");
      }
    }
    if (caller->is_interpreted_frame()) {
      tty->print("interpreted ");
    }
    tty->print_cr("caller fp=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(caller->fp()), p2i(caller->sp()));
    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(caller->sp()), p2i(caller->sp() + 16));
    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(caller->fp()), p2i(caller->fp() + 16));
    tty->print_cr("interpreter fp=" INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->fp()), p2i(interpreter_frame->sp()));
    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->sp()), p2i(interpreter_frame->sp() + 16));
    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->fp()), p2i(interpreter_frame->fp() + 16));
    tty->print_cr("Llocals = " INTPTR_FORMAT, p2i(locals));
    tty->print_cr("Lesp = " INTPTR_FORMAT, p2i(esp));
    tty->print_cr("Lmonitors = " INTPTR_FORMAT, p2i(monitors));
  }

  if (method->max_locals() > 0) {
    // the locals block must not overlap either register window save area
    // (16 words at sp and at fp)
    assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
    assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
    assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
    assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
  }
#ifdef _LP64
  // sp values are stored biased by STACK_BIAS (an odd constant), so a
  // correctly biased, aligned sp value is odd
  assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
#endif

  *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
  *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
  *interpreter_frame->register_addr(Lmonitors)   = (intptr_t) monitors;
  *interpreter_frame->register_addr(Lesp)        = (intptr_t) esp;
  // Llast_SP will be same as SP as there is no adapter space
  *interpreter_frame->register_addr(Llast_SP)    = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
  *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
#ifdef FAST_DISPATCH
  *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
#endif
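
  // Reminder on indexing (this follows directly from the asserts below):
  // local i lives at Llocals - i * Interpreter::stackElementSize, so local 0
  // has the highest address and the locals block grows toward lower
  // addresses. The ASSERT block re-derives each stored value through the
  // frame accessors.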
"esp in bounds"); 1634 #endif // ASSERT 1635 } 1636 1637 //---------------------------------------------------------------------------------------------------- 1638 // Exceptions 1639 void TemplateInterpreterGenerator::generate_throw_exception() { 1640 1641 // Entry point in previous activation (i.e., if the caller was interpreted) 1642 Interpreter::_rethrow_exception_entry = __ pc(); 1643 // O0: exception 1644 1645 // entry point for exceptions thrown within interpreter code 1646 Interpreter::_throw_exception_entry = __ pc(); 1647 __ verify_thread(); 1648 // expression stack is undefined here 1649 // O0: exception, i.e. Oexception 1650 // Lbcp: exception bcp 1651 __ verify_oop(Oexception); 1652 1653 1654 // expression stack must be empty before entering the VM in case of an exception 1655 __ empty_expression_stack(); 1656 // find exception handler address and preserve exception oop 1657 // call C routine to find handler and jump to it 1658 __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception); 1659 __ push_ptr(O1); // push exception for exception handler bytecodes 1660 1661 __ JMP(O0, 0); // jump to exception handler (may be remove activation entry!) 1662 __ delayed()->nop(); 1663 1664 1665 // if the exception is not handled in the current frame 1666 // the frame is removed and the exception is rethrown 1667 // (i.e. exception continuation is _rethrow_exception) 1668 // 1669 // Note: At this point the bci is still the bxi for the instruction which caused 1670 // the exception and the expression stack is empty. Thus, for any VM calls 1671 // at this point, GC will find a legal oop map (with empty expression stack). 1672 1673 // in current activation 1674 // tos: exception 1675 // Lbcp: exception bcp 1676 1677 // 1678 // JVMTI PopFrame support 1679 // 1680 1681 Interpreter::_remove_activation_preserving_args_entry = __ pc(); 1682 Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset()); 1683 // Set the popframe_processing bit in popframe_condition indicating that we are 1684 // currently handling popframe, so that call_VMs that may happen later do not trigger new 1685 // popframe handling cycles. 1686 1687 __ ld(popframe_condition_addr, G3_scratch); 1688 __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch); 1689 __ stw(G3_scratch, popframe_condition_addr); 1690 1691 // Empty the expression stack, as in normal exception handling 1692 __ empty_expression_stack(); 1693 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false); 1694 1695 { 1696 // Check to see whether we are returning to a deoptimized frame. 1697 // (The PopFrame call ensures that the caller of the popped frame is 1698 // either interpreted or compiled and deoptimizes it if compiled.) 1699 // In this case, we can't call dispatch_next() after the frame is 1700 // popped, but instead must save the incoming arguments and restore 1701 // them after deoptimization has occurred. 1702 // 1703 // Note that we don't compare the return PC against the 1704 // deoptimization blob's unpack entry because of the presence of 1705 // adapter frames in C2. 
  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
    __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);

    const Register Gtmp1 = G3_scratch;
    const Register Gtmp2 = G1_scratch;
    const Register RconstMethod = Gtmp1;
    const Address constMethod(Lmethod, Method::const_offset());
    const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

    // Compute size of arguments for saving when returning to deoptimized caller
    __ ld_ptr(constMethod, RconstMethod);
    __ lduh(size_of_parameters, Gtmp1);
    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
    __ sub(Llocals, Gtmp1, Gtmp2);
    __ add(Gtmp2, wordSize, Gtmp2);
    // Save these arguments
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
    // Inform deoptimization that it is responsible for restoring these arguments
    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
    __ st(Gtmp1, popframe_condition_addr);

    // Return from the current method
    // The caller's SP was adjusted upon method entry to accommodate
    // the callee's non-argument locals. Undo that adjustment.
    __ ret();
    __ delayed()->restore(I5_savedSP, G0, SP);

    __ bind(caller_not_deoptimized);
  }

  // Clear the popframe condition flag
  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);

  // Get out of the current method (how this is done depends on the particular compiler calling
  // convention that the interpreter currently follows)
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ restore(I5_savedSP, G0, SP);
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

#if INCLUDE_JVMTI
  {
    Label L_done;

    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);

    __ br_null(G1_scratch, false, Assembler::pn, L_done);
    __ delayed()->nop();

    // store the returned member name oop back into the TOS slot (Lesp + wordSize)
    __ st_ptr(G1_scratch, Lesp, wordSize);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Resume bytecode interpretation at the current bcp
  __ dispatch_next(vtos);
  // end of JVMTI PopFrame support
  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
  __ pop_ptr(Oexception); // get exception

  // Intel has the following comment:
  //// remove the activation (without doing throws on illegalMonitorExceptions)
  // They remove the activation without checking for bad monitor state.
  // %%% We should make sure this is the right semantics before implementing.

  __ set_vm_result(Oexception);
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);

  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);

  __ get_vm_result(Oexception);
  __ verify_oop(Oexception);

  const int return_reg_adjustment = frame::pc_return_offset;
  Address issuing_pc_addr(I7, return_reg_adjustment);

  // We are done with this activation frame; find out where to go next.
  // The continuation point will be an exception handler, which expects
  // the following registers set up:
  //
  // Oexception: exception
  // Oissuing_pc: the local call that threw the exception
  // Other On: garbage
  // In/Ln:  the contents of the caller's register window
  //
  // We do the required restore at the last possible moment, because we
  // need to preserve some state across a runtime call.
  // (Remember that the caller activation is unknown--it might not be
  // interpreted, so things like Lscratch are useless in the caller.)

  // Although the Intel version uses call_C, we can use the more
  // compact call_VM. (The only real difference on SPARC is a
  // harmlessly ignored [re]set_last_Java_frame, compared with
  // the Intel code which lacks this.)
  __ mov(Oexception, Oexception->after_save());        // get exception in I0 so it will be on O0 after restore
  __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
  __ super_call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ JMP(O0, 0); // return exception handler in caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  // (same old exception object is already in Oexception; see above)
  // Note that an "issuing PC" is actually the next PC after the call
}

//
// JVMTI ForceEarlyReturn support
//

address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);

  __ remove_activation(state,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret();                             // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  return entry;
} // end of JVMTI ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr(); __ ba_short(L);
  fep = __ pc(); __ push_f();   __ ba_short(L);
  dep = __ pc(); __ push_d();   __ ba_short(L);
  lep = __ pc(); __ push_l();   __ ba_short(L);
  iep = __ pc(); __ push_i();
  bep = cep = sep = iep;        // there aren't any byte/char/short entries; they share the int entry
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}

// --------------------------------------------------------------------------------


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
 : TemplateInterpreterGenerator(code) {
   generate_all(); // down here so it can be "virtual"
}

// --------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ mov(O7, Lscratch); // protect return address within interpreter

  // Pass a 0 (not used on sparc) and the top of stack to the bytecode tracer
  __ mov( Otos_l2, G3_scratch );
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
  __ mov(Lscratch, O7); // restore return address
  __ pop(state);
  __ retl();
  __ delayed()->nop();

  return entry;
}


// helpers for generate_and_dispatch

void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  AddressLiteral index   (&BytecodePairHistogram::_index);
  AddressLiteral counters((address) &BytecodePairHistogram::_counters);

  // get index, shift out old bytecode, bring in new bytecode, and store it
  // _index = (_index >> log2_number_of_codes) |
  //          (bytecode << log2_number_of_codes);

  __ load_contents(index, G4_scratch);
  __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
  __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch );
  __ or3( G3_scratch, G4_scratch, G4_scratch );
  __ store_contents(G4_scratch, index, G3_scratch);

  // bump bucket contents
  // _counters[_index] ++;

  __ set(counters, G3_scratch);                       // loads into G3_scratch
  __ sll( G4_scratch, LogBytesPerWord, G4_scratch );  // Index is word address
  __ add (G3_scratch, G4_scratch, G3_scratch);        // Add in index
  __ ld (G3_scratch, 0, G4_scratch);
  __ inc (G4_scratch);
  __ st (G4_scratch, 0, G3_scratch);
}
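
// Worked example for the pair computation above (assuming
// log2_number_of_codes == 8, i.e. up to 256 opcodes): if the previously
// recorded bytecode was 0x1b and the current one is 0x60, then
//
//   _index = (old_index >> 8) | (0x60 << 8)
//
// leaves the previous bytecode (0x1b) in the low byte and installs the new
// one in the high byte, so _counters[_index] counts the ordered pair
// (0x1b, 0x60).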

void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  address entry = Interpreter::trace_code(t->tos_in());
  guarantee(entry != NULL, "entry must have been generated");
  __ call(entry, relocInfo::none);
  __ delayed()->nop();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  AddressLiteral counter(&BytecodeCounter::_counter_value);
  __ load_contents(counter, G3_scratch);
  AddressLiteral stop_at(&StopInterpreterAt);
  __ load_ptr_contents(stop_at, G4_scratch);
  __ cmp(G3_scratch, G4_scratch);
  __ breakpoint_trap(Assembler::equal, Assembler::icc);
}
#endif // not PRODUCT
#endif // !CC_INTERP