/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH


// Generation of Interpreter
//
// The TemplateInterpreterGenerator generates the interpreter into Interpreter::_code.


#define __ _masm->


//----------------------------------------------------------------------------------------------------


void TemplateInterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
  __ stx(O0, l_tmp);
#else
  __ std(O0, l_tmp);
#endif
}

void TemplateInterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
  __ ldx(l_tmp, O0);
#else
  __ ldd(l_tmp, O0);
#endif
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}


address
TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  if (state == atos) {
    __ profile_return_type(O0, G3_scratch, G1_scratch);
  }

#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for Longs. C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
  // build even if we are returning from interpreted code, we just do a little
  // shuffling.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a machepilog node
  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.

  if (state == ltos) {
    __ srl (G1,  0, O1);
    __ srlx(G1, 32, O0);
  }
#endif // !_LP64 && COMPILER2

  // The callee returns with the stack possibly adjusted by adapter transition.
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP); // Remove any adapter added stack space.

  const Register cache = G3_scratch;
  const Register index = G1_scratch;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
  const Register parameter_size = flags;
  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size); // argument size in words
  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);    // each argument size in bytes
  __ add(Lesp, parameter_size, Lesp);                                          // pop arguments
  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.
  if (UseJVMCICompiler) {
    Label L;
    Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
    __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
    __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
    // Clear flag.
    __ stbool(G0, pending_monitor_enter_addr);
    // Take lock.
    lock_method();
    __ bind(L);
  }
#endif
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ br_null_short(Gtemp, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
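//
// For example, the narrowing conversions below compute, in C terms (a
// sketch of the generated code's effect, not VM source):
//
//   Itos_i = (O0 != 0);    // T_BOOLEAN: !0 => true; 0 => false
//   Itos_i = (jchar)O0;    // T_CHAR:  zero-extend the low 16 bits
//   Itos_i = (jbyte)O0;    // T_BYTE:  sign-extend the low 8 bits
//   Itos_i = (jshort)O0;   // T_SHORT: sign-extend the low 16 bits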
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2);  // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i);                           break;
    case T_VOID   : /* nothing to do */                           break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );       break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );       break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                                   // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_int32(0);)              // marker for disassembly
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ dispatch_next(state);
  return entry;
}

//
// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in MethodCounters* or in
  // MDO depending on whether we're profiling or not.
  const Register G3_method_counters = G3_scratch;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    Label no_mdo;
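    // increment_mask_and_jump is, in effect, this sketch (the mask makes
    // the overflow test fire periodically rather than only once):
    //   counter += increment;
    //   if ((counter & mask) == 0) goto *overflow;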
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(MethodData::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba_short(done);
    }

    // Increment counter in MethodCounters*
    __ bind(no_mdo);
    Address invocation_counter(G3_method_counters,
            in_bytes(MethodCounters::invocation_counter_offset()) +
            in_bytes(InvocationCounter::counter_offset()));
    __ get_method_counters(Lmethod, G3_method_counters, done);
    Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G4_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    // Update standard invocation counters
    __ get_method_counters(Lmethod, G3_method_counters, done);
    __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
    if (ProfileInterpreter) {
      Address interpreter_invocation_counter(G3_method_counters,
              in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G4_scratch);
      __ inc(G4_scratch);
      __ st(G4_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
      __ ld(profile_limit, G1_scratch);
      __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
    __ ld(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
    __ delayed()->nop();
    __ bind(done);
  }

}

// Allocate monitor and lock method (asm interpreter)
// Lmethod - Method*
//
void TemplateInterpreterGenerator::lock_method() {
  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.
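  // The monitor object chosen below is, in effect (sketch):
  //   obj = is_static(flags) ? method->constants()->pool_holder()->java_mirror()
  //                          : receiver;  // local 0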

#ifdef ASSERT
  { Label ok;
    __ btst(JVM_ACC_SYNCHRONIZED, O0);
    __ br( Assembler::notZero, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    __ ld_ptr( Lmethod, in_bytes(Method::const_offset()), O0);
    __ ld_ptr( O0, in_bytes(ConstMethod::constants_offset()), O0);
    __ ld_ptr( O0, ConstantPool::pool_holder_offset_in_bytes(), O0);

    // lock the mirror, not the Klass*
    __ ld_ptr( O0, mirror_offset, O0);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes()); // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}


void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch,
                                                                 Register Rscratch2) {
  const int page_size = os::vm_page_size();
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch, Rscratch2);

  __ set(page_size, Rscratch);
  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);

  // get the stack base, and in debug, verify it is non-zero
  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
#ifdef ASSERT
  Label base_not_zero;
  __ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);
  __ stop("stack base is zero in generate_stack_overflow_check");
  __ bind(base_not_zero);
#endif

  // get the stack size, and in debug, verify it is non-zero
  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT
  Label size_not_zero;
  __ br_notnull_short(Rscratch2, Assembler::pn, size_not_zero);
  __ stop("stack size is zero in generate_stack_overflow_check");
  __ bind(size_not_zero);
#endif

  // compute the beginning of the protected zone minus the requested frame size
  __ sub( Rscratch, Rscratch2, Rscratch );
  __ set( JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_zone_size(), Rscratch2 );
  __ add( Rscratch, Rscratch2, Rscratch );

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register)
  __ add( Rscratch, Rframe_size, Rscratch );

  // the frame is greater than one page in size, so check against
  // the bottom of the stack
  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);
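
  // The net effect of the checks above is this sketch (C-like pseudocode
  // of the generated code, not VM source):
  //   limit = (stack_base - stack_size)          // lowest stack address
  //         + red_zone_size + yellow_zone_size   // protected zone
  //         + frame_size;
  //   if (SP > limit) goto after_frame_check;    // enough space
  //   // otherwise fall through and throw StackOverflowError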

  // the stack will overflow, throw an exception

  // Note that SP is restored to sender's sp (in the delay slot). This
  // is necessary if the sender's frame is an extended compiled frame
  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
  // adaptations.

  // Note also that the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
  __ jump_to(stub, Rscratch);
  __ delayed()->mov(O5_savedSP, SP);

  // if you get to here, then there is enough stack space
  __ bind( after_frame_check );
}


//
// Generate a fixed interpreter frame. This setup is identical for interpreted
// methods and for native methods, hence the shared code.


//----------------------------------------------------------------------------------------------------
// Stack frame layout
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the Method* of the method to call
//    Lesp:      points to the TOS of the caller's expression stack
//               after having pushed all the parameters
//
// The entry code does the following to set up an interpreter frame
//   pop parameters from the caller's stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//               + vm_local_words
//               + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
// The frame has now been set up to do the rest of the entry code

// Try this optimization:  Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations.  It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.
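
// In rough C-like terms, the dynamic frame setup described above amounts to
// (an illustrative sketch; exact sizes are computed in generate_fixed_frame,
// and "register_save_words" is an illustrative name):
//   X = (max_locals - num_parameters) * wordSize;   // room for extra locals
//   SP -= X;                                        // grow the caller's frame
//   Y = (max_stack + vm_local_words + register_save_words) * wordSize;
//   save %sp, -Y, %sp                               // new register window/frame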

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are set up:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs    : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP for the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be set up:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    Method::extra_stack_entries() +            // extra stack for jsr 292
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ?
     frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register RconstMethod = Glocals_size;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was set up correctly.
  const Address constMethod       (G5_method, Method::const_offset());
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size,  extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    const Address size_of_locals(Otmp1, ConstMethod::size_of_locals_offset());
    __ ld_ptr( constMethod, Otmp1 );
    __ lduh( size_of_locals, Otmp1 );
    __ sub( Otmp1, Glocals_size, Glocals_size );
    __ round_to( Glocals_size, WordsPerLong );
    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );

    // see if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ ld_ptr( constMethod, Gframe_size );
    __ lduh( Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size );
    __ add( Gframe_size, extra_space, Gframe_size );
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add( Gframe_size, Glocals_size, Gframe_size );

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);

    __ sub( Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub( SP, Glocals_size, SP );
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );
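
  // SPARC's save computes the new SP as "SP + Gframe_size" while shifting
  // the register window, hence the negation above; in effect (sketch):
  //   save %sp, -frame_size_in_bytes, %sp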

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                 // set Lmethod
  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
#ifdef _LP64
  __ add( Lmonitors, STACK_BIAS, Lmonitors );  // Account for 64 bit stack bias
#endif
  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals

  if (ProfileInterpreter) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    __ set_method_data_pointer();
  }

}

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // In the G1 code we don't check if we need to reach a safepoint. We
    // continue and the thread will safepoint at the next bytecode dispatch.

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    // check if local 0 == NULL and go the slow path
    __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);


    // Load the value of the referent field.
    if (Assembler::is_simm13(referent_offset)) {
      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
    } else {
      __ set(referent_offset, G3_scratch);
      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
    }
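
    // The pre-barrier emitted below is, in effect (a sketch; helper names
    // here are illustrative, not actual VM symbols):
    //   if (satb_marking_active(thread) && Otos_i != NULL)
    //     satb_log_enqueue(thread, Otos_i);   // record the referent value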

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer. Note that with
    // these parameters the pre-barrier does not generate
    // the load of the previous value

    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
                            Otos_i /* pre_val */,
                            G3_scratch /* tmp */,
                            true /* preserve_o_regs */);

    // _areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {

  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    Label L_slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
    __ set(SafepointSynchronize::_not_synchronized, O3);
    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);

    // Load parameters
    const Register crc   = O0;  // initial crc
    const Register val   = O1;  // byte to update with
    const Register table = O2;  // address of 256-entry lookup table

    __ ldub(Gargs, 3, val);
    __ lduw(Gargs, 8, crc);

    __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);

    __ not1(crc);  // ~crc
    __ clruwu(crc);
    __ update_byte_crc32(crc, val, table);
    __ not1(crc);  // ~crc

    // result in O0
    __ retl();
    __ delayed()->nop();

    // generate a vanilla native entry as the slow path
    __ bind(L_slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    Label L_slow_path;
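    // The safepoint test that follows is, in intent (sketch):
    //   if (SafepointSynchronize::_state != SafepointSynchronize::_not_synchronized)
    //     goto L_slow_path;   // take the full native entry instead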
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
    __ set(SafepointSynchronize::_not_synchronized, O3);
    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);

    // Load parameters from the stack
    const Register crc    = O0;  // initial crc
    const Register buf    = O1;  // source java byte array address
    const Register len    = O2;  // len
    const Register offset = O3;  // offset

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ lduw(Gargs, 0,  len);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 32, crc);
      __ add(buf, offset, buf);
    } else {
      __ lduw(Gargs, 0,  len);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 24, crc);
      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
      __ add(buf, offset, buf);
    }

    // Call the crc32 kernel
    __ MacroAssembler::save_thread(L7_thread_cache);
    __ kernel_crc32(crc, buf, len, O3);
    __ MacroAssembler::restore_thread(L7_thread_cache);

    // result in O0
    __ retl();
    __ delayed()->nop();

    // generate a vanilla native entry as the slow path
    __ bind(L_slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

// Not supported
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  return NULL;
}

// Not supported
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  return NULL;
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which haven't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
  // check this thread local flag.
  // This flag has two effects, one is to force an unwind in the topmost
  // interpreter frame and not perform an unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }


  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.
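
  // The signature-handler lookup just below is, in effect (sketch):
  //   handler = method->signature_handler();
  //   if (handler == NULL) {
  //     InterpreterRuntime::prepare_native_call(thread, method);  // install one
  //     handler = method->signature_handler();
  //   }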

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, Method::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ br_notnull_short(G3_scratch, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in it.
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);     // Calculate negative of current frame size
  __ save(SP, O3, SP);    // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);  // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // call signature handler; it will move the args properly since Llocals in
  // the current frame matches that in the outer frame

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());

    __ ld_ptr(Lmethod, Method::const_offset(), O1);
    __ ld_ptr(O1, ConstMethod::constants_offset(), O1);
    __ ld_ptr(O1, ConstantPool::pool_holder_offset_in_bytes(), O1);
    __ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ br_notnull_short(O1, Assembler::pt, L);
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }
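
  // For static methods the block above passes the class mirror as the
  // receiver; in effect (sketch):
  //   interpreter_frame_oop_temp = method->constants()->pool_holder()->java_mirror();
  //   O1 = &interpreter_frame_oop_temp;   // JNI handle to the mirror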

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch. O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ br_notnull_short(O0, Assembler::pt, L);
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flushw();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);

  // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
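
    // The hand-off below is, in effect (sketch):
    //   thread->set_state(_thread_in_native_trans);
    //   full_fence();  // make the state visible before we poll
    //   if (SafepointSynchronize::_state != _not_synchronized ||
    //       thread->suspend_flags() != 0)
    //     JavaThread::check_special_condition_for_native_trans(thread);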
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
    __ bind(L);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef _LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* _LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // reset handle block
  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

  // If we have an oop result, store it where it will be safe for any further gc
  // until we return, now that we've released the handle it might be protected by
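
  // The block below is, in effect (sketch):
  //   if (result_handler == result_handler(T_OBJECT)) {
  //     O0 = (O0 != NULL) ? *(oop*)O0 : NULL;   // unbox the JNI handle
  //     interpreter_frame_oop_temp = O0;        // where GC can find it
  //   }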

  { Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);  // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                // unbox it
    __ mov(G0, O0);

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);

  }


  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);
    __ br_null_short(Gtemp, Assembler::pt, L);
    // Note: This could be handled more efficiently since we know that the native
    //       method doesn't have an exception handler. We could directly return
    //       to the exception handler for the caller.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1; we can't tell if we're returning to
  // interpreted or compiled code, so just be safe.

  __ sllx(O0, 32, G1);    // Shift bits into high G1
  __ srl (O1,  0, O1);    // Zero extend O1
  __ or3 (O1, G1, G1);    // OR 64 bits into G1

#endif /* COMPILER2 && !_LP64 */

  // dispose of return address and remove activation
#ifdef ASSERT
  { Label ok;
    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();


  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}


// Generic method entry to (asm) interpreter
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address constMethod(G5_method, Method::const_offset());
  // G5_method seems to be live at the point this is used, so we could make
  // this look consistent and use it in the asserts.
  const Address access_flags(Lmethod, Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

#ifdef FAST_DISPATCH
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
                                          // set bytecode dispatch table base
#endif

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg;    // will be G0 if we must clear locals
  // The way the code was setup before zerolocals was always true for vanilla java entries.
  // It could only be false for the specialized entries like accessor or empty which have
  // no extra locals so the testing was a waste of time and the extra locals were always
  // initialized. We removed this extra complication to already over complicated code.

  init_value = G0;
  Label clear_loop;

  const Register RconstMethod = O1;
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize, O2);
  __ sll( O1, Interpreter::logStackElementSize, O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );
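
  // The loop above zeroes the non-parameter local slots; in effect (sketch):
  //   for (p = Llocals - (locals - 1) * wordSize;
  //        p <= Llocals - params * wordSize; p += wordSize)
  //     *(intptr_t*)p = 0;   // NULL, so GC sees valid oops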

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which haven't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
  // check this thread local flag.
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch);
  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);


  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ ba_short(profile_method_continue);
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}

//----------------------------------------------------------------------------------------------------
// Exceptions
void TemplateInterpreterGenerator::generate_throw_exception() {

  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // O0: exception

  // entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  __ verify_thread();
  // expression stack is undefined here
  // O0: exception, i.e. Oexception
  // Lbcp: exception bcp
  __ verify_oop(Oexception);


  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  // call C routine to find handler and jump to it
  __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
  __ push_ptr(O1); // push exception for exception handler bytecodes

  __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
  __ delayed()->nop();

  // if the exception is not handled in the current frame
  // the frame is removed and the exception is rethrown
  // (i.e. the exception continuation is _rethrow_exception)
  //
  // Note: At this point the bcp still refers to the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).

  // in current activation
  // tos: exception
  // Lbcp: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
  // Set the popframe_processing bit in popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.

  __ ld(popframe_condition_addr, G3_scratch);
  __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
  __ stw(G3_scratch, popframe_condition_addr);

  // Empty the expression stack, as in normal exception handling
  __ empty_expression_stack();
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
    __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);

    const Register Gtmp1 = G3_scratch;
    const Register Gtmp2 = G1_scratch;
    const Register RconstMethod = Gtmp1;
    const Address constMethod(Lmethod, Method::const_offset());
    const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

    // Compute size of arguments for saving when returning to deoptimized caller
    __ ld_ptr(constMethod, RconstMethod);
    __ lduh(size_of_parameters, Gtmp1);
    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
    __ sub(Llocals, Gtmp1, Gtmp2);
    __ add(Gtmp2, wordSize, Gtmp2);
    // Save these arguments
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
    // Inform deoptimization that it is responsible for restoring these arguments
    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
    __ st(Gtmp1, popframe_condition_addr);
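    // The address arithmetic above, in outline (a sketch; Llocals points at
    // local #0 and parameters occupy the highest-addressed local slots):
    //
    //   size  = size_of_parameters << logStackElementSize;  // bytes   (Gtmp1)
    //   start = Llocals - size + wordSize;                  // lowest parameter slot (Gtmp2)
    //   Deoptimization::popframe_preserve_args(thread, size, start);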
    // Return from the current method
    // The caller's SP was adjusted upon method entry to accommodate
    // the callee's non-argument locals. Undo that adjustment.
    __ ret();
    __ delayed()->restore(I5_savedSP, G0, SP);

    __ bind(caller_not_deoptimized);
  }

  // Clear the popframe condition flag
  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);

  // Get out of the current method (how this is done depends on the particular compiler calling
  // convention that the interpreter currently follows)
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ restore(I5_savedSP, G0, SP);
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

#if INCLUDE_JVMTI
  {
    Label L_done;

    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);

    __ br_null(G1_scratch, false, Assembler::pn, L_done);
    __ delayed()->nop();

    __ st_ptr(G1_scratch, Lesp, wordSize);  // put the member name back on the expression stack
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Resume bytecode interpretation at the current bcp
  __ dispatch_next(vtos);
  // end of JVMTI PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
  __ pop_ptr(Oexception);  // get exception

  // Intel has the following comment:
  //// remove the activation (without doing throws on illegalMonitorExceptions)
  // They remove the activation without checking for bad monitor state.
  // %%% We should make sure this is the right semantics before implementing.

  __ set_vm_result(Oexception);
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);

  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);

  __ get_vm_result(Oexception);
  __ verify_oop(Oexception);

  const int return_reg_adjustment = frame::pc_return_offset;
  Address issuing_pc_addr(I7, return_reg_adjustment);

  // We are done with this activation frame; find out where to go next.
  // The continuation point will be an exception handler, which expects
  // the following registers set up:
  //
  // Oexception: exception
  // Oissuing_pc: the PC of the local call that threw the exception
  // Other On: garbage
  // In/Ln: the contents of the caller's register window
  //
  // We do the required restore at the last possible moment, because we
  // need to preserve some state across a runtime call.
  // (Remember that the caller activation is unknown--it might not be
  // interpreted, so things like Lscratch are useless in the caller.)

  // Although the Intel version uses call_C, we can use the more
  // compact call_VM. (The only real difference on SPARC is a
  // harmlessly ignored [re]set_last_Java_frame, compared with
  // the Intel code which lacks this.)
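  // (A note on after_save(), added for clarity: it names the register that
  // holds the same value once the register window shifts, e.g. O0 -> I0.
  // Writing the I registers here means the values land in the caller's O
  // registers when the restore in the delay slot below executes.)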
  __ mov(Oexception, Oexception->after_save());       // get exception in I0 so it will be on O0 after restore
  __ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller
  __ super_call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ JMP(O0, 0);  // return exception handler in caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  // (same old exception object is already in Oexception; see above)
  // Note that an "issuing PC" is actually the next PC after the call
}


//
// JVMTI ForceEarlyReturn support
//

address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);

  __ remove_activation(state,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret();  // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  return entry;
} // end of JVMTI ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep, address& cep, address& sep,
                                                         address& aep, address& iep, address& lep,
                                                         address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr(); __ ba_short(L);
  fep = __ pc(); __ push_f();   __ ba_short(L);
  dep = __ pc(); __ push_d();   __ ba_short(L);
  lep = __ pc(); __ push_l();   __ ba_short(L);
  iep = __ pc(); __ push_i();
  bep = cep = sep = iep;        // there aren't separate byte/char/short entries; reuse the int entry
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}

// --------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ mov(O7, Lscratch);  // protect return address within interpreter

  // Pass a 0 (not used on SPARC) and the top of stack to the bytecode tracer
  __ mov(Otos_l2, G3_scratch);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
  __ mov(Lscratch, O7);  // restore return address
  __ pop(state);
  __ retl();
  __ delayed()->nop();

  return entry;
}


// helpers for generate_and_dispatch

void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
}
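// (count_bytecode above bumps a global counter through the two scratch
//  registers -- presumably a plain load/increment/store sequence, so the
//  count is statistical rather than exact under concurrent execution.)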
void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  AddressLiteral index   (&BytecodePairHistogram::_index);
  AddressLiteral counters((address) &BytecodePairHistogram::_counters);

  // get index, shift out old bytecode, bring in new bytecode, and store it
  // _index = (_index >> log2_number_of_codes) |
  //          (bytecode << log2_number_of_codes);

  __ load_contents(index, G4_scratch);
  __ srl(G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch);
  __ set(((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch);
  __ or3(G3_scratch, G4_scratch, G4_scratch);
  __ store_contents(G4_scratch, index, G3_scratch);

  // bump bucket contents
  // _counters[_index]++;

  __ set(counters, G3_scratch);                     // load counters base into G3_scratch
  __ sll(G4_scratch, LogBytesPerWord, G4_scratch);  // scale index to a byte offset
  __ add(G3_scratch, G4_scratch, G3_scratch);       // add in index
  __ ld(G3_scratch, 0, G4_scratch);
  __ inc(G4_scratch);
  __ st(G4_scratch, 0, G3_scratch);
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  address entry = Interpreter::trace_code(t->tos_in());
  guarantee(entry != NULL, "entry must have been generated");
  __ call(entry, relocInfo::none);
  __ delayed()->nop();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  AddressLiteral counter(&BytecodeCounter::_counter_value);
  __ load_contents(counter, G3_scratch);
  AddressLiteral stop_at(&StopInterpreterAt);
  __ load_ptr_contents(stop_at, G4_scratch);
  __ cmp(G3_scratch, G4_scratch);
  __ breakpoint_trap(Assembler::equal, Assembler::icc);
}
#endif // not PRODUCT