/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interp_masm_sparc.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/methodOop.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH

// Implementation of InterpreterMacroAssembler

// This file specializes the assembler with interpreter-specific macros.

const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);

#else // CC_INTERP
#ifndef STATE
#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#endif // STATE

#endif // CC_INTERP

void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  // Note: this algorithm is also used by C1's OSR entry sequence.
  // Any changes should also be applied to CodeEmitter::emit_osr_entry().
  assert_different_registers(args_size, locals_size);
  // max_locals*2 for TAGS.  Assumes that args_size has already been adjusted.
  subcc(locals_size, args_size, delta);  // extra space for non-arguments locals in words
  // Use br/mov combination because it works on both V8 and V9 and is
  // faster.
  Label skip_move;
  br(Assembler::negative, true, Assembler::pt, skip_move);
  delayed()->mov(G0, delta);
  bind(skip_move);
  round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
  sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
}

#ifndef CC_INTERP

// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch.
// The dispatch address is computed and placed in IdispatchAddress.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  assert_not_delayed();
#ifdef FAST_DISPATCH
  // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
  // they both use I2.
  assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
  ldub(Lbcp, bcp_incr, Lbyte_code);                     // load next bytecode
  add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                        // add offset to correct dispatch table
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);         // multiply by wordSize
  ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress); // get entry addr
#else
  ldub(Lbcp, bcp_incr, Lbyte_code);                     // load next bytecode
  // dispatch table to use
  AddressLiteral tbl(Interpreter::dispatch_table(state));
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);         // multiply by wordSize
  set(tbl, G3_scratch);                                 // compute addr of table
  ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress);     // get entry addr
#endif
}


// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch. The dispatch address in IdispatchAddress is used for the
// dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  assert_not_delayed();
  verify_FPU(1, state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  jmp( IdispatchAddress, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
}


void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);
}


void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  assert_not_delayed();
  ldub( Lbcp, 0, Lbyte_code);                      // load next bytecode
  dispatch_base(state, table);
}


void InterpreterMacroAssembler::call_VM_leaf_base(
  Register java_thread,
  address  entry_point,
  int      number_of_arguments
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // super call
  MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);
}


void InterpreterMacroAssembler::call_VM_base(
  Register oop_result,
  Register java_thread,
  Register last_java_sp,
  address  entry_point,
  int      number_of_arguments,
  bool     check_exception
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // See class ThreadInVMfromInterpreter, which assumes that the interpreter
  // takes responsibility for setting its own thread-state on call-out.
  // However, ThreadInVMfromInterpreter resets the state to "in_Java".

  //save_bcp();                                  // save bcp
  MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
  //restore_bcp();                               // restore bcp
  //restore_locals();                            // restore locals pointer
}


void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;

    // Check the "pending popframe condition" flag in the current thread
    ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);

    // Initiate popframe handling only if it is not already being processed.  If the flag
    // has the popframe_processing bit set, it means that this code is called *during* popframe
    // handling - we don't want to reenter.
    btst(JavaThread::popframe_pending_bit, scratch_reg);
    br(zero, false, pt, L);
    delayed()->nop();
    btst(JavaThread::popframe_processing_bit, scratch_reg);
    br(notZero, false, pt, L);
    delayed()->nop();

    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));

    // Jump to Interpreter::_remove_activation_preserving_args_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thr_state = G4_scratch;
  ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
  const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
  case ltos: ld_long(val_addr, Otos_l);                   break;
  case atos: ld_ptr(oop_addr, Otos_l);
             st_ptr(G0, oop_addr);                        break;
  case btos:                                     // fall through
  case ctos:                                     // fall through
  case stos:                                     // fall through
  case itos: ld(val_addr, Otos_l1);                       break;
  case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;
  case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;
  case vtos: /* nothing to do */                          break;
  default  : ShouldNotReachHere();
  }
  // Clean up tos value in the jvmti thread state
  or3(G0, ilgl, G3_scratch);
  stw(G3_scratch, tos_addr);
  st_long(G0, val_addr);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register thr_state = G3_scratch;
    ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
    br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
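    // Note: the check below proceeds only when earlyret_state equals
    // earlyret_pending; any other value branches past the handling code.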
    ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
    cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code
    ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);

    // Jump to Interpreter::_remove_activation_early_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1);
  MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
}
#endif /* CC_INTERP */


#ifndef CC_INTERP

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
  assert_not_delayed();
  dispatch_Lbyte_code(state, table);
}


void InterpreterMacroAssembler::dispatch_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}


void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}


// common code to dispatch and dispatch_only
// dispatch value in Lbyte_code and increment Lbcp

void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
  verify_FPU(1, state);
  // %%%%% maybe implement +VerifyActivationFrameSize here
  //verify_thread(); //too slow; we will just verify on method entry & exit
  if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
#ifdef FAST_DISPATCH
  if (table == Interpreter::dispatch_table(state)) {
    // use IdispatchTables
    add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                      // add offset to correct dispatch table
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    ld_ptr(IdispatchTables, Lbyte_code, G3_scratch);  // get entry addr
  } else {
#endif
    // dispatch table to use
    AddressLiteral tbl(table);
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    set(tbl, G3_scratch);                             // compute addr of table
    ld_ptr(G3_scratch, Lbyte_code, G3_scratch);       // get entry addr
#ifdef FAST_DISPATCH
  }
#endif
  jmp( G3_scratch, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}


// Helpers for expression stack

// Longs and doubles are Category 2 computational types in the
// JVM specification (section 3.11.1) and take 2 expression stack or
// local slots.
// Aligning them on 32 bit with tagged stacks is hard because the code generated
// for the dup* bytecodes depends on what types are already on the stack.
// If the types are split into the two stack/local slots, that is much easier
// (and we can use 0 for non-reference tags).
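// (Illustrative note: with split slots, a bytecode like dup2 can simply copy
// the top two stack slots whether they hold two ints or the two halves of a
// single long, without needing to know which case it is handling.)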

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
  assert_not_delayed();

#ifdef _LP64
  ldf(FloatRegisterImpl::D, r1, offset, d);
#else
  ldf(FloatRegisterImpl::S, r1, offset, d);
  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stf(FloatRegisterImpl::D, d, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  stf(FloatRegisterImpl::S, d, r1, offset);
  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}


// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
  assert_not_delayed();
#ifdef _LP64
  ldx(r1, offset, rd);
#else
  ld(r1, offset, rd);
  ld(r1, offset + Interpreter::stackElementSize, rd->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stx(l, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  st(l, r1, offset);
  st(l->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}

void InterpreterMacroAssembler::pop_i(Register r) {
  assert_not_delayed();
  ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
  assert_not_delayed();
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  assert_not_delayed();
  load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
  assert_not_delayed();
  ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
  assert_not_delayed();
  load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::push_i(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  st(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  assert_not_delayed();
  st_ptr(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

// remember: our convention for longs in SPARC is:
// O0 (Otos_l1) has high-order part in first word,
// O1 (Otos_l2) has low-order part in
// second word

void InterpreterMacroAssembler::push_l(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_long(r, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_f(FloatRegister f) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  stf(FloatRegisterImpl::S, f, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_d(FloatRegister d) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_double(d, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  switch (state) {
  case atos: push_ptr();          break;
  case btos: push_i();            break;
  case ctos:
  case stos: push_i();            break;
  case itos: push_i();            break;
  case ltos: push_l();            break;
  case ftos: push_f();            break;
  case dtos: push_d();            break;
  case vtos: /* nothing to do */  break;
  default  : ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();           break;
  case btos: pop_i();             break;
  case ctos:
  case stos: pop_i();             break;
  case itos: pop_i();             break;
  case ltos: pop_l();             break;
  case ftos: pop_f();             break;
  case dtos: pop_d();             break;
  case vtos: /* nothing to do */  break;
  default  : ShouldNotReachHere();
  }
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
}
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
}


void InterpreterMacroAssembler::load_receiver(Register param_count,
                                              Register recv) {
  sll(param_count, Interpreter::logStackElementSize, param_count);
  ld_ptr(Lesp, param_count, recv);  // gets receiver Oop
}

void InterpreterMacroAssembler::empty_expression_stack() {
  // Reset Lesp.
  sub( Lmonitors, wordSize, Lesp );

  // Reset SP by subtracting more space from Lesp.
  Label done;
  verify_oop(Lmethod);
  assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");

  // A native does not need to do this, since its callee does not change SP.
  ld(Lmethod, methodOopDesc::access_flags_offset(), Gframe_size);  // Load access flags.
  btst(JVM_ACC_NATIVE, Gframe_size);
  br(Assembler::notZero, false, Assembler::pt, done);
  delayed()->nop();

  // Compute max expression stack+register save area
  lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size);  // Load max stack.
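  // (Note: memory_parameter_word_sp_offset accounts for the register-window
  // save area and the ABI argument words at the bottom of every SPARC frame.)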
  add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );

  //
  // now set up a stack frame with the size computed above
  //
  //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
  sll( Gframe_size, LogBytesPerWord, Gframe_size );
  sub( Lesp, Gframe_size, Gframe_size );
  and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary
  debug_only(verify_sp(Gframe_size, G4_scratch));
#ifdef _LP64
  sub(Gframe_size, STACK_BIAS, Gframe_size );
#endif
  mov(Gframe_size, SP);

  bind(done);
}


#ifdef ASSERT
void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
  Label Bad, OK;

  // Saved SP must be aligned.
#ifdef _LP64
  btst(2*BytesPerWord-1, Rsp);
#else
  btst(LongAlignmentMask, Rsp);
#endif
  br(Assembler::notZero, false, Assembler::pn, Bad);
  delayed()->nop();

  // Saved SP, plus register window size, must not be above FP.
  add(Rsp, frame::register_save_words * wordSize, Rtemp);
#ifdef _LP64
  sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
#endif
  cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);

  // Saved SP must not be ridiculously below current SP.
  size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
  set(maxstack, Rtemp);
  sub(SP, Rtemp, Rtemp);
#ifdef _LP64
  add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
#endif
  cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);

  ba_short(OK);

  bind(Bad);
  stop("on return to interpreted call, restored SP is corrupted");

  bind(OK);
}


void InterpreterMacroAssembler::verify_esp(Register Resp) {
  // about to read or write Resp[0]
  // make sure it is not in the monitors or the register save area
  Label OK1, OK2;

  cmp(Resp, Lmonitors);
  brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
  delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pops: Lesp points into monitor area");
  bind(OK1);
#ifdef _LP64
  sub(Resp, STACK_BIAS, Resp);
#endif
  cmp(Resp, SP);
  brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
  delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pushes: Lesp points into register window");
  bind(OK2);
}
#endif // ASSERT

// Load compiled (i2c) or interpreter entry when calling from interpreted and
// do the call. Centralized so that all interpreter calls will do the same actions.
// If jvmti single stepping is on for a thread we must not call compiled code.
void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {

  // Assume we want to go compiled if available

  ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);

  if (JvmtiExport::can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    verify_thread();
    Label skip_compiled_code;

    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, scratch);
    cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
    delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
    bind(skip_compiled_code);
  }

  // the i2c_adapters need methodOop in G5_method (right? %%%)
  // do the call
#ifdef ASSERT
  {
    Label ok;
    br_notnull_short(target, Assembler::pt, ok);
    stop("null entry point");
    bind(ok);
  }
#endif // ASSERT

  // Adjust Rret first so Llast_SP can be same as Rret
  add(Rret, -frame::pc_return_offset, O7);
  add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
  // Record SP so we can remove any stack space allocated by adapter transition
  jmp(target, 0);
  delayed()->mov(SP, Llast_SP);
}

void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
  assert_not_delayed();

  Label not_taken;
  if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
  else             br (cc, false, Assembler::pn, not_taken);
  delayed()->nop();

  TemplateTable::branch(false,false);

  bind(not_taken);

  profile_not_taken_branch(G3_scratch);
}


void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(
                                  int         bcp_offset,
                                  Register    Rtmp,
                                  Register    Rdst,
                                  signedOrNot is_signed,
                                  setCCOrNot  should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  switch (is_signed) {
   default: ShouldNotReachHere();

   case   Signed:  ldsb( Lbcp, bcp_offset, Rdst );  break;  // high byte
   case Unsigned:  ldub( Lbcp, bcp_offset, Rdst );  break;  // high byte
  }
  ldub( Lbcp, bcp_offset + 1, Rtmp );  // low byte
  sll( Rdst, BitsPerByte, Rdst);
  switch (should_set_CC ) {
   default: ShouldNotReachHere();

   case      set_CC:  orcc( Rdst, Rtmp, Rdst ); break;
   case dont_set_CC:  or3(  Rdst, Rtmp, Rdst ); break;
  }
}


void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
                                  int        bcp_offset,
                                  Register   Rtmp,
                                  Register   Rdst,
                                  setCCOrNot should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  add( Lbcp, bcp_offset, Rtmp);
  andcc( Rtmp, 3, G0);
  Label aligned;
  switch (should_set_CC ) {
   default: ShouldNotReachHere();

   case      set_CC: break;
   case dont_set_CC: break;
  }

  br(Assembler::zero, true, Assembler::pn, aligned);
#ifdef _LP64
  delayed()->ldsw(Rtmp, 0, Rdst);
#else
  delayed()->ld(Rtmp, 0, Rdst);
#endif

  ldub(Lbcp, bcp_offset + 3, Rdst);
  ldub(Lbcp, bcp_offset + 2, Rtmp);  sll(Rtmp,  8, Rtmp);  or3(Rtmp, Rdst, Rdst);
  ldub(Lbcp, bcp_offset + 1, Rtmp);  sll(Rtmp, 16, Rtmp);  or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
  ldsb(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#else
  // Unsigned load is faster than signed on some implementations
  ldub(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#endif
  or3(Rtmp, Rdst, Rdst );

  bind(aligned);
  if (should_set_CC == set_CC) tst(Rdst);
}


void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
                                                       int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  }
  else if (index_size == sizeof(u4)) {
    assert(EnableInvokeDynamic, "giant index used only for JSR 292");
    get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
    assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
    xor3(tmp, -1, tmp);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    assert(EnableInvokeDynamic, "tiny index used only for JSR 292");
    ldub(Lbcp, bcp_offset, tmp);
  } else {
    ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
                                                           int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
  // convert from field index to ConstantPoolCacheEntry index and from
  // word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  add(LcpoolCache, tmp, cache);
}


void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register temp,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
  ld_ptr(cache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
  const int shift_count = (1 + byte_no) * BitsPerByte;
  srl( bytecode, shift_count, bytecode);
  and3(bytecode,        0xFF, bytecode);
}


void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                               int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  } else {
    ShouldNotReachHere();  // other sizes not supported here
  }
  // convert from field index to ConstantPoolCacheEntry index
  // and from word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  // skip past the header
  add(tmp, in_bytes(constantPoolCacheOopDesc::base_offset()), tmp);
  // construct pointer to cache entry
  add(LcpoolCache, tmp, cache);
}


// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass.  Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Register Rsuper_klass,
                                                  Register Rtmp1,
                                                  Register Rtmp2,
                                                  Register Rtmp3,
                                                  Label &ok_is_subtype ) {
  Label not_subtype;

  // Profile the not-null value's klass.
  profile_typecheck(Rsub_klass, Rtmp1);

  check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2,
                                &ok_is_subtype, &not_subtype, NULL);

  check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
                                &ok_is_subtype, NULL);

  bind(not_subtype);
  profile_typecheck_failed(Rtmp1);
}

// Separate these two to allow for delay slot in middle
// These are used to do a test and full jump to exception-throwing code.

// %%%%% Could possibly reoptimize this by testing to see if could use
// a single conditional branch (i.e. if the span is small enough).
// If you go that route, then get rid of the split and give up
// on the delay-slot hack.

void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  br(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  bp( ok_condition, true, Assembler::xcc, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition,
                                                  Label&    ok ) {
  assert_not_delayed();
  brx(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_2( address  throw_entry_point,
                                                Register Rscratch,
                                                Label&   ok ) {
  assert(throw_entry_point != NULL, "entry point must be generated by now");
  AddressLiteral dest(throw_entry_point);
  jump_to(dest, Rscratch);
  delayed()->nop();
  bind(ok);
}


// And if you cannot use the delay slot, here is a shorthand:

void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_icc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_xcc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition,
                                                address   throw_entry_point,
                                                Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_x( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}

// Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res
// Note: res is still shy of address by array offset into object.
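// (That is, res == array + (index << index_shift); the caller must still add
// the array element base offset, arrayOopDesc::base_offset_in_bytes(type),
// when forming the final element address.)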

void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  verify_oop(array);
#ifdef _LP64
  // sign extend since tos (index) can be a 32bit value
  sra(index, G0, index);
#endif // _LP64

  // check array
  Label ptr_ok;
  tst(array);
  throw_if_not_1_x( notZero, ptr_ok );
  delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index
  throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);

  Label index_ok;
  cmp(index, tmp);
  throw_if_not_1_icc( lessUnsigned, index_ok );
  if (index_shift > 0)  delayed()->sll(index, index_shift, index);
  else                  delayed()->add(array, index, res); // addr - const offset in index
  // convention: move aberrant index into G3_scratch for exception message
  mov(index, G3_scratch);
  throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);

  // add offset if didn't do it in delay slot
  if (index_shift > 0)  add(array, index, res); // addr - const offset in index
}


void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  // pop array
  pop_ptr(array);

  // check array
  index_check_without_pop(array, index, index_shift, tmp, res);
}


void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
  ld_ptr(Lmethod, in_bytes(methodOopDesc::constants_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
  get_constant_pool(Rdst);
  ld_ptr(Rdst, constantPoolOopDesc::cache_offset_in_bytes(), Rdst);
}


void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
  get_constant_pool(Rcpool);
  ld_ptr(Rcpool, constantPoolOopDesc::tags_offset_in_bytes(), Rtags);
}


// unlock if synchronized method
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
                                                              bool throw_monitor_exception,
                                                              bool install_monitor_exception) {
  Label unlocked, unlock, no_unlock;

  // get the value of _do_not_unlock_if_synchronized into G1_scratch
  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  ldbool(do_not_unlock_if_synchronized, G1_scratch);
  stbool(G0, do_not_unlock_if_synchronized); // reset the flag

  // check if synchronized method
  const Address access_flags(Lmethod, methodOopDesc::access_flags_offset());
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  push(state); // save tos
  ld(access_flags, G3_scratch); // Load access flags.
  btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
  br(zero, false, pt, unlocked);
  delayed()->nop();

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);
  delayed()->nop();

  // BasicObjectLock will be first in list, since this is a synchronized method. However, need
  // to check that the object has not been unlocked by an explicit monitorexit bytecode.

  //Intel: if (throw_monitor_exception) ... else ...
  // Entry already unlocked, need to throw exception
  //...

  // pass top-most monitor elem
  add( top_most_monitor(), O1 );

  ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
  br_notnull_short(G3_scratch, pt, unlock);

  if (throw_monitor_exception) {
    // Entry already unlocked need to throw an exception
    MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    ba_short(unlocked);
  }

  bind(unlock);

  unlock_object(O1);

  bind(unlocked);

  // I0, I1: Might contain return value

  // Check that all monitors are unlocked
  { Label loop, exception, entry, restart;

    Register Rmptr   = O0;
    Register Rtemp   = O1;
    Register Rlimit  = Lmonitors;
    const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
    assert( (delta & LongAlignmentMask) == 0,
            "sizeof BasicObjectLock must be even number of doublewords");

#ifdef ASSERT
    add(top_most_monitor(), Rmptr, delta);
    { Label L;
      // ensure that Rmptr starts out above (or at) Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("monitor stack has negative size");
      bind(L);
    }
#endif
    bind(restart);
    ba(entry);
    delayed()->
    add(top_most_monitor(), Rmptr, delta);      // points to current entry, starting with bottom-most entry

    // Entry is still locked, need to throw exception
    bind(exception);
    if (throw_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame
      unlock_object(Rmptr);
      if (install_monitor_exception) {
        MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }
      ba_short(restart);
    }

    bind(loop);
    cmp(Rtemp, G0);                             // check if current entry is used
    brx(Assembler::notEqual, false, pn, exception);
    delayed()->
    dec(Rmptr, delta);                          // otherwise advance to next entry
#ifdef ASSERT
    { Label L;
      // ensure that Rmptr has not somehow stepped below Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("ran off the end of the monitor stack");
      bind(L);
    }
#endif
    bind(entry);
    cmp(Rmptr, Rlimit);                         // check if bottom reached
    brx(Assembler::notEqual, true, pn, loop);   // if not at bottom then check this entry
    delayed()->
    ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
  }

  bind(no_unlock);
  pop(state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception) {

  unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

  // save result (push state before jvmti call and pop it afterwards) and notify jvmti
  notify_method_exit(false, state, NotifyJVMTI);

  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  verify_oop(Lmethod);
  verify_thread();

  // return tos
  assert(Otos_l1 == Otos_i, "adjust code below");
  switch (state) {
#ifdef _LP64
  case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0
#else
  case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through  // O1 -> I1
#endif
  case btos: // fall through
  case ctos:
  case stos: // fall through
  case atos: // fall through
  case itos: mov(Otos_l1, Otos_l1->after_save()); break; // O0 -> I0
  case ftos: // fall through
  case dtos: // fall through
  case vtos: /* nothing to do */ break;
  default  : ShouldNotReachHere();
  }

#if defined(COMPILER2) && !defined(_LP64)
  if (state == ltos) {
    // C2 expects long results in G1; we can't tell if we're returning to interpreted
    // or compiled code, so just be safe and use G1 as well as O0/O1.

    // Shift bits into high (msb) of G1
    sllx(Otos_l1->after_save(), 32, G1);
    // Zero extend low bits
    srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
    or3 (Otos_l2->after_save(), G1, G1);
  }
#endif /* COMPILER2 */

}
#endif /* CC_INTERP */


// Lock object
//
// Argument - lock_reg points to the BasicObjectLock to be used for locking,
//            it must be initialized with the object to lock
void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
  }
  else {
    Register obj_reg = Object;
    Register mark_reg = G4_scratch;
    Register temp_reg = G1_scratch;
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
    Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label done;

    Label slow_case;

    assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);

    // load markOop from object into mark_reg
    ld_ptr(mark_addr, mark_reg);

    if (UseBiasedLocking) {
      biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
    }

    // get the address of basicLock on stack that will be stored in the object
    // we need a temporary register here as we do not want to clobber lock_reg
    // (cas clobbers the destination register)
    mov(lock_reg, temp_reg);
    // set mark reg to be (markOop of object | UNLOCK_VALUE)
    or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
    // initialize the box  (Must happen before we update the object mark!)
    st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
    // compare and exchange object_addr, markOop | 1, stack address of basicLock
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());

    // if the compare and exchange succeeded we are done (we saw an unlocked object)
    cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);

    // We did not see an unlocked object so try the fast recursive case

    // Check if owner is self by comparing the value in the markOop of object
    // with the stack pointer
    sub(temp_reg, SP, temp_reg);
#ifdef _LP64
    sub(temp_reg, STACK_BIAS, temp_reg);
#endif
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");

    // Composite "andcc" test:
    // (a) %sp -vs- markword proximity check, and,
    // (b) verify mark word LSBs == 0 (Stack-locked).
    //
    // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
    // Note that the page size used for %sp proximity testing is arbitrary and is
    // unrelated to the actual MMU page size.  We use a 'logical' page size of
    // 4096 bytes.   F..FFF003 is designed to fit conveniently in the SIMM13 immediate
    // field of the andcc instruction.
    andcc (temp_reg, 0xFFFFF003, G0) ;

    // if condition is true we are done and hence we can store 0 in the displaced
    // header indicating it is a recursive lock and be done
    brx(Assembler::zero, true, Assembler::pt, done);
    delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());

    // none of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);

    bind(done);
  }
}

// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument - lock_reg points to the BasicObjectLock for lock
// Throw IllegalMonitorStateException if object is not locked by current thread
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Register obj_reg = G3_scratch;
    Register mark_reg = G4_scratch;
    Register displaced_header_reg = G1_scratch;
    Address lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
    Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label done;

    if (UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
      biased_locking_exit(mark_addr, mark_reg, done, true);
      st_ptr(G0, lockobj_addr);  // free entry
    }

    // Test first if we are in the fast recursive case
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
    ld_ptr(lock_addr, displaced_header_reg);
    br_null(displaced_header_reg, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // See if it is still a light weight lock, if so we just unlock
    // the object and we are done

    if (!UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
    }

    // we have the displaced header in displaced_header_reg
    // we expect to see the stack address of the basicLock in case the
    // lock is still a light weight lock (lock_reg)
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
    cmp(lock_reg, displaced_header_reg);
    brx(Assembler::equal, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);
  }
}

#ifndef CC_INTERP

// Get the method data pointer from the methodOop and set the
// specified register to its value.

void InterpreterMacroAssembler::set_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label get_continue;

  ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(get_continue);
  add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
  bind(get_continue);
}

// Set the method data pointer for the current bcp.

void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label zero_continue;

  // Test MDO to avoid the call if it is NULL.
  ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(zero_continue);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
  add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
  add(ImethodDataPtr, O0, ImethodDataPtr);
  bind(zero_continue);
}

// Test ImethodDataPtr.
// If it is null, continue at the specified label.

void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  test_method_data_pointer(verify_continue);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
  lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
  ld_ptr(Lmethod, methodOopDesc::const_offset(), O5);
  add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch);
  add(G3_scratch, O5, G3_scratch);
  cmp(Lbcp, G3_scratch);
  brx(Assembler::equal, false, Assembler::pt, verify_continue);

  Register temp_reg = O5;
  delayed()->mov(ImethodDataPtr, temp_reg);
  // %%% should use call_VM_leaf here?
  //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
  save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
  stf(FloatRegisterImpl::D, Ftos_d, d_save);
  mov(temp_reg->after_save(), O2);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  ldf(FloatRegisterImpl::D, d_save, Ftos_d);
  restore();
  bind(verify_continue);
#endif // ASSERT
}

void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
                                                                Register Rtmp,
                                                                Label &profile_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Control will flow to "profile_continue" if the counter is less than the
  // limit or if we call profile_method()

  Label done;

  // if no method data exists, and the counter is high enough, make one
  br_notnull_short(ImethodDataPtr, Assembler::pn, done);

  // Test to see if we should create a method data oop
  AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
  sethi(profile_limit, Rtmp);
  ld(Rtmp, profile_limit.low10(), Rtmp);
  cmp_and_br_short(invocation_count, Rtmp, Assembler::lessUnsigned, Assembler::pn, profile_continue);

  // Build it now.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
  set_method_data_pointer_for_bcp();
  ba_short(profile_continue);
  bind(done);
}

// Store a value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  st_ptr(value, ImethodDataPtr, constant);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
                                                      Register bumped_count,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  // Load the counter.
  ld_ptr(counter, bumped_count);

  if (decrement) {
    // Decrement the register.  Set condition codes.
    subcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the decrement causes the counter to overflow, stay negative
    Label L;
    brx(Assembler::negative, true, Assembler::pn, L);

    // Store the decremented counter, if it is still negative.
    delayed()->st_ptr(bumped_count, counter);
    bind(L);
  } else {
    // Increment the register.  Set carry flag.
    addcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the increment causes the counter to overflow, pull back by 1.
    assert(DataLayout::counter_increment == 1, "subc works");
    subc(bumped_count, G0, bumped_count);

    // Store the incremented counter.
    st_ptr(bumped_count, counter);
  }
}

// Increment the value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
                                                      Register bumped_count,
                                                      bool decrement) {
  // Locate the counter at a fixed offset from the mdp:
  Address counter(ImethodDataPtr, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Increment the value at some non-fixed (reg + constant) offset from
// the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
                                                      int constant,
                                                      Register bumped_count,
                                                      Register scratch2,
                                                      bool decrement) {
  // Add the constant to reg to get the offset.
  add(ImethodDataPtr, reg, scratch2);
  Address counter(scratch2, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Set a flag value at the current method data pointer position.
// Updates a single byte of the header, to avoid races with other header bits.

void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
                                                Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the data header
  ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch);

  // Set the flag
  or3(scratch, flag_constant, scratch);

  // Store the modified header.
  stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset()));
}

// Test the location at some offset from the method data pointer.
// If it is not equal to value, branch to the not_equal_continue Label.
// Set condition codes to match the nullness of the loaded value.

void InterpreterMacroAssembler::test_mdp_data_at(int offset,
                                                 Register value,
                                                 Label& not_equal_continue,
                                                 Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset, scratch);
  cmp(value, scratch);
  brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue);
  delayed()->tst(scratch);
}

// Update the method data pointer by the displacement located at some fixed
// offset from the method data pointer.

void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset_of_disp, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by the displacement located at the
// offset (reg + offset_of_disp).

void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
                                                     int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(reg, offset_of_disp, scratch);
  ld_ptr(ImethodDataPtr, scratch, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by a simple constant displacement.

void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(ImethodDataPtr, constant, ImethodDataPtr);
}

// Update the method data pointer for a _ret bytecode whose target
// was not among our cached targets.

void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
                                                   Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(state);
  st_ptr(return_bci, l_tmp);  // protect return_bci, in case it is volatile
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
  ld_ptr(l_tmp, return_bci);
  pop(state);
}

// Count a taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are taking a branch.  Increment the taken count.
    increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
    bind (profile_continue);
  }
}


// Count a not-taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are taking a branch.  Increment the not taken count.
    increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);

    // The method data pointer needs to be updated to correspond to the
    // next bytecode.
    update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
    bind (profile_continue);
  }
}


// Count a non-virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
    bind (profile_continue);
  }
}


// Count a final call in the bytecodes.

void InterpreterMacroAssembler::profile_final_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call.  Increment the count.
// Count a final call in the bytecodes.

void InterpreterMacroAssembler::profile_final_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind (profile_continue);
  }
}


// Count a virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register scratch,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      br_notnull_short(receiver, Assembler::pt, not_null);
      // We are making a call.  Increment the count for the null receiver.
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
      ba_short(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, scratch, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind (profile_continue);
  }
}
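// record_klass_in_profile_helper below emits an unrolled scan over the
// receiver-type rows of a VirtualCallData cell.  A hedged C model of the
// policy it implements (illustrative only, not compiled; the emitted code
// interleaves the null test with the equality scan, and the row struct is a
// simplification of the real DataLayout cells):
#if 0
#include <stddef.h>
#include <stdint.h>
struct receiver_row { void* klass; intptr_t count; };
static void record_receiver_model(struct receiver_row* rows, int row_limit,
                                  void* receiver, intptr_t* poly_count) {
  // Case 1: a row already holds this receiver -- bump its count.
  for (int row = 0; row < row_limit; row++) {
    if (rows[row].klass == receiver) { rows[row].count++; return; }
  }
  // Case 2: claim the first empty row for the new receiver.
  for (int row = 0; row < row_limit; row++) {
    if (rows[row].klass == NULL) {
      rows[row].klass = receiver;
      rows[row].count = 1;  // DataLayout::counter_increment
      return;
    }
  }
  // Case 3: every row is taken by some other type -- count the
  // polymorphic case in the total counter instead.
  (*poly_count)++;
}
#endif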
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register scratch,
                                        int start_row, Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
    }
    return;
  }

  int last_row = VirtualCallData::row_limit() - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the receiver and for null.
  // Take any of three different outcomes:
  //   1. found receiver => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the receiver is receiver[n].
    int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
    test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
    // test_mdp_data_at has set the condition codes to the nullness of the
    // row (via its delay-slot tst), for the null tests below.

    // The receiver is receiver[n].  Increment count[n].
    int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
    increment_mdp_data_at(count_offset, scratch);
    ba_short(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on receiver[n]...  Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (is_virtual_call) {
          brx(Assembler::zero, false, Assembler::pn, found_null);
          delayed()->nop();
          // Receiver did not match any saved receiver and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
          ba_short(done);
          bind(found_null);
        } else {
          brx(Assembler::notZero, false, Assembler::pt, done);
          delayed()->nop();
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      brx(Assembler::zero, false, Assembler::pn, found_null);
      delayed()->nop();

      // Put all the "Case 3" tests here.
      record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);

      // Found a null.  Keep searching for a matching receiver,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching receiver, but we
  // observed that receiver[start_row] is null.

  // Fill in the receiver field and increment the count.
  int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
  set_mdp_data_at(recvr_offset, receiver);
  int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
  mov(DataLayout::counter_increment, scratch);
  set_mdp_data_at(count_offset, scratch);
  if (start_row > 0) {
    ba_short(done);
  }
}

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register scratch, bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);

  bind (done);
}


// Count a ret in the bytecodes.

void InterpreterMacroAssembler::profile_ret(TosState state,
                                            Register return_bci,
                                            Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
                       return_bci, next_test, scratch);

      // return_bci is equal to bci[n].  Increment the count.
      increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
      ba_short(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(state, return_bci);

    bind (profile_continue);
  }
}

// Profile an unexpected null in the bytecodes.
void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp_delta);

    bind (profile_continue);
  }
}
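// profile_ret above uses the same row-cache pattern for return bcis: probe
// the cached rows, and fall back to the update_mdp_for_ret VM call on a
// miss.  A hedged C model (illustrative only, not compiled; the fields are
// simplified stand-ins for the RetData cells):
#if 0
#include <stdint.h>
#include <stdbool.h>
struct ret_row { int bci; intptr_t count; intptr_t displacement; };
static bool ret_row_hit(struct ret_row* rows, int row_limit, int return_bci,
                        intptr_t* disp_out) {
  for (int row = 0; row < row_limit; row++) {
    if (rows[row].bci == return_bci) {
      rows[row].count++;                   // increment_mdp_data_at
      *disp_out = rows[row].displacement;  // fed to update_mdp_by_offset
      return true;
    }
  }
  return false;  // caller falls back to InterpreterRuntime::update_mdp_for_ret
}
#endif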
void InterpreterMacroAssembler::profile_typecheck(Register klass,
                                                  Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, scratch, false);
    }

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp_delta);

    bind (profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter.  We expect to see zero or small negatives.
    increment_mdp_data_at(count_offset, scratch, true);

    bind (profile_continue);
  }
}

// Count the default case of a switch construct.

void InterpreterMacroAssembler::profile_switch_default(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the default case count.
    increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
                          scratch);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(
        in_bytes(MultiBranchData::default_displacement_offset()),
        scratch);

    bind (profile_continue);
  }
}
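// profile_switch_case below locates a case's cells by scaled indexing into
// the MultiBranchData case array.  A hedged sketch of the address arithmetic
// that the set/smul/add sequence performs (illustrative only, not compiled):
#if 0
static int switch_case_cell_offset(int index, int per_case_size,
                                   int case_array_offset, int relative_offset) {
  // Base of the case's cells, then the relative offset of the count or
  // displacement cell inside it.
  return case_array_offset + index * per_case_size + relative_offset;
}
#endif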
// Count the index'th case of a switch construct.

void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register scratch,
                                                    Register scratch2,
                                                    Register scratch3) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Build the offset: (index * per_case_size()) + case_array_offset()
    set(in_bytes(MultiBranchData::per_case_size()), scratch);
    smul(index, scratch, scratch);
    add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch);

    // Update the case count.
    increment_mdp_data_at(scratch,
                          in_bytes(MultiBranchData::relative_count_offset()),
                          scratch2,
                          scratch3);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(scratch,
                         in_bytes(MultiBranchData::relative_displacement_offset()),
                         scratch2);

    bind (profile_continue);
  }
}

// Add an InterpMonitorElem to the stack (see frame_sparc.hpp).

void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
                                                      Register Rtemp,
                                                      Register Rtemp2 ) {

  Register Rlimit = Lmonitors;
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  assert( (delta & LongAlignmentMask) == 0,
          "sizeof BasicObjectLock must be even number of doublewords");

  sub( SP,        delta, SP);
  sub( Lesp,      delta, Lesp);
  sub( Lmonitors, delta, Lmonitors);

  if (!stack_is_empty) {

    // must copy stack contents down

    Label start_copying, next;

    // untested("monitor stack expansion");
    compute_stack_base(Rtemp);
    ba(start_copying);
    delayed()->cmp(Rtemp, Rlimit);  // done? duplicated below

    // note: must copy from low memory upwards
    // On entry to loop,
    // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
    // Loop mutates Rtemp

    bind( next);

    st_ptr(Rtemp2, Rtemp, 0);
    inc(Rtemp, wordSize);
    cmp(Rtemp, Rlimit);  // are we done? (duplicated above)

    bind( start_copying );

    brx( notEqual, true, pn, next );
    delayed()->ld_ptr( Rtemp, delta, Rtemp2 );

    // done copying stack
  }
}

// Locals
void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld_ptr(index, 0, dst);
  // Note:  index must hold the effective address--the iinc template uses it
}

// Just like access_local_ptr but the tag is a returnAddress
void InterpreterMacroAssembler::access_local_returnAddress(Register index,
                                                           Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld_ptr(index, 0, dst);
}

void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld(index, 0, dst);
  // Note:  index must hold the effective address--the iinc template uses it
}


void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  // First half stored at index n+1 (which grows down from Llocals[n])
  load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
}


void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ldf(FloatRegisterImpl::S, index, 0, dst);
}


void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
}
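// The access_local_* routines above (and the store_local_* routines below)
// all share one addressing scheme: locals grow down from Llocals, so the
// sll/sub pair turns a local index into an effective address.  A hedged C
// sketch (illustrative only, not compiled; the flat byte arithmetic is the
// assumption):
#if 0
static char* local_slot_model(char* llocals, int index, int log_elem_size) {
  return llocals - ((long)index << log_elem_size);  // sll + sub
}
// A two-word value (long/double) occupies slots n and n+1 and is addressed
// through slot n+1, which is what Interpreter::local_offset_in_bytes(1)
// accounts for in the loads and stores around this point.
#endif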
#ifdef ASSERT
void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset,
                                                        Register Rlimit, Register Rscratch, Register Rscratch1) {
  Label L;

  assert(Rindex != Rscratch,    "Registers cannot be same");
  assert(Rindex != Rscratch1,   "Registers cannot be same");
  assert(Rlimit != Rscratch,    "Registers cannot be same");
  assert(Rlimit != Rscratch1,   "Registers cannot be same");
  assert(Rscratch1 != Rscratch, "Registers cannot be same");

  // untested("reg area corruption");
  add(Rindex, offset, Rscratch);
  add(Rlimit, 64 + STACK_BIAS, Rscratch1);
  cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
  stop("regsave area is being clobbered");
  bind(L);
}
#endif // ASSERT


void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
  st(src, index, 0);
}

void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
  st_ptr(src, index, 0);
}

void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
  st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
}

void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
  store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1));  // which is n+1
}


void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
  stf(FloatRegisterImpl::S, src, index, 0);
}


void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
  store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
}


int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
  return ((-rounded_vm_local_words * wordSize) - delta) + STACK_BIAS;
}


Address InterpreterMacroAssembler::top_most_monitor() {
  return Address(FP, top_most_monitor_byte_offset());
}


void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
  add( Lesp, wordSize, Rdest );
}

#endif /* CC_INTERP */
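// increment_invocation_counter below bumps the invocation counter and leaves
// invocation_count + masked backedge_count in Rtmp for the caller's overflow
// check; increment_backedge_counter is its mirror image.  A hedged C model
// (illustrative only, not compiled):
#if 0
static int bump_invocation_counter_model(int* inv_counter, int* be_counter,
                                         int delta, int count_mask) {
  *inv_counter += delta;                              // ld + add + st
  return *inv_counter + (*be_counter & count_mask);   // and3 + add -> Rtmp
}
#endif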
void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) {
  assert(UseCompiler, "incrementing must be useful");
#ifdef CC_INTERP
  Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
                                 InvocationCounter::counter_offset());
  Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
                                 InvocationCounter::counter_offset());
#else
  Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
                               InvocationCounter::counter_offset());
  Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
                               InvocationCounter::counter_offset());
#endif /* CC_INTERP */
  int delta = InvocationCounter::count_increment;

  // Load each counter in a register
  ld( inv_counter, Rtmp );
  ld( be_counter, Rtmp2 );

  assert( is_simm13( delta ), "delta too large.");

  // Add the delta to the invocation counter and store the result
  add( Rtmp, delta, Rtmp );

  // Mask the backedge counter
  and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );

  // Store value
  st( Rtmp, inv_counter);

  // Add invocation counter + backedge counter
  add( Rtmp, Rtmp2, Rtmp);

  // Note that this macro must leave the backedge_count + invocation_count in Rtmp!
}

void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) {
  assert(UseCompiler, "incrementing must be useful");
#ifdef CC_INTERP
  Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
                                 InvocationCounter::counter_offset());
  Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
                                 InvocationCounter::counter_offset());
#else
  Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
                               InvocationCounter::counter_offset());
  Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
                               InvocationCounter::counter_offset());
#endif /* CC_INTERP */
  int delta = InvocationCounter::count_increment;
  // Load each counter in a register
  ld( be_counter, Rtmp );
  ld( inv_counter, Rtmp2 );

  // Add the delta to the backedge counter
  add( Rtmp, delta, Rtmp );

  // Mask the invocation counter, add to backedge counter
  and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );

  // and store the result to memory
  st( Rtmp, be_counter );

  // Add backedge + invocation counter
  add( Rtmp, Rtmp2, Rtmp );

  // Note that this macro must leave backedge_count + invocation_count in Rtmp!
}
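// test_backedge_count_for_osr below compares the backedge count against
// InterpreterBackwardBranchLimit and, when ProfileInterpreter keeps the
// count from being reset, additionally rate-limits the overflow call.  A
// hedged C model of that gate (illustrative only, not compiled):
#if 0
#include <stdbool.h>
static bool should_call_frequency_counter_overflow(unsigned backedge_count,
                                                   unsigned limit,
                                                   bool profile_interpreter) {
  if (backedge_count < limit) return false;        // cmp_and_br_short
  if (profile_interpreter) {
    const unsigned overflow_frequency = 1024;      // power of two
    if ((backedge_count & (overflow_frequency - 1)) != 0) return false;  // andcc
  }
  return true;                                     // call frequency_counter_overflow
}
#endif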
#ifndef CC_INTERP
void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
                                                             Register branch_bcp,
                                                             Register Rtmp ) {
  Label did_not_overflow;
  Label overflow_with_error;
  assert_different_registers(backedge_count, Rtmp, branch_bcp);
  assert(UseOnStackReplacement, "Must UseOnStackReplacement to test_backedge_count_for_osr");

  AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
  load_contents(limit, Rtmp);
  cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);

  // When ProfileInterpreter is on, the backedge_count comes from the
  // methodDataOop, whose value does not get reset on the call to
  // frequency_counter_overflow().  To avoid excessive calls to the overflow
  // routine while the method is being compiled, add a second test to make
  // sure the overflow function is called only once every overflow_frequency.
  if (ProfileInterpreter) {
    const int overflow_frequency = 1024;
    andcc(backedge_count, overflow_frequency-1, Rtmp);
    brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
    delayed()->nop();
  }

  // overflow in loop, pass branch bytecode
  set(6, Rtmp);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);

  // Was an OSR adapter generated?
  // O0 = osr nmethod
  br_null_short(O0, Assembler::pn, overflow_with_error);

  // Has the nmethod been invalidated already?
  ld(O0, nmethod::entry_bci_offset(), O2);
  cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error);

  // migrate the interpreter frame off of the stack

  mov(G2_thread, L7);
  // save nmethod
  mov(O0, L6);
  set_last_Java_frame(SP, noreg);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
  reset_last_Java_frame();
  mov(L7, G2_thread);

  // move OSR nmethod to I1
  mov(L6, I1);

  // OSR buffer to I0
  mov(O0, I0);

  // remove the interpreter frame
  restore(I5_savedSP, 0, SP);

  // Jump to the osr code.
  // Note: after the restore, the nmethod saved in I1 above is visible as O1.
  ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
  jmp(O2, G0);
  delayed()->nop();

  bind(overflow_with_error);

  bind(did_not_overflow);
}



void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char* file, int line) {
  if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); }
}


// local helper function for the verify_oop_or_return_address macro
static bool verify_return_address(methodOopDesc* m, int bci) {
#ifndef PRODUCT
  address pc = (address)(m->constMethod())
             + in_bytes(constMethodOopDesc::codes_offset()) + bci;
  // assume it is a valid return address if it is inside m and is preceded by a jsr
  if (!m->contains(pc))                                          return false;
  address jsr_pc;
  jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
  if (*jsr_pc == Bytecodes::_jsr   && jsr_pc >= m->code_base())  return true;
  jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
  if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base())  return true;
#endif // PRODUCT
  return false;
}
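// verify_oop_or_return_address below first applies a cheap filter: a
// returnAddress produced by jsr is a small bci-like value, so any TOS value
// that survives a right shift by log2_bytecode_size_limit bits cannot be
// one and is verified as an oop instead.  A hedged C sketch (illustrative
// only, not compiled):
#if 0
#include <stdint.h>
#include <stdbool.h>
static bool might_be_return_address(uintptr_t tos_value) {
  const int log2_bytecode_size_limit = 16;  // matches the constant below
  return (tos_value >> log2_bytecode_size_limit) == 0;  // srl + null test
}
#endif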
void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
  if (!VerifyOops) return;
  // the VM documentation for the astore[_wide] bytecode allows
  // the TOS to be not only an oop but also a return address
  Label test;
  Label skip;
  // See if it is an address (in the current method):

  mov(reg, Rtmp);
  const int log2_bytecode_size_limit = 16;
  srl(Rtmp, log2_bytecode_size_limit, Rtmp);
  br_notnull_short( Rtmp, pt, test );

  // %%% should use call_VM_leaf here?
  save_frame_and_mov(0, Lmethod, O0, reg, O1);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, verify_return_address), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  br_notnull( O0, false, pt, skip );
  delayed()->restore();

  // Perform a more elaborate out-of-line call.
  // Not an address; verify it:
  bind(test);
  verify_oop(reg);
  bind(skip);
}


void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
  if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
}
#endif /* CC_INTERP */

// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   InterpreterRuntime::post_method_entry();
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_entry(method, receiver);
// }
// if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
//   SharedRuntime::rc_trace_method_entry(method, receiver);
// }

void InterpreterMacroAssembler::notify_method_entry() {

  // C++ interpreter only uses this for native methods.

  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, temp_reg);
    cmp_and_br_short(temp_reg, 0, equal, pt, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    Register temp_reg = O5;
    SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
    call_VM_leaf(noreg,
                 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 G2_thread, Lmethod);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    call_VM_leaf(noreg,
                 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
                 G2_thread, Lmethod);
  }
}
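// Both notify_method_entry above and notify_method_exit below guard the
// JVMTI posting code the same way: the check is emitted only when events
// are possible at all, and the VM call is taken only when this thread is in
// interp_only_mode.  A hedged C model (illustrative only, not compiled; the
// struct is a stand-in for the real thread state):
#if 0
#include <stdbool.h>
struct thread_model { int interp_only_mode; };
static bool should_post_method_event(const struct thread_model* t,
                                     bool can_post_interpreter_events) {
  // can_post_interpreter_events() is decided at code-generation time; the
  // interp_only_mode load (ld + cmp_and_br_short) is decided at run time.
  return can_post_interpreter_events && t->interp_only_mode != 0;
}
#endif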
// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   // save result
//   InterpreterRuntime::post_method_exit();
//   // restore result
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_exit(thread, method);
// }
//
// Native methods have their result stored in d_tmp and l_tmp;
// Java methods have their result stored in the expression stack.

void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
                                                   TosState state,
                                                   NotifyMethodExitMode mode) {
  // C++ interpreter only uses this for native methods.

  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, temp_reg);
    cmp_and_br_short(temp_reg, 0, equal, pt, L);

    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit.  For
    // native methods it assumes the result registers are saved to
    // l_scratch and d_scratch.  If this changes then the interpreter_frame_result
    // implementation will need to be updated too.

    save_return_value(state, is_native_method);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    restore_return_value(state, is_native_method);
    bind(L);
  }

  {
    Register temp_reg = O5;
    // DTrace notification
    SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
    save_return_value(state, is_native_method);
    call_VM_leaf(
        noreg,
        CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
        G2_thread, Lmethod);
    restore_return_value(state, is_native_method);
  }
}

void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
#ifdef CC_INTERP
  // result potentially in O0/O1: save it across calls
  stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
#ifdef _LP64
  stx(O0, STATE(_native_lresult));
#else
  std(O0, STATE(_native_lresult));
#endif
#else // CC_INTERP
  if (is_native_call) {
    stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
    stx(O0, l_tmp);
#else
    std(O0, l_tmp);
#endif
  } else {
    push(state);
  }
#endif // CC_INTERP
}

void InterpreterMacroAssembler::restore_return_value(TosState state, bool is_native_call) {
#ifdef CC_INTERP
  ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
#ifdef _LP64
  ldx(STATE(_native_lresult), O0);
#else
  ldd(STATE(_native_lresult), O0);
#endif
#else // CC_INTERP
  if (is_native_call) {
    ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
    ldx(l_tmp, O0);
#else
    ldd(l_tmp, O0);
#endif
  } else {
    pop(state);
  }
#endif // CC_INTERP
}

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, int mask,
                                                        Register scratch1, Register scratch2,
                                                        Condition cond, Label *where) {
  ld(counter_addr, scratch1);
  add(scratch1, increment, scratch1);
  if (is_simm13(mask)) {
    andcc(scratch1, mask, G0);
  } else {
    set(mask, scratch2);
    andcc(scratch1, scratch2, G0);
  }
  br(cond, false, Assembler::pn, *where);
  delayed()->st(scratch1, counter_addr);
}
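// A hedged C model of increment_mask_and_jump above, for the common case
// where cond is Assembler::zero (illustrative only, not compiled).  Note
// that the store sits in the branch delay slot, so the bumped counter is
// written back whether or not the branch is taken:
#if 0
#include <stdbool.h>
static bool increment_mask_and_jump_model(int* counter_addr, int increment,
                                          int mask) {
  int value = *counter_addr + increment;  // ld + add
  *counter_addr = value;                  // delayed()->st
  return (value & mask) == 0;             // andcc; branch if "zero" holds
}
#endif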