/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interp_masm_sparc.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/methodCounters.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH

// Implementation of InterpreterMacroAssembler

// This file specializes the assembler with interpreter-specific macros.

const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);

#else // CC_INTERP
#ifndef STATE
#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#endif // STATE

#endif // CC_INTERP

void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  // Note: this algorithm is also used by C1's OSR entry sequence.
  // Any changes should also be applied to CodeEmitter::emit_osr_entry().
  assert_different_registers(args_size, locals_size);
  // max_locals*2 for TAGS.  Assumes that args_size has already been adjusted.
  subcc(locals_size, args_size, delta); // extra space for non-argument locals in words
  // Use br/mov combination because it works on both V8 and V9 and is
  // faster.
  Label skip_move;
  br(Assembler::negative, true, Assembler::pt, skip_move);
  delayed()->mov(G0, delta);
  bind(skip_move);
  round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
  sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
}
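// In effect (a sketch, not emitted code): the sequence above computes
//   delta = round_to(max(locals_size - args_size, 0), WordsPerLong) * wordSize
// i.e. the byte size of the non-argument locals area, rounded so that
// SP stays 2-word aligned.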
#ifndef CC_INTERP

// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch. The dispatch address is computed and placed in IdispatchAddress.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  assert_not_delayed();
#ifdef FAST_DISPATCH
  // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
  // they both use I2.
  assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
  ldub(Lbcp, bcp_incr, Lbyte_code);                     // load next bytecode
  add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                        // add offset to correct dispatch table
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);         // multiply by wordSize
  ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr
#else
  ldub(Lbcp, bcp_incr, Lbyte_code);                     // load next bytecode
  // dispatch table to use
  AddressLiteral tbl(Interpreter::dispatch_table(state));
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);         // multiply by wordSize
  set(tbl, G3_scratch);                                 // compute addr of table
  ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress);     // get entry addr
#endif
}


// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch. The dispatch address in IdispatchAddress is used for the
// dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  assert_not_delayed();
  verify_FPU(1, state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  jmp(IdispatchAddress, 0);
  if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);
  else               delayed()->nop();
}
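// Taken together, prolog and epilog implement (C-like pseudocode, a sketch
// only):
//   code = bcp[bcp_incr];
//   addr = dispatch_table(state)[code];   // computed early, in the prolog
//   ... bytecode template body ...
//   bcp += bcp_incr; goto *addr;          // performed in the epilog
// Computing the target address in the prolog lets the table load overlap
// the template body instead of stalling at the final jump.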
void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub(Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
}


void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub(Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);
}


void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  assert_not_delayed();
  ldub(Lbcp, 0, Lbyte_code);                      // load next bytecode
  dispatch_base(state, table);
}


void InterpreterMacroAssembler::call_VM_leaf_base(
  Register java_thread,
  address  entry_point,
  int      number_of_arguments
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // super call
  MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);
}


void InterpreterMacroAssembler::call_VM_base(
  Register oop_result,
  Register java_thread,
  Register last_java_sp,
  address  entry_point,
  int      number_of_arguments,
  bool     check_exception
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // See class ThreadInVMfromInterpreter, which assumes that the interpreter
  // takes responsibility for setting its own thread-state on call-out.
  // However, ThreadInVMfromInterpreter resets the state to "in_Java".

  //save_bcp();                                  // save bcp
  MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
  //restore_bcp();                               // restore bcp
  //restore_locals();                            // restore locals pointer
}


void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;

    // Check the "pending popframe condition" flag in the current thread
    ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);

    // Initiate popframe handling only if it is not already being processed. If the flag
    // has the popframe_processing bit set, it means that this code is called *during* popframe
    // handling - we don't want to reenter.
    btst(JavaThread::popframe_pending_bit, scratch_reg);
    br(zero, false, pt, L);
    delayed()->nop();
    btst(JavaThread::popframe_processing_bit, scratch_reg);
    br(notZero, false, pt, L);
    delayed()->nop();

    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));

    // Jump to Interpreter::_remove_activation_preserving_args_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thr_state = G4_scratch;
  ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
  const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
  case ltos: ld_long(val_addr, Otos_l);                         break;
  case atos: ld_ptr(oop_addr, Otos_l);
             st_ptr(G0, oop_addr);                              break;
  case btos:                                            // fall through
  case ctos:                                            // fall through
  case stos:                                            // fall through
  case itos: ld(val_addr, Otos_l1);                             break;
  case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f);       break;
  case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d);       break;
  case vtos: /* nothing to do */                                break;
  default  : ShouldNotReachHere();
  }
  // Clean up tos value in the jvmti thread state
  or3(G0, ilgl, G3_scratch);
  stw(G3_scratch, tos_addr);
  st_long(G0, val_addr);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}
void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register thr_state = G3_scratch;
    ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
    br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
    cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code
    ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);

    // Jump to Interpreter::_remove_activation_early_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1);
  MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
}
#endif /* CC_INTERP */


#ifndef CC_INTERP

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
  assert_not_delayed();
  dispatch_Lbyte_code(state, table);
}


void InterpreterMacroAssembler::dispatch_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}


void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}


// common code to dispatch and dispatch_only
// dispatch value in Lbyte_code and increment Lbcp

void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
  verify_FPU(1, state);
  // %%%%% maybe implement +VerifyActivationFrameSize here
  //verify_thread(); //too slow; we will just verify on method entry & exit
  if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
#ifdef FAST_DISPATCH
  if (table == Interpreter::dispatch_table(state)) {
    // use IdispatchTables
    add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                      // add offset to correct dispatch table
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    ld_ptr(IdispatchTables, Lbyte_code, G3_scratch);  // get entry addr
  } else {
#endif
    // dispatch table to use
    AddressLiteral tbl(table);
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    set(tbl, G3_scratch);                             // compute addr of table
    ld_ptr(G3_scratch, Lbyte_code, G3_scratch);       // get entry addr
#ifdef FAST_DISPATCH
  }
#endif
  jmp(G3_scratch, 0);
  if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);
  else               delayed()->nop();
}
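// Note on the FAST_DISPATCH path (a sketch of the layout it assumes): the
// per-TosState dispatch tables are laid out contiguously and IdispatchTables
// holds their common base, so adding distance_from_dispatch_table(state) to
// the bytecode turns it into an index into the combined table. One base
// register then serves every TosState without reloading a table address.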
// Helpers for expression stack

// Longs and doubles are Category 2 computational types in the
// JVM specification (section 3.11.1) and take 2 expression stack or
// local slots.
// Aligning them on 32 bit with tagged stacks is hard because the code generated
// for the dup* bytecodes depends on what types are already on the stack.
// If the types are split into the two stack/local slots, that is much easier
// (and we can use 0 for non-reference tags).

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
  assert_not_delayed();

#ifdef _LP64
  ldf(FloatRegisterImpl::D, r1, offset, d);
#else
  ldf(FloatRegisterImpl::S, r1, offset, d);
  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stf(FloatRegisterImpl::D, d, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  stf(FloatRegisterImpl::S, d, r1, offset);
  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}


// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
  assert_not_delayed();
#ifdef _LP64
  ldx(r1, offset, rd);
#else
  ld(r1, offset, rd);
  ld(r1, offset + Interpreter::stackElementSize, rd->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stx(l, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  st(l, r1, offset);
  st(l->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}

void InterpreterMacroAssembler::pop_i(Register r) {
  assert_not_delayed();
  ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
  assert_not_delayed();
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  assert_not_delayed();
  load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
  assert_not_delayed();
  ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
  assert_not_delayed();
  load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::push_i(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  st(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  assert_not_delayed();
  st_ptr(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}
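// Invariant worth noting (inferred from the helpers above): the expression
// stack grows downward and Lesp points one slot below the current top, so a
// push stores at Lesp[0] and then decrements Lesp, while a pop reads at
// expr_offset_in_bytes(0) (one slot above Lesp) and then increments it.
// Category 2 values (long/double) move Lesp by two slots.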
// remember: our convention for longs in SPARC is:
// O0 (Otos_l1) has high-order part in first word,
// O1 (Otos_l2) has low-order part in second word

void InterpreterMacroAssembler::push_l(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_long(r, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_f(FloatRegister f) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  stf(FloatRegisterImpl::S, f, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_d(FloatRegister d) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_double(d, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  switch (state) {
  case atos: push_ptr();            break;
  case btos: push_i();              break;
  case ctos:
  case stos: push_i();              break;
  case itos: push_i();              break;
  case ltos: push_l();              break;
  case ftos: push_f();              break;
  case dtos: push_d();              break;
  case vtos: /* nothing to do */    break;
  default  : ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();             break;
  case btos: pop_i();               break;
  case ctos:
  case stos: pop_i();               break;
  case itos: pop_i();               break;
  case ltos: pop_l();               break;
  case ftos: pop_f();               break;
  case dtos: pop_d();               break;
  case vtos: /* nothing to do */    break;
  default  : ShouldNotReachHere();
  }
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
}
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
}


void InterpreterMacroAssembler::load_receiver(Register param_count,
                                              Register recv) {
  sll(param_count, Interpreter::logStackElementSize, param_count);
  ld_ptr(Lesp, param_count, recv);  // gets receiver oop
}

void InterpreterMacroAssembler::empty_expression_stack() {
  // Reset Lesp.
  sub(Lmonitors, wordSize, Lesp);

  // Reset SP by subtracting more space from Lesp.
  Label done;
  assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");

  // A native does not need to do this, since its callee does not change SP.
  ld(Lmethod, Method::access_flags_offset(), Gframe_size);  // Load access flags.
  btst(JVM_ACC_NATIVE, Gframe_size);
  br(Assembler::notZero, false, Assembler::pt, done);
  delayed()->nop();

  // Compute max expression stack+register save area
  ld_ptr(Lmethod, in_bytes(Method::const_offset()), Gframe_size);
  lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size);  // Load max stack.
  add(Gframe_size, frame::memory_parameter_word_sp_offset+Method::extra_stack_entries(), Gframe_size);

  //
  // now set up a stack frame with the size computed above
  //
  //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
  sll(Gframe_size, LogBytesPerWord, Gframe_size);
  sub(Lesp, Gframe_size, Gframe_size);
  and3(Gframe_size, -(2 * wordSize), Gframe_size);  // align SP (downwards) to an 8/16-byte boundary
  debug_only(verify_sp(Gframe_size, G4_scratch));
#ifdef _LP64
  sub(Gframe_size, STACK_BIAS, Gframe_size);
#endif
  mov(Gframe_size, SP);

  bind(done);
}
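// In effect (sketch): for a non-native method the code above computes
//   SP = align_down(Lesp - (max_stack + extra_stack_entries()
//                           + memory_parameter_word_sp_offset) * wordSize,
//                   2 * wordSize) - STACK_BIAS   // bias applied on LP64 only
// i.e. it re-establishes the minimal frame that still covers the maximal
// expression stack plus the register-save/memory-parameter area.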
#ifdef ASSERT
void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
  Label Bad, OK;

  // Saved SP must be aligned.
#ifdef _LP64
  btst(2*BytesPerWord-1, Rsp);
#else
  btst(LongAlignmentMask, Rsp);
#endif
  br(Assembler::notZero, false, Assembler::pn, Bad);
  delayed()->nop();

  // Saved SP, plus register window size, must not be above FP.
  add(Rsp, frame::register_save_words * wordSize, Rtemp);
#ifdef _LP64
  sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
#endif
  cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);

  // Saved SP must not be ridiculously below current SP.
  size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
  set(maxstack, Rtemp);
  sub(SP, Rtemp, Rtemp);
#ifdef _LP64
  add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
#endif
  cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);

  ba_short(OK);

  bind(Bad);
  stop("on return to interpreted call, restored SP is corrupted");

  bind(OK);
}


void InterpreterMacroAssembler::verify_esp(Register Resp) {
  // about to read or write Resp[0]
  // make sure it is not in the monitors or the register save area
  Label OK1, OK2;

  cmp(Resp, Lmonitors);
  brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
  delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pops: Lesp points into monitor area");
  bind(OK1);
#ifdef _LP64
  sub(Resp, STACK_BIAS, Resp);
#endif
  cmp(Resp, SP);
  brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
  delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pushes: Lesp points into register window");
  bind(OK2);
}
#endif // ASSERT
// Load compiled (i2c) or interpreter entry when calling from interpreted and
// do the call. Centralized so that all interpreter calls will do the same actions.
// If jvmti single stepping is on for a thread we must not call compiled code.
void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {

  // Assume we want to go compiled if available

  ld_ptr(G5_method, in_bytes(Method::from_interpreted_offset()), target);

  if (JvmtiExport::can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    verify_thread();
    Label skip_compiled_code;

    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, scratch);
    cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
    delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
    bind(skip_compiled_code);
  }

  // the i2c_adapters need Method* in G5_method (right? %%%)
  // do the call
#ifdef ASSERT
  {
    Label ok;
    br_notnull_short(target, Assembler::pt, ok);
    stop("null entry point");
    bind(ok);
  }
#endif // ASSERT

  // Adjust Rret first so Llast_SP can be same as Rret
  add(Rret, -frame::pc_return_offset, O7);
  add(Lesp, BytesPerWord, Gargs);  // setup parameter pointer
  // Record SP so we can remove any stack space allocated by adapter transition
  jmp(target, 0);
  delayed()->mov(SP, Llast_SP);
}

void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
  assert_not_delayed();

  Label not_taken;
  if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
  else             br (cc, false, Assembler::pn, not_taken);
  delayed()->nop();

  TemplateTable::branch(false, false);

  bind(not_taken);

  profile_not_taken_branch(G3_scratch);
}


void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(
                                  int         bcp_offset,
                                  Register    Rtmp,
                                  Register    Rdst,
                                  signedOrNot is_signed,
                                  setCCOrNot  should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  switch (is_signed) {
  default: ShouldNotReachHere();

  case Signed:   ldsb(Lbcp, bcp_offset, Rdst);  break;  // high byte
  case Unsigned: ldub(Lbcp, bcp_offset, Rdst);  break;  // high byte
  }
  ldub(Lbcp, bcp_offset + 1, Rtmp);  // low byte
  sll(Rdst, BitsPerByte, Rdst);
  switch (should_set_CC) {
  default: ShouldNotReachHere();

  case set_CC:      orcc(Rdst, Rtmp, Rdst);  break;
  case dont_set_CC: or3 (Rdst, Rtmp, Rdst);  break;
  }
}
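// In effect (sketch): Rdst = (bcp[bcp_offset] << 8) | bcp[bcp_offset + 1],
// with the high byte sign-extended when is_signed == Signed. Bytecode
// operands are big-endian, so the byte-at-a-time form works regardless of
// the alignment of Lbcp + bcp_offset.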
void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
                                  int        bcp_offset,
                                  Register   Rtmp,
                                  Register   Rdst,
                                  setCCOrNot should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  add(Lbcp, bcp_offset, Rtmp);
  andcc(Rtmp, 3, G0);
  Label aligned;
  switch (should_set_CC) {
  default: ShouldNotReachHere();

  case set_CC:      break;
  case dont_set_CC: break;
  }

  br(Assembler::zero, true, Assembler::pn, aligned);
#ifdef _LP64
  delayed()->ldsw(Rtmp, 0, Rdst);
#else
  delayed()->ld(Rtmp, 0, Rdst);
#endif

  ldub(Lbcp, bcp_offset + 3, Rdst);
  ldub(Lbcp, bcp_offset + 2, Rtmp);  sll(Rtmp,  8, Rtmp);  or3(Rtmp, Rdst, Rdst);
  ldub(Lbcp, bcp_offset + 1, Rtmp);  sll(Rtmp, 16, Rtmp);  or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
  ldsb(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#else
  // Unsigned load is faster than signed on some implementations
  ldub(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#endif
  or3(Rtmp, Rdst, Rdst);

  bind(aligned);
  if (should_set_CC == set_CC) tst(Rdst);
}

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register temp, Register index,
                                                       int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned);
  } else if (index_size == sizeof(u4)) {
    get_4_byte_integer_at_bcp(bcp_offset, temp, index);
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    xor3(index, -1, index);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    ldub(Lbcp, bcp_offset, index);
  } else {
    ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
                                                           int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
  // convert from field index to ConstantPoolCacheEntry index and from
  // word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  add(LcpoolCache, tmp, cache);
}
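// In effect (sketch):
//   cache = LcpoolCache + index * (ConstantPoolCacheEntry::size() in bytes);
// the shift works because the entry size is a power-of-two number of bytes.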
void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register temp,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
  ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
  const int shift_count = (1 + byte_no) * BitsPerByte;
  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
         "correct shift count");
  srl(bytecode, shift_count, bytecode);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode);
}


void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                               int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  } else {
    ShouldNotReachHere();  // other sizes not supported here
  }
  // convert from field index to ConstantPoolCacheEntry index
  // and from word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  // skip past the header
  add(tmp, in_bytes(ConstantPoolCache::base_offset()), tmp);
  // construct pointer to cache entry
  add(LcpoolCache, tmp, cache);
}


// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index) {
  assert_different_registers(result, index);
  assert_not_delayed();
  // convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a java object, it can be compressed
  Register tmp = index;  // reuse
  sll(index, LogBytesPerHeapOop, tmp);
  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
  // JNIHandles::resolve(result)
  ld_ptr(result, 0, result);
  // Add in the index
  add(result, tmp, result);
  load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
}
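// Roughly equivalent pseudocode (a sketch only):
//   objArrayOop refs = JNIHandles::resolve(cpool->resolved_references());
//   result = refs->obj_at(index);   // base offset + index * heapOopSize
// The final load goes through load_heap_oop because the array elements may
// be compressed oops.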
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass.  Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Register Rsuper_klass,
                                                  Register Rtmp1,
                                                  Register Rtmp2,
                                                  Register Rtmp3,
                                                  Label &ok_is_subtype ) {
  Label not_subtype;

  // Profile the not-null value's klass.
  profile_typecheck(Rsub_klass, Rtmp1);

  check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2,
                                &ok_is_subtype, &not_subtype, NULL);

  check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
                                &ok_is_subtype, NULL);

  bind(not_subtype);
  profile_typecheck_failed(Rtmp1);
}

// Separate these two to allow for delay slot in middle
// These are used to do a test and full jump to exception-throwing code.

// %%%%% Could possibly reoptimize this by testing to see if we could use
// a single conditional branch (i.e., if the span is small enough).
// If you go that route, then get rid of the split and give up
// on the delay-slot hack.

void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  br(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  bp( ok_condition, true, Assembler::xcc, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition,
                                                  Label&    ok ) {
  assert_not_delayed();
  brx(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_2( address  throw_entry_point,
                                                Register Rscratch,
                                                Label&   ok ) {
  assert(throw_entry_point != NULL, "entry point must be generated by now");
  AddressLiteral dest(throw_entry_point);
  jump_to(dest, Rscratch);
  delayed()->nop();
  bind(ok);
}


// And if you cannot use the delay slot, here is a shorthand:

void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_icc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_xcc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition,
                                                address   throw_entry_point,
                                                Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_x( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}

// Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res.
// Note: res is still shy of address by array offset into object.

void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  verify_oop(array);
#ifdef _LP64
  // sign extend since tos (index) can be a 32bit value
  sra(index, G0, index);
#endif // _LP64

  // check array
  Label ptr_ok;
  tst(array);
  throw_if_not_1_x( notZero, ptr_ok );
  delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index
  throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);

  Label index_ok;
  cmp(index, tmp);
  throw_if_not_1_icc( lessUnsigned, index_ok );
  if (index_shift > 0)  delayed()->sll(index, index_shift, index);
  else                  delayed()->add(array, index, res); // addr - const offset in index
  // convention: move aberrant index into G3_scratch for exception message
  mov(index, G3_scratch);
  throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);

  // add offset if didn't do it in delay slot
  if (index_shift > 0)  add(array, index, res); // addr - const offset in index
}
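// Note on the bounds check above: a single unsigned comparison
// (index < length, lessUnsigned) rejects negative indices as well, since in
// two's complement a negative index compares as a very large unsigned value.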
void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  // pop array
  pop_ptr(array);

  // check array
  index_check_without_pop(array, index, index_shift, tmp, res);
}


void InterpreterMacroAssembler::get_const(Register Rdst) {
  ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
  get_const(Rdst);
  ld_ptr(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
  get_constant_pool(Rdst);
  ld_ptr(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
}


void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
  get_constant_pool(Rcpool);
  ld_ptr(Rcpool, ConstantPool::tags_offset_in_bytes(), Rtags);
}


// unlock if synchronized method
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
                                                              bool throw_monitor_exception,
                                                              bool install_monitor_exception) {
  Label unlocked, unlock, no_unlock;

  // get the value of _do_not_unlock_if_synchronized into G1_scratch
  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  ldbool(do_not_unlock_if_synchronized, G1_scratch);
  stbool(G0, do_not_unlock_if_synchronized); // reset the flag

  // check if synchronized method
  const Address access_flags(Lmethod, Method::access_flags_offset());
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  push(state); // save tos
  ld(access_flags, G3_scratch); // Load access flags.
  btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
  br(zero, false, pt, unlocked);
  delayed()->nop();

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);
  delayed()->nop();

  // BasicObjectLock will be first in list, since this is a synchronized method. However, need
  // to check that the object has not been unlocked by an explicit monitorexit bytecode.

  //Intel: if (throw_monitor_exception) ... else ...
  // Entry already unlocked, need to throw exception
  //...

  // pass top-most monitor elem
  add( top_most_monitor(), O1 );

  ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
  br_notnull_short(G3_scratch, pt, unlock);

  if (throw_monitor_exception) {
    // Entry already unlocked need to throw an exception
    MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    ba_short(unlocked);
  }

  bind(unlock);

  unlock_object(O1);

  bind(unlocked);

  // I0, I1: Might contain return value

  // Check that all monitors are unlocked
  { Label loop, exception, entry, restart;

    Register Rmptr   = O0;
    Register Rtemp   = O1;
    Register Rlimit  = Lmonitors;
    const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
    assert( (delta & LongAlignmentMask) == 0,
            "sizeof BasicObjectLock must be even number of doublewords");

#ifdef ASSERT
    add(top_most_monitor(), Rmptr, delta);
    { Label L;
      // ensure that Rmptr starts out above (or at) Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("monitor stack has negative size");
      bind(L);
    }
#endif
    bind(restart);
    ba(entry);
    delayed()->
    add(top_most_monitor(), Rmptr, delta);      // points to current entry, starting with bottom-most entry

    // Entry is still locked, need to throw exception
    bind(exception);
    if (throw_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame
      unlock_object(Rmptr);
      if (install_monitor_exception) {
        MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }
      ba_short(restart);
    }

    bind(loop);
    cmp(Rtemp, G0);                             // check if current entry is used
    brx(Assembler::notEqual, false, pn, exception);
    delayed()->
    dec(Rmptr, delta);                          // otherwise advance to next entry
#ifdef ASSERT
    { Label L;
      // ensure that Rmptr has not somehow stepped below Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("ran off the end of the monitor stack");
      bind(L);
    }
#endif
    bind(entry);
    cmp(Rmptr, Rlimit);                         // check if bottom reached
    brx(Assembler::notEqual, true, pn, loop);   // if not at bottom then check this entry
    delayed()->
    ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
  }

  bind(no_unlock);
  pop(state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}
// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception) {

  unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

  // save result (push state before jvmti call and pop it afterwards) and notify jvmti
  notify_method_exit(false, state, NotifyJVMTI);

  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  verify_thread();

  // return tos
  assert(Otos_l1 == Otos_i, "adjust code below");
  switch (state) {
#ifdef _LP64
  case ltos: mov(Otos_l, Otos_l->after_save());   break;  // O0 -> I0
#else
  case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through  // O1 -> I1
#endif
  case btos:                                      // fall through
  case ctos:
  case stos:                                      // fall through
  case atos:                                      // fall through
  case itos: mov(Otos_l1, Otos_l1->after_save()); break;  // O0 -> I0
  case ftos:                                      // fall through
  case dtos:                                      // fall through
  case vtos: /* nothing to do */                  break;
  default  : ShouldNotReachHere();
  }

#if defined(COMPILER2) && !defined(_LP64)
  if (state == ltos) {
    // C2 expects long results in G1. We can't tell whether we're returning to
    // interpreted or compiled code, so to be safe we use both G1 and O0/O1.

    // Shift bits into high (msb) of G1
    sllx(Otos_l1->after_save(), 32, G1);
    // Zero extend low bits
    srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
    or3 (Otos_l2->after_save(), G1, G1);
  }
#endif /* COMPILER2 */

}
#endif /* CC_INTERP */
// Lock object
//
// Argument - lock_reg points to the BasicObjectLock to be used for locking,
//            it must be initialized with the object to lock
void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
  }
  else {
    Register obj_reg = Object;
    Register mark_reg = G4_scratch;
    Register temp_reg = G1_scratch;
    Address  lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
    Address  mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label    done;

    Label slow_case;

    assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);

    // load markOop from object into mark_reg
    ld_ptr(mark_addr, mark_reg);

    if (UseBiasedLocking) {
      biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
    }
    // get the address of basicLock on stack that will be stored in the object
    // we need a temporary register here as we do not want to clobber lock_reg
    // (cas clobbers the destination register)
    mov(lock_reg, temp_reg);
    // set mark reg to be (markOop of object | UNLOCK_VALUE)
    or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
    // initialize the box (Must happen before we update the object mark!)
    st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
    // compare and exchange object_addr, markOop | 1, stack address of basicLock
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), mark_reg, temp_reg);

    // if the compare and exchange succeeded we are done (we saw an unlocked object)
    cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);

    // We did not see an unlocked object so try the fast recursive case

    // Check if owner is self by comparing the value in the markOop of object
    // with the stack pointer
    sub(temp_reg, SP, temp_reg);
#ifdef _LP64
    sub(temp_reg, STACK_BIAS, temp_reg);
#endif
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");

    // Composite "andcc" test:
    // (a) %sp -vs- markword proximity check, and,
    // (b) verify mark word LSBs == 0 (Stack-locked).
    //
    // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
    // Note that the page size used for %sp proximity testing is arbitrary and is
    // unrelated to the actual MMU page size.  We use a 'logical' page size of
    // 4096 bytes. F..FFF003 is designed to fit conveniently in the SIMM13 immediate
    // field of the andcc instruction.
    andcc(temp_reg, 0xFFFFF003, G0);

    // if condition is true we are done and hence we can store 0 in the displaced
    // header indicating it is a recursive lock and be done
    brx(Assembler::zero, true, Assembler::pt, done);
    delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());

    // none of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);

    bind(done);
  }
}
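// The fast path above is the standard stack-locking protocol, roughly
// (pseudocode sketch):
//   box->displaced_header = obj->mark | unlocked_value;
//   if (CAS(&obj->mark, box->displaced_header, box) succeeded)  goto done;
//   if (obj->mark is a stack lock in our own frame nearby)      // recursive
//     { box->displaced_header = NULL; goto done; }
//   slow path: InterpreterRuntime::monitorenter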
// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument - lock_reg points to the BasicObjectLock for lock
// Throw an IllegalMonitorStateException if the object is not locked by the current thread
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Register obj_reg = G3_scratch;
    Register mark_reg = G4_scratch;
    Register displaced_header_reg = G1_scratch;
    Address  lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
    Address  mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label    done;

    if (UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
      biased_locking_exit(mark_addr, mark_reg, done, true);
      st_ptr(G0, lockobj_addr);  // free entry
    }

    // Test first if we are in the fast recursive case
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
    ld_ptr(lock_addr, displaced_header_reg);
    br_null(displaced_header_reg, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // See if it is still a light weight lock, if so we just unlock
    // the object and we are done

    if (!UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
    }

    // we have the displaced header in displaced_header_reg
    // we expect to see the stack address of the basicLock in case the
    // lock is still a light weight lock (lock_reg)
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg);
    cmp(lock_reg, displaced_header_reg);
    brx(Assembler::equal, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);
  }
}
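// Mirror image of the locking fast path (pseudocode sketch):
//   if (box->displaced_header == NULL)                        // recursive case
//     { lock->obj = NULL; goto done; }
//   if (CAS(&obj->mark, box, box->displaced_header) succeeded)
//     { lock->obj = NULL; goto done; }
//   slow path: InterpreterRuntime::monitorexit                // lock was inflated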
#ifndef CC_INTERP

// Get the method data pointer from the Method* and set the
// specified register to its value.

void InterpreterMacroAssembler::set_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label get_continue;

  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(get_continue);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  bind(get_continue);
}

// Set the method data pointer for the current bcp.

void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label zero_continue;

  // Test MDO to avoid the call if it is NULL.
  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(zero_continue);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  add(ImethodDataPtr, O0, ImethodDataPtr);
  bind(zero_continue);
}

// Test ImethodDataPtr. If it is null, continue at the specified label.

void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  test_method_data_pointer(verify_continue);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
  lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
  ld_ptr(Lmethod, Method::const_offset(), O5);
  add(G3_scratch, in_bytes(ConstMethod::codes_offset()), G3_scratch);
  add(G3_scratch, O5, G3_scratch);
  cmp(Lbcp, G3_scratch);
  brx(Assembler::equal, false, Assembler::pt, verify_continue);

  Register temp_reg = O5;
  delayed()->mov(ImethodDataPtr, temp_reg);
  // %%% should use call_VM_leaf here?
  //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
  save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
  stf(FloatRegisterImpl::D, Ftos_d, d_save);
  mov(temp_reg->after_save(), O2);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  ldf(FloatRegisterImpl::D, d_save, Ftos_d);
  restore();
  bind(verify_continue);
#endif // ASSERT
}

void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
                                                                Register Rtmp,
                                                                Label &profile_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Control will flow to "profile_continue" if the counter is less than the
  // limit or if we call profile_method()

  Label done;

  // if no method data exists, and the counter is high enough, make one
  br_notnull_short(ImethodDataPtr, Assembler::pn, done);

  // Test to see if we should create a method data oop
  AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
  sethi(profile_limit, Rtmp);
  ld(Rtmp, profile_limit.low10(), Rtmp);
  cmp(invocation_count, Rtmp);
  // Use long branches because call_VM() code and following code generated by
  // test_backedge_count_for_osr() is large in debug VM.
  br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
  delayed()->nop();

  // Build it now.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
  set_method_data_pointer_for_bcp();
  ba(profile_continue);
  delayed()->nop();
  bind(done);
}

// Store a value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  st_ptr(value, ImethodDataPtr, constant);
}
void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
                                                      Register bumped_count,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  // Load the counter.
  ld_ptr(counter, bumped_count);

  if (decrement) {
    // Decrement the register. Set condition codes.
    subcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the decrement causes the counter to overflow, stay negative
    Label L;
    brx(Assembler::negative, true, Assembler::pn, L);

    // Store the decremented counter, if it is still negative.
    delayed()->st_ptr(bumped_count, counter);
    bind(L);
  } else {
    // Increment the register. Set carry flag.
    addcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the increment causes the counter to overflow, pull back by 1.
    assert(DataLayout::counter_increment == 1, "subc works");
    subc(bumped_count, G0, bumped_count);

    // Store the incremented counter.
    st_ptr(bumped_count, counter);
  }
}
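// The addcc/subc pair above is a branch-free saturating increment (sketch):
// addcc sets the carry flag only when the add wraps around, and subc then
// subtracts G0 plus that carry, so a wrapped counter is pulled back by one
// and sticks at its maximum value instead of rolling over to zero.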
// Update the method data pointer by the displacement located at the
// offset (reg + offset_of_disp).

void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
                                                     int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(reg, offset_of_disp, scratch);
  ld_ptr(ImethodDataPtr, scratch, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by a simple constant displacement.

void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(ImethodDataPtr, constant, ImethodDataPtr);
}

// Update the method data pointer for a _ret bytecode whose target
// was not among our cached targets.

void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
                                                   Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(state);
  st_ptr(return_bci, l_tmp);  // protect return_bci, in case it is volatile
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
  ld_ptr(l_tmp, return_bci);
  pop(state);
}

// Count a taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are taking a branch.  Increment the taken count.
    increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
    bind (profile_continue);
  }
}


// Count a not-taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are taking a branch.  Increment the not taken count.
    increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);

    // The method data pointer needs to be updated to correspond to the
    // next bytecode.
    update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
    bind (profile_continue);
  }
}


// Count a non-virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
    bind (profile_continue);
  }
}


// Count a final call in the bytecodes.

void InterpreterMacroAssembler::profile_final_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call.  Increment the count.

// Count a final call in the bytecodes.

void InterpreterMacroAssembler::profile_final_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}


// Count a virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register scratch,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);


    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      br_notnull_short(receiver, Assembler::pt, not_null);
      // We are making a call.  Increment the count for the null receiver.
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
      ba_short(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, scratch, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register scratch,
                                        int start_row, Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
    }
    return;
  }

  int last_row = VirtualCallData::row_limit() - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the receiver and for null.
  // Take any of three different outcomes:
  //   1. found receiver => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the receiver is receiver[n].
    int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
    test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
    // delayed()->tst(scratch);

    // The receiver is receiver[n].  Increment count[n].
    int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
    increment_mdp_data_at(count_offset, scratch);
    ba_short(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on receiver[n]...  Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (is_virtual_call) {
          brx(Assembler::zero, false, Assembler::pn, found_null);
          delayed()->nop();
          // Receiver did not match any saved receiver and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
          ba_short(done);
          bind(found_null);
        } else {
          brx(Assembler::notZero, false, Assembler::pt, done);
          delayed()->nop();
        }
        break;
      }
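
      // Note: the "recursive call" for case 3 below recurs at code-generation
      // time, not at run time, so each receiver row gets its own straight-line
      // test/increment sequence in the emitted code.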
      // Since null is rare, make it be the branch-taken case.
      brx(Assembler::zero, false, Assembler::pn, found_null);
      delayed()->nop();

      // Put all the "Case 3" tests here.
      record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);

      // Found a null.  Keep searching for a matching receiver,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching receiver, but we
  // observed that receiver[start_row] is null.

  // Fill in the receiver field and increment the count.
  int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
  set_mdp_data_at(recvr_offset, receiver);
  int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
  mov(DataLayout::counter_increment, scratch);
  set_mdp_data_at(count_offset, scratch);
  if (start_row > 0) {
    ba_short(done);
  }
}

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register scratch, bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);

  bind(done);
}


// Count a ret in the bytecodes.

void InterpreterMacroAssembler::profile_ret(TosState state,
                                            Register return_bci,
                                            Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
                       return_bci, next_test, scratch);

      // return_bci is equal to bci[n].  Increment the count.
      increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
      ba_short(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(state, return_bci);

    bind(profile_continue);
  }
}
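
// RetData caches RetData::row_limit() (bci, count, displacement) triples,
// which is why the row loop in profile_ret above is unrolled once per row at
// code-generation time; a return bci outside the cached set falls through to
// the much slower update_mdp_for_ret() VM call.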

// Profile an unexpected null in the bytecodes.
void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck(Register klass,
                                                  Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, scratch, false);
    }

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter.  We expect to see zero or small negatives.
    increment_mdp_data_at(count_offset, scratch, true);

    bind(profile_continue);
  }
}

// Count the default case of a switch construct.

void InterpreterMacroAssembler::profile_switch_default(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the default case count.
    increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
                          scratch);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(
        in_bytes(MultiBranchData::default_displacement_offset()),
        scratch);

    bind(profile_continue);
  }
}

// Count the index'th case of a switch construct.

void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register scratch,
                                                    Register scratch2,
                                                    Register scratch3) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes()
    set(in_bytes(MultiBranchData::per_case_size()), scratch);
    smul(index, scratch, scratch);
    add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch);

    // Update the case count.
    increment_mdp_data_at(scratch,
                          in_bytes(MultiBranchData::relative_count_offset()),
                          scratch2,
                          scratch3);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(scratch,
                         in_bytes(MultiBranchData::relative_displacement_offset()),
                         scratch2);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr, Register tmp) {
  Label not_null, do_nothing, do_update;

  assert_different_registers(obj, mdo_addr.base(), tmp);

  verify_oop(obj);

  ld_ptr(mdo_addr, tmp);

  br_notnull_short(obj, pt, not_null);
  or3(tmp, TypeEntries::null_seen, tmp);
  ba_short(do_update);

  bind(not_null);
  load_klass(obj, obj);

  xor3(obj, tmp, obj);
  btst(TypeEntries::type_klass_mask, obj);
  // klass seen before, nothing to do.  The unknown bit may have been
  // set already but no need to check.
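  // The xor3 above turns the klass comparison into a bit test: if the klass
  // just loaded equals the klass recorded in the profile cell, every bit
  // under type_klass_mask cancels to zero.  Each dangling delayed()-> below
  // places the following btst in the branch delay slot, so it computes the
  // condition for the *next* branch while the current one is resolving.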
  brx(zero, false, pt, do_nothing);
  delayed()->

  btst(TypeEntries::type_unknown, obj);
  // already unknown.  Nothing to do anymore.
  brx(notZero, false, pt, do_nothing);
  delayed()->

  btst(TypeEntries::type_mask, tmp);
  brx(zero, true, pt, do_update);
  // first time here.  Set profile type.
  delayed()->or3(tmp, obj, tmp);

  // different than before.  Cannot keep accurate profile.
  or3(tmp, TypeEntries::type_unknown, tmp);

  bind(do_update);
  // update profile
  st_ptr(tmp, mdo_addr);

  bind(do_nothing);
}

void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  assert_different_registers(callee, tmp1, tmp2, ImethodDataPtr);

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    ldub(ImethodDataPtr, in_bytes(DataLayout::tag_offset()) - off_to_start, tmp1);
    cmp_and_br_short(tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag, notEqual, pn, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
      add(ImethodDataPtr, off_to_args, ImethodDataPtr);

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If return value type is profiled we may have no argument to profile
          ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);
          sub(tmp1, i*TypeStackSlotEntries::per_arg_count(), tmp1);
          cmp_and_br_short(tmp1, TypeStackSlotEntries::per_arg_count(), less, pn, done);
        }
        ld_ptr(Address(callee, Method::const_offset()), tmp1);
        lduh(Address(tmp1, ConstMethod::size_of_parameters_offset()), tmp1);
        // Stack offset o (zero based) from the start of the argument
        // list, for n arguments, translates into offset n - o - 1 from
        // the end of the argument list.  But there's an extra slot at
        // the top of the stack, so the offset is n - o from Lesp.
        ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, tmp2);
        sub(tmp1, tmp2, tmp1);

        // Can't use MacroAssembler::argument_address() which needs Gargs to be set up
        sll(tmp1, Interpreter::logStackElementSize, tmp1);
        ld_ptr(Lesp, tmp1, tmp1);

        Address mdo_arg_addr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
        profile_obj_type(tmp1, mdo_arg_addr, tmp2);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        add(ImethodDataPtr, to_add, ImethodDataPtr);
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);
        sub(tmp1, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count(), tmp1);
      }

      bind(done);
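
      // At this point the mdp has been advanced past every argument entry
      // actually profiled; any cells remaining before the end of the record
      // belong to the (optional) return-type entry handled below.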

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument.  tmp1 is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end.  Non-zero
        // if there's a return to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        sll(tmp1, exact_log2(DataLayout::cell_size), tmp1);
        add(ImethodDataPtr, tmp1, ImethodDataPtr);
      }
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one.

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
  assert_different_registers(ret, tmp1, tmp2);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile.  We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length.
      Label do_profile;
      ldub(Lbcp, 0, tmp1);
      cmp_and_br_short(tmp1, Bytecodes::_invokedynamic, equal, pn, do_profile);
      cmp(tmp1, Bytecodes::_invokehandle);
      br(equal, false, pn, do_profile);
      delayed()->ldub(Lmethod, Method::intrinsic_id_offset_in_bytes(), tmp1);
      cmp_and_br_short(tmp1, vmIntrinsics::_compiledLambdaForm, notEqual, pt, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(ImethodDataPtr, -in_bytes(ReturnTypeEntry::size()));
    mov(ret, tmp1);
    profile_obj_type(tmp1, mdo_ret_addr, tmp2);

    bind(profile_continue);
  }
}
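
// profile_parameters_type below walks the ParametersTypeData array from the
// last parameter down to the first.  In outline (a sketch of the emitted
// loop with hypothetical accessors, not a separate API):
//
//   for (int i = nparams - 1; i >= 0; i--) {
//     int slot = parameters_type_data->stack_slot(i);
//     profile_obj_type(local(slot), parameters_type_data->type_addr(i));
//   }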

void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters.  If it's negative we're not profiling any parameters.
    lduw(ImethodDataPtr, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), tmp1);
    cmp_and_br_short(tmp1, 0, less, pn, profile_continue);

    // Compute a pointer to the area for parameters from the offset and
    // move the pointer to the slot for the last parameter.  Collect
    // profiling from the last parameter down:
    //   mdo start + parameters offset + array length - 1

    // Pointer to the parameter area in the MDO
    Register mdp = tmp1;
    add(ImethodDataPtr, tmp1, mdp);

    // Offset of the current profile entry to update
    Register entry_offset = tmp2;
    // entry_offset = array len in number of cells
    ld_ptr(mdp, ArrayData::array_len_offset(), entry_offset);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");

    // entry_offset (number of cells) = array len - size of 1 entry + offset of the stack slot field
    sub(entry_offset, TypeStackSlotEntries::per_arg_count() - (off_base / DataLayout::cell_size), entry_offset);
    // entry_offset in bytes
    sll(entry_offset, exact_log2(DataLayout::cell_size), entry_offset);

    Label loop;
    bind(loop);

    // Load the stack offset recorded in the slot for this parameter
    ld_ptr(mdp, entry_offset, tmp3);
    sll(tmp3, Interpreter::logStackElementSize, tmp3);
    neg(tmp3);
    // Read the parameter from the local area
    ld_ptr(Llocals, tmp3, tmp3);

    // Make entry_offset now point to the type field for this parameter
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    assert(type_base > off_base, "unexpected");
    add(entry_offset, type_base - off_base, entry_offset);

    // Profile the parameter
    Address arg_type(mdp, entry_offset);
    profile_obj_type(tmp3, arg_type, tmp4);

    // Go to the next parameter
    sub(entry_offset, TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base), entry_offset);
    cmp_and_br_short(entry_offset, off_base, greaterEqual, pt, loop);

    bind(profile_continue);
  }
}

// Add an InterpMonitorElem to the stack (see frame_sparc.hpp).

void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
                                                      Register Rtemp,
                                                      Register Rtemp2 ) {

  Register Rlimit = Lmonitors;
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  assert( (delta & LongAlignmentMask) == 0,
          "sizeof BasicObjectLock must be even number of doublewords");

  sub( SP,        delta, SP);
  sub( Lesp,      delta, Lesp);
  sub( Lmonitors, delta, Lmonitors);

  if (!stack_is_empty) {

    // must copy stack contents down

    Label start_copying, next;

    // untested("monitor stack expansion");
    compute_stack_base(Rtemp);
    ba(start_copying);
    delayed()->cmp(Rtemp, Rlimit);  // done? duplicated below

    // Note: must copy from low memory upwards.
    // On entry to loop,
    //   Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS).
    // Loop mutates Rtemp.

    bind(next);

    st_ptr(Rtemp2, Rtemp, 0);
    inc(Rtemp, wordSize);
    cmp(Rtemp, Rlimit);  // are we done? (duplicated above)
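
    // The loop is rotated: the termination compare for the first iteration
    // was issued in the delay slot of the ba above, so control enters at
    // start_copying with the condition codes already set.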

    bind(start_copying);

    brx(notEqual, true, pn, next);
    delayed()->ld_ptr(Rtemp, delta, Rtemp2);

    // done copying stack
  }
}

// Locals
void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld_ptr(index, 0, dst);
  // Note: index must hold the effective address--the iinc template uses it
}

// Just like access_local_ptr but the tag is a returnAddress
void InterpreterMacroAssembler::access_local_returnAddress(Register index,
                                                           Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld_ptr(index, 0, dst);
}

void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld(index, 0, dst);
  // Note: index must hold the effective address--the iinc template uses it
}


void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  // First half stored at index n+1 (which grows down from Llocals[n])
  load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
}


void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ldf(FloatRegisterImpl::S, index, 0, dst);
}


void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
}


#ifdef ASSERT
void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
  Label L;

  assert(Rindex    != Rscratch,  "Registers cannot be same");
  assert(Rindex    != Rscratch1, "Registers cannot be same");
  assert(Rlimit    != Rscratch,  "Registers cannot be same");
  assert(Rlimit    != Rscratch1, "Registers cannot be same");
  assert(Rscratch1 != Rscratch,  "Registers cannot be same");

  // untested("reg area corruption");
  add(Rindex, offset, Rscratch);
  add(Rlimit, 64 + STACK_BIAS, Rscratch1);
  cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
  stop("regsave area is being clobbered");
  bind(L);
}
#endif // ASSERT


void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
  st(src, index, 0);
}
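
// Note: locals grow toward lower addresses from Llocals, which is why every
// accessor above and every store below *subtracts* the scaled index from
// Llocals instead of adding it.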

void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
  st_ptr(src, index, 0);
}



void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
  st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
}

void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
  store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1));  // which is n+1
}


void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
  stf(FloatRegisterImpl::S, src, index, 0);
}


void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
  store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
}


int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
  return ((-rounded_vm_local_words * wordSize) - delta) + STACK_BIAS;
}


Address InterpreterMacroAssembler::top_most_monitor() {
  return Address(FP, top_most_monitor_byte_offset());
}


void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
  add( Lesp, wordSize, Rdest );
}

#endif /* CC_INTERP */

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register Rcounters,
                                                    Label& skip) {
  Label has_counters;
  Address method_counters(method, in_bytes(Method::method_counters_offset()));
  ld_ptr(method_counters, Rcounters);
  br_notnull_short(Rcounters, Assembler::pt, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ld_ptr(method_counters, Rcounters);
  br_null(Rcounters, false, Assembler::pn, skip);  // No MethodCounters, OutOfMemory
  delayed()->nop();
  bind(has_counters);
}

void InterpreterMacroAssembler::increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) {
  assert(UseCompiler, "incrementing must be useful");
  assert_different_registers(Rcounters, Rtmp, Rtmp2);

  Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() +
                                 InvocationCounter::counter_offset());
  Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() +
                                 InvocationCounter::counter_offset());
  int delta = InvocationCounter::count_increment;

  // Load each counter in a register
  ld( inv_counter, Rtmp );
  ld( be_counter,  Rtmp2 );

  assert( is_simm13( delta ), "delta too large.");

  // Add the delta to the invocation counter and store the result
  add( Rtmp, delta, Rtmp );

  // Mask the backedge counter
  and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );

  // Store value
  st( Rtmp, inv_counter );

  // Add invocation counter + backedge counter
  add( Rtmp, Rtmp2, Rtmp );

  // Note that this macro must leave the backedge_count + invocation_count in Rtmp!
}

void InterpreterMacroAssembler::increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) {
  assert(UseCompiler, "incrementing must be useful");
  assert_different_registers(Rcounters, Rtmp, Rtmp2);

  Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() +
                                 InvocationCounter::counter_offset());
  Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() +
                                 InvocationCounter::counter_offset());

  int delta = InvocationCounter::count_increment;
  // Load each counter in a register
  ld( be_counter,  Rtmp );
  ld( inv_counter, Rtmp2 );

  // Add the delta to the backedge counter
  add( Rtmp, delta, Rtmp );

  // Mask the invocation counter, add to backedge counter
  and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );

  // and store the result to memory
  st( Rtmp, be_counter );

  // Add backedge + invocation counter
  add( Rtmp, Rtmp2, Rtmp );

  // Note that this macro must leave backedge_count + invocation_count in Rtmp!
}

#ifndef CC_INTERP
void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
                                                             Register branch_bcp,
                                                             Register Rtmp ) {
  Label did_not_overflow;
  Label overflow_with_error;
  assert_different_registers(backedge_count, Rtmp, branch_bcp);
  assert(UseOnStackReplacement, "Must UseOnStackReplacement to test_backedge_count_for_osr");

  AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
  load_contents(limit, Rtmp);
  cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);

  // When ProfileInterpreter is on, the backedge_count comes from the
  // MethodData*, whose value does not get reset on the call to
  // frequency_counter_overflow().  To avoid excessive calls to the overflow
  // routine while the method is being compiled, add a second test to make sure
  // the overflow function is called only once every overflow_frequency.
  if (ProfileInterpreter) {
    const int overflow_frequency = 1024;
    andcc(backedge_count, overflow_frequency-1, Rtmp);
    brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
    delayed()->nop();
  }

  // overflow in loop, pass branch bytecode
  set(6, Rtmp);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);

  // Was an OSR adapter generated?
  // O0 = osr nmethod
  br_null_short(O0, Assembler::pn, overflow_with_error);
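
  // An OSR nmethod that has been invalidated keeps InvalidOSREntryBci in its
  // entry_bci field; that sentinel is what the check below compares against.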

  // Has the nmethod been invalidated already?
  ld(O0, nmethod::entry_bci_offset(), O2);
  cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error);

  // migrate the interpreter frame off of the stack

  mov(G2_thread, L7);
  // save nmethod
  mov(O0, L6);
  set_last_Java_frame(SP, noreg);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
  reset_last_Java_frame();
  mov(L7, G2_thread);

  // move OSR nmethod to I1
  mov(L6, I1);

  // OSR buffer to I0
  mov(O0, I0);

  // remove the interpreter frame
  restore(I5_savedSP, 0, SP);

  // Jump to the osr code.  (The restore above shifted the register window,
  // so the nmethod saved in I1 is now visible as O1.)
  ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
  jmp(O2, G0);
  delayed()->nop();

  bind(overflow_with_error);

  bind(did_not_overflow);
}



void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) {
  if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); }
}


// local helper function for the verify_oop_or_return_address macro
static bool verify_return_address(Method* m, int bci) {
#ifndef PRODUCT
  address pc = (address)(m->constMethod())
             + in_bytes(ConstMethod::codes_offset()) + bci;
  // assume it is a valid return address if it is inside m and is preceded by a jsr
  if (!m->contains(pc))                                          return false;
  address jsr_pc;
  jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
  if (*jsr_pc == Bytecodes::_jsr   && jsr_pc >= m->code_base())  return true;
  jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
  if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base())  return true;
#endif // PRODUCT
  return false;
}


void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
  if (!VerifyOops)  return;
  // the VM documentation for the astore[_wide] bytecode allows
  // the TOS to be not only an oop but also a return address
  Label test;
  Label skip;
  // See if it is an address (in the current method):

  mov(reg, Rtmp);
  const int log2_bytecode_size_limit = 16;
  srl(Rtmp, log2_bytecode_size_limit, Rtmp);
  br_notnull_short( Rtmp, pt, test );

  // %%% should use call_VM_leaf here?
  save_frame_and_mov(0, Lmethod, O0, reg, O1);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, verify_return_address), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  br_notnull( O0, false, pt, skip );
  delayed()->restore();

  // Perform a more elaborate out-of-line call.
  // Not an address; verify it:
  bind(test);
  verify_oop(reg);
  bind(skip);
}


void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
  if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
}


// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
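// In outline (sketch):
//
//   int c = *counter_addr + increment;
//   *counter_addr = c;               // store issued in the branch delay slot
//   if ((c & mask) <cond> 0)  goto *where;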
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, int mask,
                                                        Register scratch1, Register scratch2,
                                                        Condition cond, Label *where) {
  ld(counter_addr, scratch1);
  add(scratch1, increment, scratch1);
  if (is_simm13(mask)) {
    andcc(scratch1, mask, G0);
  } else {
    set(mask, scratch2);
    andcc(scratch1, scratch2, G0);
  }
  br(cond, false, Assembler::pn, *where);
  delayed()->st(scratch1, counter_addr);
}
#endif /* CC_INTERP */

// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   InterpreterRuntime::post_method_entry();
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_entry(method, receiver);
// }
// if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
//   SharedRuntime::rc_trace_method_entry(method, receiver);
// }

void InterpreterMacroAssembler::notify_method_entry() {

  // C++ interpreter only uses this for native methods.

  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, temp_reg);
    cmp_and_br_short(temp_reg, 0, equal, pt, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    Register temp_reg = O5;
    SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
    call_VM_leaf(noreg,
                 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 G2_thread, Lmethod);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    call_VM_leaf(noreg,
                 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
                 G2_thread, Lmethod);
  }
}


// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   // save result
//   InterpreterRuntime::post_method_exit();
//   // restore result
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_exit(thread, method);
// }
//
// Native methods have their result stored in d_tmp and l_tmp.
// Java methods have their result stored in the expression stack.

void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
                                                   TosState state,
                                                   NotifyMethodExitMode mode) {
  // C++ interpreter only uses this for native methods.

  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, temp_reg);
    cmp_and_br_short(temp_reg, 0, equal, pt, L);
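
    // interp_only_mode is an int in the JavaThread; it is nonzero only while
    // JVMTI has requested interpreter-only execution for this thread, so a
    // zero value means no method-exit event needs to be posted.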

    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit.  For
    // native methods it assumes the result registers are saved to
    // l_scratch and d_scratch.  If this changes then the
    // interpreter_frame_result implementation will need to be updated too.

    save_return_value(state, is_native_method);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    restore_return_value(state, is_native_method);
    bind(L);
  }

  {
    Register temp_reg = O5;
    // DTrace notification
    SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
    save_return_value(state, is_native_method);
    call_VM_leaf(
      noreg,
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
      G2_thread, Lmethod);
    restore_return_value(state, is_native_method);
  }
}

void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
#ifdef CC_INTERP
  // result potentially in O0/O1: save it across calls
  stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
#ifdef _LP64
  stx(O0, STATE(_native_lresult));
#else
  std(O0, STATE(_native_lresult));
#endif
#else // CC_INTERP
  if (is_native_call) {
    stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
    stx(O0, l_tmp);
#else
    std(O0, l_tmp);
#endif
  } else {
    push(state);
  }
#endif // CC_INTERP
}

void InterpreterMacroAssembler::restore_return_value(TosState state, bool is_native_call) {
#ifdef CC_INTERP
  ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
#ifdef _LP64
  ldx(STATE(_native_lresult), O0);
#else
  ldd(STATE(_native_lresult), O0);
#endif
#else // CC_INTERP
  if (is_native_call) {
    ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
    ldx(l_tmp, O0);
#else
    ldd(l_tmp, O0);
#endif
  } else {
    pop(state);
  }
#endif // CC_INTERP
}
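
// save_return_value and restore_return_value must stay exactly symmetric:
// whatever one stores (F0/O0 for native calls, a TosState push otherwise)
// the other must reload in the same shape, since callers pair them around
// every VM call that can clobber the result registers.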