/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interp_masm_sparc.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/methodCounters.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"

#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH

// Implementation of InterpreterMacroAssembler

// This file specializes the assembler with interpreter-specific macros

const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  AddressLiteral al(entry);
  jump_to(al, G3_scratch);
  delayed()->nop();
}

void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  // Note: this algorithm is also used by C1's OSR entry sequence.
  // Any changes should also be applied to CodeEmitter::emit_osr_entry().
  assert_different_registers(args_size, locals_size);
  // max_locals*2 for TAGS.  Assumes that args_size has already been adjusted.
  subcc(locals_size, args_size, delta); // extra space for non-argument locals in words
  // Use br/mov combination because it works on both V8 and V9 and is
  // faster.
  Label skip_move;
  br(Assembler::negative, true, Assembler::pt, skip_move);
  delayed()->mov(G0, delta);
  bind(skip_move);
  round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
  sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
}
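// Illustrative note (not generated code): with locals_size == 5 words and
// args_size == 2 words, delta starts as 3 words of non-argument locals;
// round_to pads it to 4 so SP stays 2-word aligned, and the final shift
// yields 4 * wordSize bytes of extra frame space. A negative difference
// (more args than locals) is clamped to 0 by the br/mov pair above.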
// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch. The dispatch address is computed and placed in IdispatchAddress.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  assert_not_delayed();
#ifdef FAST_DISPATCH
  // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
  // they both use I2.
  assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
  ldub(Lbcp, bcp_incr, Lbyte_code);                     // load next bytecode
  add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                        // add offset to correct dispatch table
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);         // multiply by wordSize
  ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr
#else
  ldub( Lbcp, bcp_incr, Lbyte_code);                    // load next bytecode
  // dispatch table to use
  AddressLiteral tbl(Interpreter::dispatch_table(state));
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);         // multiply by wordSize
  set(tbl, G3_scratch);                                 // compute addr of table
  ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress);     // get entry addr
#endif
}


// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch. The dispatch address in IdispatchAddress is used for the
// dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  assert_not_delayed();
  verify_FPU(1, state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  jmp( IdispatchAddress, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
}


void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);
}


void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  assert_not_delayed();
  ldub( Lbcp, 0, Lbyte_code);                      // load next bytecode
  dispatch_base(state, table);
}


void InterpreterMacroAssembler::call_VM_leaf_base(
  Register java_thread,
  address  entry_point,
  int      number_of_arguments
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // super call
  MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);
}
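// Note on the dispatch sequences above: the dispatch table is indexed by
// bytecode value, so the handler address is table[bytecode]. Scaling the
// bytecode by LogBytesPerWord turns it into a byte offset; e.g. bytecode
// 0x60 (iadd) selects the pointer at byte offset 0x60 * wordSize.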
void InterpreterMacroAssembler::call_VM_base(
  Register oop_result,
  Register java_thread,
  Register last_java_sp,
  address  entry_point,
  int      number_of_arguments,
  bool     check_exception
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // See class ThreadInVMfromInterpreter, which assumes that the interpreter
  // takes responsibility for setting its own thread-state on call-out.
  // However, ThreadInVMfromInterpreter resets the state to "in_Java".

  //save_bcp();                                  // save bcp
  MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
  //restore_bcp();                               // restore bcp
  //restore_locals();                            // restore locals pointer
}


void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;

    // Check the "pending popframe condition" flag in the current thread
    ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);

    // Initiate popframe handling only if it is not already being processed.  If the flag
    // has the popframe_processing bit set, it means that this code is called *during* popframe
    // handling - we don't want to reenter.
    btst(JavaThread::popframe_pending_bit, scratch_reg);
    br(zero, false, pt, L);
    delayed()->nop();
    btst(JavaThread::popframe_processing_bit, scratch_reg);
    br(notZero, false, pt, L);
    delayed()->nop();

    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));

    // Jump to Interpreter::_remove_activation_preserving_args_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thr_state = G4_scratch;
  ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
  const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
  case ltos: ld_long(val_addr, Otos_l);                   break;
  case atos: ld_ptr(oop_addr, Otos_l);
             st_ptr(G0, oop_addr);                        break;
  case btos:                                     // fall through
  case ctos:                                     // fall through
  case stos:                                     // fall through
  case itos: ld(val_addr, Otos_l1);                       break;
  case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;
  case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;
  case vtos: /* nothing to do */                          break;
  default  : ShouldNotReachHere();
  }
  // Clean up tos value in the jvmti thread state
  or3(G0, ilgl, G3_scratch);
  stw(G3_scratch, tos_addr);
  st_long(G0, val_addr);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}
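// Note: the popframe check above and the earlyret check below follow the
// same shape: a per-thread JVMTI condition word is loaded, the relevant
// state is tested, and on a hit control transfers to a generated
// interpreter entry whose address is returned in O0 by a VM leaf call.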
void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register thr_state = G3_scratch;
    ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
    br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
    cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code
    ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);

    // Jump to Interpreter::_remove_activation_early_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1);
  MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
}

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
  assert_not_delayed();
  dispatch_Lbyte_code(state, table);
}


void InterpreterMacroAssembler::dispatch_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}


void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}


// common code to dispatch and dispatch_only
// dispatch value in Lbyte_code and increment Lbcp

void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
  verify_FPU(1, state);
  // %%%%% maybe implement +VerifyActivationFrameSize here
  //verify_thread(); //too slow; we will just verify on method entry & exit
  if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
#ifdef FAST_DISPATCH
  if (table == Interpreter::dispatch_table(state)) {
    // use IdispatchTables
    add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                      // add offset to correct dispatch table
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    ld_ptr(IdispatchTables, Lbyte_code, G3_scratch);  // get entry addr
  } else {
#endif
    // dispatch table to use
    AddressLiteral tbl(table);
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    set(tbl, G3_scratch);                             // compute addr of table
    ld_ptr(G3_scratch, Lbyte_code, G3_scratch);       // get entry addr
#ifdef FAST_DISPATCH
  }
#endif
  jmp( G3_scratch, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}


// Helpers for expression stack

// Longs and doubles are Category 2 computational types in the
// JVM specification (section 3.11.1) and take 2 expression stack or
// local slots.
// Aligning them on 32 bit with tagged stacks is hard because the code generated
// for the dup* bytecodes depends on what types are already on the stack.
// If the types are split into the two stack/local slots, that is much easier
// (and we can use 0 for non-reference tags).
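// Illustrative note on the category-2 layout discussed above: a long or
// double occupies two adjacent stackElementSize slots on the expression
// stack, which is why pop_l/pop_d and push_l/push_d below move Lesp by
// 2*Interpreter::stackElementSize while the single-slot variants move it
// by one.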
// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
  assert_not_delayed();

#ifdef _LP64
  ldf(FloatRegisterImpl::D, r1, offset, d);
#else
  ldf(FloatRegisterImpl::S, r1, offset, d);
  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stf(FloatRegisterImpl::D, d, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  stf(FloatRegisterImpl::S, d, r1, offset);
  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}


// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
  assert_not_delayed();
#ifdef _LP64
  ldx(r1, offset, rd);
#else
  ld(r1, offset, rd);
  ld(r1, offset + Interpreter::stackElementSize, rd->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stx(l, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  st(l, r1, offset);
  st(l->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}

void InterpreterMacroAssembler::pop_i(Register r) {
  assert_not_delayed();
  ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
  assert_not_delayed();
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  assert_not_delayed();
  load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
  assert_not_delayed();
  ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
  assert_not_delayed();
  load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::push_i(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  st(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  assert_not_delayed();
  st_ptr(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}
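// Convention visible in the pushes/pops above: Lesp points to the free
// slot just below the top of stack, and the stack grows toward lower
// addresses. A push therefore stores at Lesp[0] and then decrements Lesp,
// while a pop loads from the small positive offset expr_offset_in_bytes(0)
// and increments Lesp.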
// remember: our convention for longs in SPARC is:
// O0 (Otos_l1) has high-order part in first word,
// O1 (Otos_l2) has low-order part in second word

void InterpreterMacroAssembler::push_l(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_long(r, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_f(FloatRegister f) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  stf(FloatRegisterImpl::S, f, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_d(FloatRegister d) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_double(d, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  switch (state) {
  case atos: push_ptr();            break;
  case btos: push_i();              break;
  case ctos:
  case stos: push_i();              break;
  case itos: push_i();              break;
  case ltos: push_l();              break;
  case ftos: push_f();              break;
  case dtos: push_d();              break;
  case vtos: /* nothing to do */    break;
  default  : ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();             break;
  case btos: pop_i();               break;
  case ctos:
  case stos: pop_i();               break;
  case itos: pop_i();               break;
  case ltos: pop_l();               break;
  case ftos: pop_f();               break;
  case dtos: pop_d();               break;
  case vtos: /* nothing to do */    break;
  default  : ShouldNotReachHere();
  }
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
}
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
}


void InterpreterMacroAssembler::load_receiver(Register param_count,
                                              Register recv) {
  sll(param_count, Interpreter::logStackElementSize, param_count);
  ld_ptr(Lesp, param_count, recv);  // gets receiver oop
}
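// Note for the dup/swap helpers above: load_ptr/store_ptr address the
// n-th stack element relative to Lesp, so a swap could be written as
//   load_ptr(1, Otos_i);  load_ptr(0, Lscratch);
//   store_ptr(1, Lscratch);  store_ptr(0, Otos_i);
// (the register choice here is illustrative, not taken from this file).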
void InterpreterMacroAssembler::empty_expression_stack() {
  // Reset Lesp.
  sub( Lmonitors, wordSize, Lesp );

  // Reset SP by subtracting more space from Lesp.
  Label done;
  assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");

  // A native does not need to do this, since its callee does not change SP.
  ld(Lmethod, Method::access_flags_offset(), Gframe_size);  // Load access flags.
  btst(JVM_ACC_NATIVE, Gframe_size);
  br(Assembler::notZero, false, Assembler::pt, done);
  delayed()->nop();

  // Compute max expression stack+register save area
  ld_ptr(Lmethod, in_bytes(Method::const_offset()), Gframe_size);
  lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size);  // Load max stack.
  add(Gframe_size, frame::memory_parameter_word_sp_offset+Method::extra_stack_entries(), Gframe_size );

  //
  // now set up a stack frame with the size computed above
  //
  //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
  sll( Gframe_size, LogBytesPerWord, Gframe_size );
  sub( Lesp, Gframe_size, Gframe_size );
  and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary
  debug_only(verify_sp(Gframe_size, G4_scratch));
#ifdef _LP64
  sub(Gframe_size, STACK_BIAS, Gframe_size );
#endif
  mov(Gframe_size, SP);

  bind(done);
}


#ifdef ASSERT
void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
  Label Bad, OK;

  // Saved SP must be aligned.
#ifdef _LP64
  btst(2*BytesPerWord-1, Rsp);
#else
  btst(LongAlignmentMask, Rsp);
#endif
  br(Assembler::notZero, false, Assembler::pn, Bad);
  delayed()->nop();

  // Saved SP, plus register window size, must not be above FP.
  add(Rsp, frame::register_save_words * wordSize, Rtemp);
#ifdef _LP64
  sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
#endif
  cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);

  // Saved SP must not be ridiculously below current SP.
  size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
  set(maxstack, Rtemp);
  sub(SP, Rtemp, Rtemp);
#ifdef _LP64
  add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
#endif
  cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);

  ba_short(OK);

  bind(Bad);
  stop("on return to interpreted call, restored SP is corrupted");

  bind(OK);
}


void InterpreterMacroAssembler::verify_esp(Register Resp) {
  // about to read or write Resp[0]
  // make sure it is not in the monitors or the register save area
  Label OK1, OK2;

  cmp(Resp, Lmonitors);
  brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
  delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pops:  Lesp points into monitor area");
  bind(OK1);
#ifdef _LP64
  sub(Resp, STACK_BIAS, Resp);
#endif
  cmp(Resp, SP);
  brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
  delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pushes:  Lesp points into register window");
  bind(OK2);
}
#endif // ASSERT
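// Background for the STACK_BIAS adjustments above: on 64-bit SPARC the
// values kept in SP and FP are biased, i.e. they are the true stack
// addresses minus STACK_BIAS (0x7ff), so comparing a real address against
// a register value requires first adding or subtracting the bias, as
// verify_sp and verify_esp do. On 32-bit, STACK_BIAS is 0.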
// Load compiled (i2c) or interpreter entry when calling from interpreted and
// do the call. Centralized so that all interpreter calls will do the same actions.
// If jvmti single stepping is on for a thread we must not call compiled code.
void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {

  // Assume we want to go compiled if available

  ld_ptr(G5_method, in_bytes(Method::from_interpreted_offset()), target);

  if (JvmtiExport::can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    verify_thread();
    Label skip_compiled_code;

    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, scratch);
    cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
    delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
    bind(skip_compiled_code);
  }

  // the i2c_adapters need Method* in G5_method (right? %%%)
  // do the call
#ifdef ASSERT
  {
    Label ok;
    br_notnull_short(target, Assembler::pt, ok);
    stop("null entry point");
    bind(ok);
  }
#endif // ASSERT

  // Adjust Rret first so Llast_SP can be same as Rret
  add(Rret, -frame::pc_return_offset, O7);
  add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
  // Record SP so we can remove any stack space allocated by adapter transition
  jmp(target, 0);
  delayed()->mov(SP, Llast_SP);
}

void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
  assert_not_delayed();

  Label not_taken;
  if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
  else             br (cc, false, Assembler::pn, not_taken);
  delayed()->nop();

  TemplateTable::branch(false,false);

  bind(not_taken);

  profile_not_taken_branch(G3_scratch);
}


void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(
                                  int         bcp_offset,
                                  Register    Rtmp,
                                  Register    Rdst,
                                  signedOrNot is_signed,
                                  setCCOrNot  should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  switch (is_signed) {
   default: ShouldNotReachHere();

   case   Signed:  ldsb( Lbcp, bcp_offset, Rdst );  break;  // high byte
   case Unsigned:  ldub( Lbcp, bcp_offset, Rdst );  break;  // high byte
  }
  ldub( Lbcp, bcp_offset + 1, Rtmp );  // low byte
  sll( Rdst, BitsPerByte, Rdst);
  switch (should_set_CC ) {
   default: ShouldNotReachHere();

   case      set_CC:  orcc( Rdst, Rtmp, Rdst ); break;
   case dont_set_CC:  or3(  Rdst, Rtmp, Rdst ); break;
  }
}


void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
                                  int        bcp_offset,
                                  Register   Rtmp,
                                  Register   Rdst,
                                  setCCOrNot should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  add( Lbcp, bcp_offset, Rtmp);
  andcc( Rtmp, 3, G0);
  Label aligned;
  switch (should_set_CC ) {
   default: ShouldNotReachHere();

   case      set_CC: break;
   case dont_set_CC: break;
  }

  br(Assembler::zero, true, Assembler::pn, aligned);
#ifdef _LP64
  delayed()->ldsw(Rtmp, 0, Rdst);
#else
  delayed()->ld(Rtmp, 0, Rdst);
#endif

  ldub(Lbcp, bcp_offset + 3, Rdst);
  ldub(Lbcp, bcp_offset + 2, Rtmp);  sll(Rtmp,  8, Rtmp);  or3(Rtmp, Rdst, Rdst);
  ldub(Lbcp, bcp_offset + 1, Rtmp);  sll(Rtmp, 16, Rtmp);  or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
  ldsb(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#else
  // Unsigned load is faster than signed on some implementations
  ldub(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#endif
  or3(Rtmp, Rdst, Rdst );

  bind(aligned);
  if (should_set_CC == set_CC) tst(Rdst);
}
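// Worked example for the byte assembly above (SPARC is big-endian): for
// bytecode stream bytes b0 b1 at bcp_offset, get_2_byte_integer_at_bcp
// computes (b0 << 8) | b1. The unaligned path of get_4_byte_integer_at_bcp
// builds (b0 << 24) | (b1 << 16) | (b2 << 8) | b3 one byte at a time,
// while the aligned path does it in a single load filled into the delay slot.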
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register temp, Register index,
                                                       int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned);
  } else if (index_size == sizeof(u4)) {
    get_4_byte_integer_at_bcp(bcp_offset, temp, index);
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    xor3(index, -1, index);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    ldub(Lbcp, bcp_offset, index);
  } else {
    ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
                                                           int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
  // convert from field index to ConstantPoolCacheEntry index and from
  // word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  add(LcpoolCache, tmp, cache);
}


void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register temp,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
  ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
  const int shift_count = (1 + byte_no) * BitsPerByte;
  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
         "correct shift count");
  srl(bytecode, shift_count, bytecode);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode);
}


void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                               int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  } else {
    ShouldNotReachHere();  // other sizes not supported here
  }
  // convert from field index to ConstantPoolCacheEntry index
  // and from word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  // skip past the header
  add(tmp, in_bytes(ConstantPoolCache::base_offset()), tmp);
  // construct pointer to cache entry
  add(LcpoolCache, tmp, cache);
}
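// Note on the u4 case above: invokedynamic operands are stored as the
// bitwise complement of the cache index, so xor3(index, -1, index)
// (bitwise NOT) recovers the plain index. For example a raw operand of
// ~123 (0xffffff84) decodes back to 123, which is exactly what the
// decode_invokedynamic_index assert checks.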
// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index) {
  assert_different_registers(result, index);
  assert_not_delayed();
  // convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a java object, it can be compressed
  Register tmp = index;  // reuse
  sll(index, LogBytesPerHeapOop, tmp);
  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
  // JNIHandles::resolve(result)
  ld_ptr(result, 0, result);
  // Add in the index
  add(result, tmp, result);
  load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
}


// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass.  Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Register Rsuper_klass,
                                                  Register Rtmp1,
                                                  Register Rtmp2,
                                                  Register Rtmp3,
                                                  Label &ok_is_subtype ) {
  Label not_subtype;

  // Profile the not-null value's klass.
  profile_typecheck(Rsub_klass, Rtmp1);

  check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2,
                                &ok_is_subtype, &not_subtype, NULL);

  check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
                                &ok_is_subtype, NULL);

  bind(not_subtype);
  profile_typecheck_failed(Rtmp1);
}

// Separate these two to allow for delay slot in middle
// These are used to do a test and full jump to exception-throwing code.

// %%%%% Could possibly reoptimize this by testing to see if we could use
// a single conditional branch (i.e. if the span is small enough).
// If you go that route, then get rid of the split and give up
// on the delay-slot hack.

void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  br(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  bp( ok_condition, true, Assembler::xcc, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition,
                                                  Label&    ok ) {
  assert_not_delayed();
  brx(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_2( address  throw_entry_point,
                                                Register Rscratch,
                                                Label&   ok ) {
  assert(throw_entry_point != NULL, "entry point must be generated by now");
  AddressLiteral dest(throw_entry_point);
  jump_to(dest, Rscratch);
  delayed()->nop();
  bind(ok);
}


// And if you cannot use the delay slot, here is a shorthand:

void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_icc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}

void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_xcc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}

void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition,
                                                address   throw_entry_point,
                                                Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_x( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
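// Usage pattern for the split throw_if helpers above (illustrative):
//   cmp(index, tmp);
//   throw_if_not_1_icc(lessUnsigned, index_ok);  // branch, delay slot left open
//   delayed()->sll(index, index_shift, index);   // caller fills the delay slot
//   throw_if_not_2(throw_entry, Rscratch, index_ok);
// index_check_without_pop below uses exactly this shape.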
// Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res
// Note: res is still shy of address by array offset into object.

void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  verify_oop(array);
#ifdef _LP64
  // sign extend since tos (index) can be a 32bit value
  sra(index, G0, index);
#endif // _LP64

  // check array
  Label ptr_ok;
  tst(array);
  throw_if_not_1_x( notZero, ptr_ok );
  delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index
  throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);

  Label index_ok;
  cmp(index, tmp);
  throw_if_not_1_icc( lessUnsigned, index_ok );
  if (index_shift > 0)  delayed()->sll(index, index_shift, index);
  else                  delayed()->add(array, index, res); // addr - const offset in index
  // convention: move aberrant index into G3_scratch for exception message
  mov(index, G3_scratch);
  throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);

  // add offset if didn't do it in delay slot
  if (index_shift > 0)  add(array, index, res); // addr - const offset in index
}


void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  // pop array
  pop_ptr(array);

  // check array
  index_check_without_pop(array, index, index_shift, tmp, res);
}


void InterpreterMacroAssembler::get_const(Register Rdst) {
  ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
  get_const(Rdst);
  ld_ptr(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
  get_constant_pool(Rdst);
  ld_ptr(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
}


void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
  get_constant_pool(Rcpool);
  ld_ptr(Rcpool, ConstantPool::tags_offset_in_bytes(), Rtags);
}
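// The accessors above chase the metadata chain
//   Lmethod (Method*) -> ConstMethod* -> ConstantPool* -> cache/tags,
// so get_constant_pool_cache costs three dependent loads starting from
// the method currently being interpreted.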
// unlock if synchronized method
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
                                                              bool throw_monitor_exception,
                                                              bool install_monitor_exception) {
  Label unlocked, unlock, no_unlock;

  // get the value of _do_not_unlock_if_synchronized into G1_scratch
  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  ldbool(do_not_unlock_if_synchronized, G1_scratch);
  stbool(G0, do_not_unlock_if_synchronized); // reset the flag

  // check if synchronized method
  const Address access_flags(Lmethod, Method::access_flags_offset());
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  push(state); // save tos
  ld(access_flags, G3_scratch); // Load access flags.
  btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
  br(zero, false, pt, unlocked);
  delayed()->nop();

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);
  delayed()->nop();

  // BasicObjectLock will be first in list, since this is a synchronized method. However, need
  // to check that the object has not been unlocked by an explicit monitorexit bytecode.

  //Intel: if (throw_monitor_exception) ... else ...
  // Entry already unlocked, need to throw exception
  //...

  // pass top-most monitor elem
  add( top_most_monitor(), O1 );

  ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
  br_notnull_short(G3_scratch, pt, unlock);

  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw an exception
    MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    ba_short(unlocked);
  }

  bind(unlock);

  unlock_object(O1);

  bind(unlocked);

  // I0, I1: Might contain return value
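  // The walk below scans the monitor area of the frame, which lives
  // between top_most_monitor() (bottom-most entry, highest address) and
  // Lmonitors (top-most entry, lowest address), in steps of
  // frame::interpreter_frame_monitor_size() words; any entry whose obj
  // field is still non-NULL was left locked and is an error at this point.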
  // Check that all monitors are unlocked
  { Label loop, exception, entry, restart;

    Register Rmptr   = O0;
    Register Rtemp   = O1;
    Register Rlimit  = Lmonitors;
    const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
    assert( (delta & LongAlignmentMask) == 0,
            "sizeof BasicObjectLock must be even number of doublewords");

#ifdef ASSERT
    add(top_most_monitor(), Rmptr, delta);
    { Label L;
      // ensure that Rmptr starts out above (or at) Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("monitor stack has negative size");
      bind(L);
    }
#endif
    bind(restart);
    ba(entry);
    delayed()->
    add(top_most_monitor(), Rmptr, delta);      // points to current entry, starting with bottom-most entry

    // Entry is still locked, need to throw exception
    bind(exception);
    if (throw_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame
      unlock_object(Rmptr);
      if (install_monitor_exception) {
        MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }
      ba_short(restart);
    }

    bind(loop);
    cmp(Rtemp, G0);                             // check if current entry is used
    brx(Assembler::notEqual, false, pn, exception);
    delayed()->
    dec(Rmptr, delta);                          // otherwise advance to next entry
#ifdef ASSERT
    { Label L;
      // ensure that Rmptr has not somehow stepped below Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("ran off the end of the monitor stack");
      bind(L);
    }
#endif
    bind(entry);
    cmp(Rmptr, Rlimit);                         // check if bottom reached
    brx(Assembler::notEqual, true, pn, loop);   // if not at bottom then check this entry
    delayed()->
    ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
  }

  bind(no_unlock);
  pop(state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}
// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception) {

  unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

  // save result (push state before jvmti call and pop it afterwards) and notify jvmti
  notify_method_exit(false, state, NotifyJVMTI);

  if (StackReservedPages > 0) {
    // testing if Stack Reserved Area needs to be re-enabled
    Label no_reserved_zone_enabling;
    ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G3_scratch);
    cmp_and_brx_short(SP, G3_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling);

    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_delayed_StackOverflowError), G2_thread);
    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }

  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  verify_thread();

  // return tos
  assert(Otos_l1 == Otos_i, "adjust code below");
  switch (state) {
#ifdef _LP64
  case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0
#else
  case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through  // O1 -> I1
#endif
  case btos:                                      // fall through
  case ctos:
  case stos:                                      // fall through
  case atos:                                      // fall through
  case itos: mov(Otos_l1, Otos_l1->after_save()); break; // O0 -> I0
  case ftos:                                      // fall through
  case dtos:                                      // fall through
  case vtos: /* nothing to do */                  break;
  default  : ShouldNotReachHere();
  }

#if defined(COMPILER2) && !defined(_LP64)
  if (state == ltos) {
    // C2 expects long results in G1; we can't tell if we're returning to
    // interpreted or compiled, so just be safe and use G1 and O0/O1

    // Shift bits into high (msb) of G1
    sllx(Otos_l1->after_save(), 32, G1);
    // Zero extend low bits
    srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
    or3 (Otos_l2->after_save(), G1, G1);
  }
#endif /* COMPILER2 */

}
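// Illustrative arithmetic for the COMPILER2/32-bit long packing above:
// with the high half 0x12345678 and the low half 0x9abcdef0 after the
// register window shift, sllx puts 0x1234567800000000 in G1, srl
// zero-extends the low word, and or3 yields G1 = 0x123456789abcdef0.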
// Lock object
//
// Argument - lock_reg points to the BasicObjectLock to be used for locking,
//            it must be initialized with the object to lock
void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
  }
  else {
    Register obj_reg = Object;
    Register mark_reg = G4_scratch;
    Register temp_reg = G1_scratch;
    Address  lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
    Address  mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label    done;

    Label slow_case;

    assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);

    // load markOop from object into mark_reg
    ld_ptr(mark_addr, mark_reg);

    if (UseBiasedLocking) {
      biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
    }

    // get the address of basicLock on stack that will be stored in the object
    // we need a temporary register here as we do not want to clobber lock_reg
    // (cas clobbers the destination register)
    mov(lock_reg, temp_reg);
    // set mark reg to be (markOop of object | UNLOCK_VALUE)
    or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
    // initialize the box  (Must happen before we update the object mark!)
    st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
    // compare and exchange object_addr, markOop | 1, stack address of basicLock
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), mark_reg, temp_reg);

    // if the compare and exchange succeeded we are done (we saw an unlocked object)
    cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);

    // We did not see an unlocked object so try the fast recursive case

    // Check if owner is self by comparing the value in the markOop of object
    // with the stack pointer
    sub(temp_reg, SP, temp_reg);
#ifdef _LP64
    sub(temp_reg, STACK_BIAS, temp_reg);
#endif
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");

    // Composite "andcc" test:
    // (a) %sp -vs- markword proximity check, and,
    // (b) verify mark word LSBs == 0 (Stack-locked).
    //
    // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
    // Note that the page size used for %sp proximity testing is arbitrary and is
    // unrelated to the actual MMU page size.  We use a 'logical' page size of
    // 4096 bytes. F..FFF003 is designed to fit conveniently in the SIMM13 immediate
    // field of the andcc instruction.
    andcc (temp_reg, 0xFFFFF003, G0) ;

    // if condition is true we are done and hence we can store 0 in the displaced
    // header indicating it is a recursive lock and be done
    brx(Assembler::zero, true, Assembler::pt, done);
    delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());

    // none of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);

    bind(done);
  }
}
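// Worked example for the composite andcc test above: if this thread
// already stack-locks the object, the mark word holds a pointer into our
// own stack, so temp_reg (mark - SP, unbiased) is a small positive
// distance with its two lock bits clear; anding with 0xFFFFF003
// (lock_mask_in_place | -4096) then yields zero and the delay-slot store
// marks the lock recursive. A distant address or a set lock bit makes the
// test non-zero and control falls through to the slow path.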
// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument - lock_reg points to the BasicObjectLock for lock
// Throw IllegalMonitorException if object is not locked by current thread
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Register obj_reg = G3_scratch;
    Register mark_reg = G4_scratch;
    Register displaced_header_reg = G1_scratch;
    Address  lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
    Address  mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label    done;

    if (UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
      biased_locking_exit(mark_addr, mark_reg, done, true);
      st_ptr(G0, lockobj_addr);  // free entry
    }

    // Test first if we are in the fast recursive case
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
    ld_ptr(lock_addr, displaced_header_reg);
    br_null(displaced_header_reg, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // See if it is still a light weight lock, if so we just unlock
    // the object and we are done

    if (!UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
    }

    // we have the displaced header in displaced_header_reg
    // we expect to see the stack address of the basicLock in case the
    // lock is still a light weight lock (lock_reg)
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg);
    cmp(lock_reg, displaced_header_reg);
    brx(Assembler::equal, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);
  }
}

// Get the method data pointer from the Method* and set the
// specified register to its value.

void InterpreterMacroAssembler::set_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label get_continue;

  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(get_continue);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  bind(get_continue);
}

// Set the method data pointer for the current bcp.

void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label zero_continue;

  // Test MDO to avoid the call if it is NULL.
  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(zero_continue);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  add(ImethodDataPtr, O0, ImethodDataPtr);
  bind(zero_continue);
}
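// Note on the two setters above: after set_method_data_pointer_for_bcp,
//   ImethodDataPtr = MDO + MethodData::data_offset() + di
// where di is the data index for the current bcp, returned in O0 by the
// InterpreterRuntime::bcp_to_di call; the method-entry variant omits di.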
// Test ImethodDataPtr. If it is null, continue at the specified label.

void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  test_method_data_pointer(verify_continue);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
  lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
  ld_ptr(Lmethod, Method::const_offset(), O5);
  add(G3_scratch, in_bytes(ConstMethod::codes_offset()), G3_scratch);
  add(G3_scratch, O5, G3_scratch);
  cmp(Lbcp, G3_scratch);
  brx(Assembler::equal, false, Assembler::pt, verify_continue);

  Register temp_reg = O5;
  delayed()->mov(ImethodDataPtr, temp_reg);
  // %%% should use call_VM_leaf here?
  //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
  save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
  stf(FloatRegisterImpl::D, Ftos_d, d_save);
  mov(temp_reg->after_save(), O2);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  ldf(FloatRegisterImpl::D, d_save, Ftos_d);
  restore();
  bind(verify_continue);
#endif // ASSERT
}

void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
                                                                Register method_counters,
                                                                Register Rtmp,
                                                                Label &profile_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Control will flow to "profile_continue" if the counter is less than the
  // limit or if we call profile_method()

  Label done;

  // if no method data exists, and the counter is high enough, make one
  br_notnull_short(ImethodDataPtr, Assembler::pn, done);

  // Test to see if we should create a method data oop
  Address profile_limit(method_counters, MethodCounters::interpreter_profile_limit_offset());
  ld(profile_limit, Rtmp);
  cmp(invocation_count, Rtmp);
  // Use long branches because call_VM() code and following code generated by
  // test_backedge_count_for_osr() is large in debug VM.
  br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
  delayed()->nop();

  // Build it now.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
  set_method_data_pointer_for_bcp();
  ba(profile_continue);
  delayed()->nop();
  bind(done);
}

// Store a value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  st_ptr(value, ImethodDataPtr, constant);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
                                                      Register bumped_count,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  // Load the counter.
  ld_ptr(counter, bumped_count);

  if (decrement) {
    // Decrement the register.  Set condition codes.
    subcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the decrement causes the counter to overflow, stay negative
    Label L;
    brx(Assembler::negative, true, Assembler::pn, L);

    // Store the decremented counter, if it is still negative.
    delayed()->st_ptr(bumped_count, counter);
    bind(L);
  } else {
    // Increment the register.  Set carry flag.
    addcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the increment causes the counter to overflow, pull back by 1.
    assert(DataLayout::counter_increment == 1, "subc works");
    subc(bumped_count, G0, bumped_count);

    // Store the incremented counter.
    st_ptr(bumped_count, counter);
  }
}
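// Worked example for the saturating increment above: counters bump by
// DataLayout::counter_increment (1). If addcc wraps a counter that is
// already all ones, the carry flag is set and subc(bumped_count, G0, ...)
// subtracts the borrow right back, leaving the counter pinned at its
// maximum instead of wrapping to zero.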
// Increment the value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
                                                      Register bumped_count,
                                                      bool decrement) {
  // Locate the counter at a fixed offset from the mdp:
  Address counter(ImethodDataPtr, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Increment the value at some non-fixed (reg + constant) offset from
// the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
                                                      int constant,
                                                      Register bumped_count,
                                                      Register scratch2,
                                                      bool decrement) {
  // Add the constant to reg to get the offset.
  add(ImethodDataPtr, reg, scratch2);
  Address counter(scratch2, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Set a flag value at the current method data pointer position.
// Updates a single byte of the header, to avoid races with other header bits.

void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
                                                Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the data header
  ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch);

  // Set the flag
  or3(scratch, flag_constant, scratch);

  // Store the modified header.
  stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset()));
}

// Test the location at some offset from the method data pointer.
// If it is not equal to value, branch to the not_equal_continue Label.
// Set condition codes to match the nullness of the loaded value.

void InterpreterMacroAssembler::test_mdp_data_at(int offset,
                                                 Register value,
                                                 Label& not_equal_continue,
                                                 Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset, scratch);
  cmp(value, scratch);
  brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue);
  delayed()->tst(scratch);
}

// Update the method data pointer by the displacement located at some fixed
// offset from the method data pointer.

void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset_of_disp, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by the displacement located at the
// offset (reg + offset_of_disp).

void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
                                                     int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(reg, offset_of_disp, scratch);
  ld_ptr(ImethodDataPtr, scratch, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by a simple constant displacement.

void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(ImethodDataPtr, constant, ImethodDataPtr);
}

// Update the method data pointer for a _ret bytecode whose target
// was not among our cached targets.

void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
                                                   Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(state);
  st_ptr(return_bci, l_tmp);  // protect return_bci, in case it is volatile
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
  ld_ptr(l_tmp, return_bci);
  pop(state);
}

// Count a taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are taking a branch.  Increment the taken count.
    increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
    bind(profile_continue);
  }
}


// Count a not-taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are not taking a branch.  Increment the not taken count.
    increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);

    // The method data pointer needs to be updated to correspond to the
    // next bytecode.
    update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}


// Count a non-virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}


// Count a final call in the bytecodes.

void InterpreterMacroAssembler::profile_final_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}


// Count a virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register scratch,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      br_notnull_short(receiver, Assembler::pt, not_null);
      // We are making a call.  Increment the count for null receiver.
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
      ba_short(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, scratch, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
#if INCLUDE_JVMCI
    if (MethodProfileWidth == 0) {
      update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    }
#else
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
#endif
    bind(profile_continue);
  }
}

#if INCLUDE_JVMCI
void InterpreterMacroAssembler::profile_called_method(Register method, Register scratch) {
  assert_different_registers(method, scratch);
  if (ProfileInterpreter && MethodProfileWidth > 0) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    Label done;
    record_item_in_profile_helper(method, scratch, 0, done, MethodProfileWidth,
      &VirtualCallData::method_offset, &VirtualCallData::method_count_offset, in_bytes(VirtualCallData::nonprofiled_receiver_count_offset()));
    bind(done);

    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}
#endif // INCLUDE_JVMCI

void InterpreterMacroAssembler::record_klass_in_profile_helper(Register receiver, Register scratch,
                                                               Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      increment_mdp_data_at(in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset()), scratch);
    }
#endif
  } else {
    int non_profiled_offset = -1;
    if (is_virtual_call) {
      non_profiled_offset = in_bytes(CounterData::count_offset());
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      non_profiled_offset = in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset());
    }
#endif

    record_item_in_profile_helper(receiver, scratch, 0, done, TypeProfileWidth,
      &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset, non_profiled_offset);
  }
}

void InterpreterMacroAssembler::record_item_in_profile_helper(Register item,
                                          Register scratch, int start_row, Label& done, int total_rows,
                                          OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
                                          int non_profiled_offset) {
  int last_row = total_rows - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the item and for null.
  // Take any of three different outcomes:
  //   1. found item => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the item is item[n].
    int item_offset = in_bytes(item_offset_fn(row));
    test_mdp_data_at(item_offset, item, next_test, scratch);
    // delayed()->tst(scratch);

    // The receiver is item[n].  Increment count[n].
    int count_offset = in_bytes(item_count_offset_fn(row));
    increment_mdp_data_at(count_offset, scratch);
    ba_short(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on item[n]...  Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (non_profiled_offset >= 0) {
          brx(Assembler::zero, false, Assembler::pn, found_null);
          delayed()->nop();
          // Item did not match any saved item and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(non_profiled_offset, scratch);
          ba_short(done);
          bind(found_null);
        } else {
          brx(Assembler::notZero, false, Assembler::pt, done);
          delayed()->nop();
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      brx(Assembler::zero, false, Assembler::pn, found_null);
      delayed()->nop();

      // Put all the "Case 3" tests here.
      record_item_in_profile_helper(item, scratch, start_row + 1, done, total_rows,
        item_offset_fn, item_count_offset_fn, non_profiled_offset);

      // Found a null.  Keep searching for a matching item,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching item, but we
  // observed that item[start_row] is NULL.

  // Fill in the item field and increment the count.
  int item_offset = in_bytes(item_offset_fn(start_row));
  set_mdp_data_at(item_offset, item);
  int count_offset = in_bytes(item_count_offset_fn(start_row));
  mov(DataLayout::counter_increment, scratch);
  set_mdp_data_at(count_offset, scratch);
  if (start_row > 0) {
    ba_short(done);
  }
}
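
// Taken together, the generated code performs, in effect, the following row
// search over the profile's type rows (a C sketch, illustrative only):
//
//   int free_row = -1;
//   for (int row = 0; row < total_rows; row++) {
//     if (mdp[item_offset(row)] == item) { mdp[count_offset(row)]++; return; }
//     if (mdp[item_offset(row)] == NULL && free_row < 0)  free_row = row;
//   }
//   if (free_row >= 0) {                  // no match: claim the first free row
//     mdp[item_offset(free_row)]  = item;
//     mdp[count_offset(free_row)] = counter_increment;
//   } else if (non_profiled_offset >= 0) {
//     mdp[non_profiled_offset]++;         // table full: polymorphic case
//   }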

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register scratch, bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, scratch, done, is_virtual_call);

  bind(done);
}


// Count a ret in the bytecodes.

void InterpreterMacroAssembler::profile_ret(TosState state,
                                            Register return_bci,
                                            Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
                       return_bci, next_test, scratch);

      // return_bci is equal to bci[n].  Increment the count.
      increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
      ba_short(profile_continue);
      bind(next_test);
    }

    // None of the cached rows matched: fall back to the runtime.
    update_mdp_for_ret(state, return_bci);

    bind(profile_continue);
  }
}

// Profile an unexpected null in the bytecodes.
void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(ReceiverTypeData::receiver_type_data_size());
    }
    update_mdp_by_constant(mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck(Register klass,
                                                  Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(ReceiverTypeData::receiver_type_data_size());

      // Record the object type.
      record_klass_in_profile(klass, scratch, false);
    }

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(ReceiverTypeData::receiver_type_data_size());

    // *Decrement* the counter.  We expect to see zero or small negatives.
    increment_mdp_data_at(count_offset, scratch, true);

    bind(profile_continue);
  }
}

// Count the default case of a switch construct.

void InterpreterMacroAssembler::profile_switch_default(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the default case count.
    increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
                          scratch);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(
      in_bytes(MultiBranchData::default_displacement_offset()),
      scratch);

    bind(profile_continue);
  }
}

// Count the index'th case of a switch construct.

void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register scratch,
                                                    Register scratch2,
                                                    Register scratch3) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes().
    set(in_bytes(MultiBranchData::per_case_size()), scratch);
    smul(index, scratch, scratch);
    add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch);

    // Update the case count.
    increment_mdp_data_at(scratch,
                          in_bytes(MultiBranchData::relative_count_offset()),
                          scratch2,
                          scratch3);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(scratch,
                         in_bytes(MultiBranchData::relative_displacement_offset()),
                         scratch2);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr, Register tmp) {
  Label not_null, do_nothing, do_update;

  assert_different_registers(obj, mdo_addr.base(), tmp);

  verify_oop(obj);

  ld_ptr(mdo_addr, tmp);

  br_notnull_short(obj, pt, not_null);
  or3(tmp, TypeEntries::null_seen, tmp);
  ba_short(do_update);

  bind(not_null);
  load_klass(obj, obj);

  xor3(obj, tmp, obj);
  btst(TypeEntries::type_klass_mask, obj);
  // klass seen before, nothing to do. The unknown bit may have been
  // set already but no need to check.
  brx(zero, false, pt, do_nothing);
  delayed()->   // delay slot: filled by the btst below

  btst(TypeEntries::type_unknown, obj);
  // already unknown. Nothing to do anymore.
  brx(notZero, false, pt, do_nothing);
  delayed()->   // delay slot: filled by the btst below

  btst(TypeEntries::type_mask, tmp);
  brx(zero, true, pt, do_update);
  // first time here. Set profile type.
  delayed()->or3(tmp, obj, tmp);

  // different than before. Cannot keep accurate profile.
  or3(tmp, TypeEntries::type_unknown, tmp);

  bind(do_update);
  // update profile
  st_ptr(tmp, mdo_addr);

  bind(do_nothing);
}
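
// What the generated code computes, roughly (a C-style sketch of the type
// profile cell state machine; a TypeEntries cell packs a klass pointer with
// the null_seen/type_unknown low bits in a single word):
//
//   intptr_t cell = *mdo_addr;
//   if (obj == NULL) {
//     *mdo_addr = cell | null_seen;
//   } else {
//     intptr_t k = (intptr_t) obj->klass();
//     if (((k ^ cell) & type_klass_mask) == 0)  ; // same klass as before: nothing
//     else if (cell & type_unknown)             ; // already polymorphic: nothing
//     else if ((cell & type_mask) == 0)  *mdo_addr = cell | k;  // first klass seen
//     else                               *mdo_addr = cell | type_unknown;  // conflict
//   }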

void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  assert_different_registers(callee, tmp1, tmp2, ImethodDataPtr);

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    ldub(ImethodDataPtr, in_bytes(DataLayout::tag_offset()) - off_to_start, tmp1);
    cmp_and_br_short(tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag, notEqual, pn, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
      add(ImethodDataPtr, off_to_args, ImethodDataPtr);

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If return value type is profiled we may have no argument to profile.
          ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);
          sub(tmp1, i*TypeStackSlotEntries::per_arg_count(), tmp1);
          cmp_and_br_short(tmp1, TypeStackSlotEntries::per_arg_count(), less, pn, done);
        }
        ld_ptr(Address(callee, Method::const_offset()), tmp1);
        lduh(Address(tmp1, ConstMethod::size_of_parameters_offset()), tmp1);
        // stack offset o (zero based) from the start of the argument
        // list, for n arguments translates into offset n - o - 1 from
        // the end of the argument list. But there's an extra slot at
        // the top of the stack. So the offset is n - o from Lesp.
        ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, tmp2);
        sub(tmp1, tmp2, tmp1);

        // Can't use MacroAssembler::argument_address(), which needs Gargs to be set up.
        sll(tmp1, Interpreter::logStackElementSize, tmp1);
        ld_ptr(Lesp, tmp1, tmp1);

        Address mdo_arg_addr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
        profile_obj_type(tmp1, mdo_arg_addr, tmp2);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        add(ImethodDataPtr, to_add, ImethodDataPtr);
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);
        sub(tmp1, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count(), tmp1);
      }

      bind(done);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp1 is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-null
        // if there's a return to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        sll(tmp1, exact_log2(DataLayout::cell_size), tmp1);
        add(ImethodDataPtr, tmp1, ImethodDataPtr);
      }
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one.

    bind(profile_continue);
  }
}
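
// The slot-to-address arithmetic above, in plain C (a sketch; n is the
// callee's size_of_parameters and o is the zero-based stack_slot recorded in
// the profile cell):
//
//   // argument in slot o of n lives (n - o) word slots above Lesp
//   intptr_t* arg = (intptr_t*) Lesp + (n - o);
//   profile_obj_type(*arg, mdo_arg_addr);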

void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
  assert_different_registers(ret, tmp1, tmp2);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length.
      Label do_profile;
      ldub(Lbcp, 0, tmp1);
      cmp_and_br_short(tmp1, Bytecodes::_invokedynamic, equal, pn, do_profile);
      cmp(tmp1, Bytecodes::_invokehandle);
      br(equal, false, pn, do_profile);
      delayed()->lduh(Lmethod, Method::intrinsic_id_offset_in_bytes(), tmp1);
      cmp_and_br_short(tmp1, vmIntrinsics::_compiledLambdaForm, notEqual, pt, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(ImethodDataPtr, -in_bytes(ReturnTypeEntry::size()));
    mov(ret, tmp1);
    profile_obj_type(tmp1, mdo_ret_addr, tmp2);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters.
    lduw(ImethodDataPtr, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), tmp1);
    cmp_and_br_short(tmp1, 0, less, pn, profile_continue);

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameter. Collect profiling from last parameter down.
    // mdo start + parameters offset + array length - 1

    // Pointer to the parameter area in the MDO.
    Register mdp = tmp1;
    add(ImethodDataPtr, tmp1, mdp);

    // Offset of the current profile entry to update.
    Register entry_offset = tmp2;
    // entry_offset = array len in number of cells
    ld_ptr(mdp, ArrayData::array_len_offset(), entry_offset);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");

    // entry_offset (number of cells) = array len - size of 1 entry + offset of the stack slot field
    sub(entry_offset, TypeStackSlotEntries::per_arg_count() - (off_base / DataLayout::cell_size), entry_offset);
    // entry_offset in bytes
    sll(entry_offset, exact_log2(DataLayout::cell_size), entry_offset);

    Label loop;
    bind(loop);

    // Load the offset on the stack from the slot for this parameter.
    ld_ptr(mdp, entry_offset, tmp3);
    sll(tmp3, Interpreter::logStackElementSize, tmp3);
    neg(tmp3);
    // Read the parameter from the local area.
    ld_ptr(Llocals, tmp3, tmp3);

    // Make entry_offset now point to the type field for this parameter.
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    assert(type_base > off_base, "unexpected");
    add(entry_offset, type_base - off_base, entry_offset);

    // Profile the parameter.
    Address arg_type(mdp, entry_offset);
    profile_obj_type(tmp3, arg_type, tmp4);

    // Go to the next parameter.
    sub(entry_offset, TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base), entry_offset);
    cmp_and_br_short(entry_offset, off_base, greaterEqual, pt, loop);

    bind(profile_continue);
  }
}
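
// The loop above walks the ParametersTypeData rows from the last parameter
// down to the first, in effect (a C sketch, illustrative only):
//
//   for (int i = num_rows - 1; i >= 0; i--) {
//     int slot = mdp[stack_slot_offset(i)];   // local slot of parameter i
//     oop  obj = *(Llocals - slot);           // locals grow downward from Llocals
//     profile_obj_type(obj, &mdp[type_offset(i)]);
//   }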

// Add an InterpMonitorElem to the stack (see frame_sparc.hpp).

void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
                                                      Register Rtemp,
                                                      Register Rtemp2 ) {

  Register Rlimit = Lmonitors;
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  assert( (delta & LongAlignmentMask) == 0,
          "sizeof BasicObjectLock must be even number of doublewords");

  sub( SP,        delta, SP);
  sub( Lesp,      delta, Lesp);
  sub( Lmonitors, delta, Lmonitors);

  if (!stack_is_empty) {

    // must copy stack contents down

    Label start_copying, next;

    // untested("monitor stack expansion");
    compute_stack_base(Rtemp);
    ba(start_copying);
    delayed()->cmp(Rtemp, Rlimit); // done? duplicated below

    // note: must copy from low memory upwards
    // On entry to loop,
    // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
    // Loop mutates Rtemp

    bind( next);

    st_ptr(Rtemp2, Rtemp, 0);
    inc(Rtemp, wordSize);
    cmp(Rtemp, Rlimit); // are we done? (duplicated above)

    bind( start_copying );

    brx( notEqual, true, pn, next );
    delayed()->ld_ptr( Rtemp, delta, Rtemp2 );

    // done copying stack
  }
}
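
// The rotated loop above slides the expression stack down by `delta` bytes to
// open a monitor-sized gap, in effect (a C sketch, illustrative only):
//
//   for (intptr_t* p = new_stack_base; p != Lmonitors; p++) {
//     *p = *(intptr_t*) ((char*) p + delta);   // pull each word down
//   }
//
// The load sits in the delay slot of the annulling loop branch, so the final
// (not-taken) iteration performs no extra load.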

// Locals
void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld_ptr(index, 0, dst);
  // Note: index must hold the effective address--the iinc template uses it
}

// Just like access_local_ptr but the tag is a returnAddress
void InterpreterMacroAssembler::access_local_returnAddress(Register index,
                                                           Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld_ptr(index, 0, dst);
}

void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld(index, 0, dst);
  // Note: index must hold the effective address--the iinc template uses it
}


void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  // First half stored at index n+1 (which grows down from Llocals[n])
  load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
}


void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ldf(FloatRegisterImpl::S, index, 0, dst);
}


void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
}


#ifdef ASSERT
void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
  Label L;

  assert(Rindex != Rscratch,    "Registers cannot be same");
  assert(Rindex != Rscratch1,   "Registers cannot be same");
  assert(Rlimit != Rscratch,    "Registers cannot be same");
  assert(Rlimit != Rscratch1,   "Registers cannot be same");
  assert(Rscratch1 != Rscratch, "Registers cannot be same");

  // untested("reg area corruption");
  add(Rindex, offset, Rscratch);
  add(Rlimit, 64 + STACK_BIAS, Rscratch1);
  cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
  stop("regsave area is being clobbered");
  bind(L);
}
#endif // ASSERT
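
// Local variable addressing, for reference (a sketch): locals grow downward
// from Llocals, so local slot i lives at Llocals - i * wordSize.  The
// accessors above compute that effective address in `index` and then load
// through it:
//
//   index = Llocals - (index << logStackElementSize);
//   dst   = *(intptr_t*) index;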

void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
  st(src, index, 0);
}

void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
  st_ptr(src, index, 0);
}


void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
  st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
}

void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
  store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1
}


void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
  stf(FloatRegisterImpl::S, src, index, 0);
}


void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
  store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
}


int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
  return ((-rounded_vm_local_words * wordSize) - delta) + STACK_BIAS;
}


Address InterpreterMacroAssembler::top_most_monitor() {
  return Address(FP, top_most_monitor_byte_offset());
}


void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
  add( Lesp, wordSize, Rdest );
}
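
// Frame-layout arithmetic in top_most_monitor_byte_offset() above, for
// reference (a sketch): the topmost monitor sits just below the long-aligned
// interpreter VM locals, and every 64-bit SPARC stack address is biased:
//
//   offset = -(round_to(vm_local_words, WordsPerLong) * wordSize) // below locals
//            - monitor_size_in_bytes                              // one monitor
//            + STACK_BIAS;                                        // V9 bias
//   top_most_monitor = FP + offset;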

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register Rcounters,
                                                    Label& skip) {
  Label has_counters;
  Address method_counters(method, in_bytes(Method::method_counters_offset()));
  ld_ptr(method_counters, Rcounters);
  br_notnull_short(Rcounters, Assembler::pt, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ld_ptr(method_counters, Rcounters);
  br_null(Rcounters, false, Assembler::pn, skip); // No MethodCounters, OutOfMemory
  delayed()->nop();
  bind(has_counters);
}

void InterpreterMacroAssembler::increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) {
  assert(UseCompiler || LogTouchedMethods, "incrementing must be useful");
  assert_different_registers(Rcounters, Rtmp, Rtmp2);

  Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() +
                                 InvocationCounter::counter_offset());
  Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() +
                                 InvocationCounter::counter_offset());
  int delta = InvocationCounter::count_increment;

  // Load each counter in a register.
  ld( inv_counter, Rtmp );
  ld( be_counter, Rtmp2 );

  assert( is_simm13( delta ), " delta too large.");

  // Add the delta to the invocation counter and store the result.
  add( Rtmp, delta, Rtmp );

  // Mask the backedge counter.
  and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );

  // Store value.
  st( Rtmp, inv_counter);

  // Add invocation counter + backedge counter.
  add( Rtmp, Rtmp2, Rtmp);

  // Note that this macro must leave the backedge_count + invocation_count in Rtmp!
}

void InterpreterMacroAssembler::increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) {
  assert(UseCompiler, "incrementing must be useful");
  assert_different_registers(Rcounters, Rtmp, Rtmp2);

  Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() +
                                 InvocationCounter::counter_offset());
  Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() +
                                 InvocationCounter::counter_offset());

  int delta = InvocationCounter::count_increment;
  // Load each counter in a register.
  ld( be_counter, Rtmp );
  ld( inv_counter, Rtmp2 );

  // Add the delta to the backedge counter.
  add( Rtmp, delta, Rtmp );

  // Mask the invocation counter, add to backedge counter.
  and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );

  // And store the result to memory.
  st( Rtmp, be_counter );

  // Add backedge + invocation counter.
  add( Rtmp, Rtmp2, Rtmp );

  // Note that this macro must leave backedge_count + invocation_count in Rtmp!
}
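
// Both counter helpers implement the same pattern (a C sketch, illustrative
// only):
//
//   bumped = *bumped_counter + count_increment;  // raw value, status bits and all
//   *bumped_counter = bumped;
//   other  = *other_counter & count_mask_value;  // strip the status bits
//   Rtmp   = bumped + other;                     // combined count for the
//                                                // compilation-threshold test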

void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
                                                             Register method_counters,
                                                             Register branch_bcp,
                                                             Register Rtmp ) {
  Label did_not_overflow;
  Label overflow_with_error;
  assert_different_registers(backedge_count, Rtmp, branch_bcp);
  assert(UseOnStackReplacement, "Must UseOnStackReplacement to test_backedge_count_for_osr");

  Address limit(method_counters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
  ld(limit, Rtmp);
  cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);

  // When ProfileInterpreter is on, the backedge_count comes from the
  // MethodData*, which value does not get reset on the call to
  // frequency_counter_overflow().  To avoid excessive calls to the overflow
  // routine while the method is being compiled, add a second test to make sure
  // the overflow function is called only once every overflow_frequency.
  if (ProfileInterpreter) {
    const int overflow_frequency = 1024;
    andcc(backedge_count, overflow_frequency-1, Rtmp);
    brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
    delayed()->nop();
  }

  // overflow in loop, pass branch bytecode
  set(6, Rtmp);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);

  // Was an OSR adapter generated?
  // O0 = osr nmethod
  br_null_short(O0, Assembler::pn, overflow_with_error);

  // Has the nmethod been invalidated already?
  ldub(O0, nmethod::state_offset(), O2);
  cmp_and_br_short(O2, nmethod::in_use, Assembler::notEqual, Assembler::pn, overflow_with_error);

  // Migrate the interpreter frame off of the stack.

  mov(G2_thread, L7);
  // Save the nmethod.
  mov(O0, L6);
  set_last_Java_frame(SP, noreg);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
  reset_last_Java_frame();
  mov(L7, G2_thread);

  // Move the OSR nmethod to I1.
  mov(L6, I1);

  // OSR buffer to I0.
  mov(O0, I0);

  // Remove the interpreter frame.
  restore(I5_savedSP, 0, SP);

  // Jump to the osr code.
  // (After the restore, the nmethod saved in I1 is visible here as O1.)
  ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
  jmp(O2, G0);
  delayed()->nop();

  bind(overflow_with_error);

  bind(did_not_overflow);
}
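
// The ProfileInterpreter throttle above, in plain C (a sketch): the MDO
// backedge count is never reset, so an overflow is re-reported only on every
// 1024th crossing of the limit:
//
//   if (backedge_count >= limit &&
//       (backedge_count & (1024 - 1)) == 0) {
//     frequency_counter_overflow(branch_bcp);  // may return an OSR nmethod in O0
//   }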


void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) {
  if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); }
}


// Local helper function for the verify_oop_or_return_address macro.
static bool verify_return_address(Method* m, int bci) {
#ifndef PRODUCT
  address pc = (address)(m->constMethod())
             + in_bytes(ConstMethod::codes_offset()) + bci;
  // Assume it is a valid return address if it is inside m and is preceded by a jsr.
  if (!m->contains(pc))                                         return false;
  address jsr_pc;
  jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
  if (*jsr_pc == Bytecodes::_jsr && jsr_pc >= m->code_base())   return true;
  jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
  if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base()) return true;
#endif // PRODUCT
  return false;
}


void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
  if (!VerifyOops) return;
  // The VM documentation for the astore[_wide] bytecode allows
  // the TOS to be not only an oop but also a return address.
  Label test;
  Label skip;
  // See if it is an address (in the current method):

  mov(reg, Rtmp);
  const int log2_bytecode_size_limit = 16;
  srl(Rtmp, log2_bytecode_size_limit, Rtmp);
  br_notnull_short( Rtmp, pt, test );

  // %%% should use call_VM_leaf here?
  save_frame_and_mov(0, Lmethod, O0, reg, O1);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, verify_return_address), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  br_notnull( O0, false, pt, skip );
  delayed()->restore();

  // Perform a more elaborate out-of-line call.
  // Not an address; verify it:
  bind(test);
  verify_oop(reg);
  bind(skip);
}


void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
  if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
}


// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask_addr,
                                                        Register scratch1, Register scratch2,
                                                        Condition cond, Label *where) {
  ld(counter_addr, scratch1);
  add(scratch1, increment, scratch1);
  ld(mask_addr, scratch2);
  andcc(scratch1, scratch2, G0);
  br(cond, false, Assembler::pn, *where);
  delayed()->st(scratch1, counter_addr);  // delay slot (not annulled): the updated
                                          // counter is stored back either way
}
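
// In C terms (a sketch; the mask controls how often the jump fires, e.g. for
// counter-overflow checks that should only trigger periodically):
//
//   int c = *counter_addr + increment;
//   *counter_addr = c;
//   if (cond_holds(c & *mask_addr)) goto *where;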

// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   InterpreterRuntime::post_method_entry();
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_entry(method, receiver);
// }
// if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
//   SharedRuntime::rc_trace_method_entry(method, receiver);
// }

void InterpreterMacroAssembler::notify_method_entry() {

  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, temp_reg);
    cmp_and_br_short(temp_reg, 0, equal, pt, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    Register temp_reg = O5;
    SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
    call_VM_leaf(noreg,
                 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 G2_thread, Lmethod);
  }

  // RedefineClasses() tracing support for obsolete method entry.
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    call_VM_leaf(noreg,
                 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
                 G2_thread, Lmethod);
  }
}


// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   // save result
//   InterpreterRuntime::post_method_exit();
//   // restore result
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_exit(thread, method);
// }
//
// Native methods have their result stored in d_tmp and l_tmp.
// Java methods have their result stored in the expression stack.

void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
                                                   TosState state,
                                                   NotifyMethodExitMode mode) {

  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, temp_reg);
    cmp_and_br_short(temp_reg, 0, equal, pt, L);

    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit.  For
    // native methods it assumes the result registers are saved to
    // l_scratch and d_scratch.  If this changes then the interpreter_frame_result
    // implementation will need to be updated too.

    save_return_value(state, is_native_method);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    restore_return_value(state, is_native_method);
    bind(L);
  }

  {
    Register temp_reg = O5;
    // Dtrace notification
    SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
    save_return_value(state, is_native_method);
    call_VM_leaf(
      noreg,
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
      G2_thread, Lmethod);
    restore_return_value(state, is_native_method);
  }
}

void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
  if (is_native_call) {
    stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
    stx(O0, l_tmp);
#else
    std(O0, l_tmp);
#endif
  } else {
    push(state);
  }
}

void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
  if (is_native_call) {
    ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
    ldx(l_tmp, O0);
#else
    ldd(l_tmp, O0);
#endif
  } else {
    pop(state);
  }
}