/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interp_masm_sparc.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/methodCounters.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"

#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH

// Implementation of InterpreterMacroAssembler

// This file specializes the assembler with interpreter-specific macros.

const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  AddressLiteral al(entry);
  jump_to(al, G3_scratch);
  delayed()->nop();
}

void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  // Note: this algorithm is also used by C1's OSR entry sequence.
  // Any changes should also be applied to CodeEmitter::emit_osr_entry().
  assert_different_registers(args_size, locals_size);
  // max_locals*2 for TAGS.  Assumes that args_size has already been adjusted.
  subcc(locals_size, args_size, delta); // extra space for non-arguments locals in words
  // Use br/mov combination because it works on both V8 and V9 and is
  // faster.
  Label skip_move;
  br(Assembler::negative, true, Assembler::pt, skip_move);
  delayed()->mov(G0, delta);
  bind(skip_move);
  round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
  sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
}
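// Illustrative walk-through (not generated code): with locals_size = 5 words
// and args_size = 2 words, delta becomes 3, is rounded up to 4 so SP stays
// 2-word aligned, and is then scaled to 4 * wordSize bytes of extra space
// for the non-argument locals.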
// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch. The dispatch address is computed and placed in IdispatchAddress.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  assert_not_delayed();
#ifdef FAST_DISPATCH
  // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
  // they both use I2.
  assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
  ldub(Lbcp, bcp_incr, Lbyte_code);                     // load next bytecode
  add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                        // add offset to correct dispatch table
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);         // multiply by wordSize
  ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr
#else
  ldub( Lbcp, bcp_incr, Lbyte_code);                    // load next bytecode
  // dispatch table to use
  AddressLiteral tbl(Interpreter::dispatch_table(state));
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);         // multiply by wordSize
  set(tbl, G3_scratch);                                 // compute addr of table
  ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress);     // get entry addr
#endif
}


// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch. The dispatch address in IdispatchAddress is used for the
// dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  assert_not_delayed();
  verify_FPU(1, state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  jmp( IdispatchAddress, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
}


void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);
}


void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  assert_not_delayed();
  ldub( Lbcp, 0, Lbyte_code);                      // load next bytecode
  dispatch_base(state, table);
}
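// The dispatch helpers above all funnel the next bytecode through a
// per-TosState table: the byte loaded from Lbcp indexes the table (scaled
// by wordSize) and control transfers to the resulting entry. Illustrative
// example (not generated code): dispatching bytecode 0x60 (iadd) with itos
// on top of stack jumps to Interpreter::dispatch_table(itos)[0x60].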
void InterpreterMacroAssembler::call_VM_leaf_base(
  Register java_thread,
  address  entry_point,
  int      number_of_arguments
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // super call
  MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);
}


void InterpreterMacroAssembler::call_VM_base(
  Register oop_result,
  Register java_thread,
  Register last_java_sp,
  address  entry_point,
  int      number_of_arguments,
  bool     check_exception
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // See class ThreadInVMfromInterpreter, which assumes that the interpreter
  // takes responsibility for setting its own thread-state on call-out.
  // However, ThreadInVMfromInterpreter resets the state to "in_Java".

  //save_bcp();                                  // save bcp
  MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
  //restore_bcp();                               // restore bcp
  //restore_locals();                            // restore locals pointer
}


void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;

    // Check the "pending popframe condition" flag in the current thread
    ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);

    // Initiate popframe handling only if it is not already being processed. If the flag
    // has the popframe_processing bit set, it means that this code is called *during* popframe
    // handling - we don't want to reenter.
    btst(JavaThread::popframe_pending_bit, scratch_reg);
    br(zero, false, pt, L);
    delayed()->nop();
    btst(JavaThread::popframe_processing_bit, scratch_reg);
    br(notZero, false, pt, L);
    delayed()->nop();

    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));

    // Jump to Interpreter::_remove_activation_preserving_args_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thr_state = G4_scratch;
  ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
  const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
  case ltos: ld_long(val_addr, Otos_l);                   break;
  case atos: ld_ptr(oop_addr, Otos_l);
             st_ptr(G0, oop_addr);                        break;
  case btos:                                       // fall through
  case ztos:                                       // fall through
  case ctos:                                       // fall through
  case stos:                                       // fall through
  case itos: ld(val_addr, Otos_l1);                       break;
  case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;
  case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;
  case vtos: /* nothing to do */                          break;
  default  : ShouldNotReachHere();
  }
  // Clean up tos value in the jvmti thread state
  or3(G0, ilgl, G3_scratch);
  stw(G3_scratch, tos_addr);
  st_long(G0, val_addr);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}
void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register thr_state = G3_scratch;
    ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
    br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
    cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code
    ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);

    // Jump to Interpreter::_remove_activation_early_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1);
  MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
}

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
  assert_not_delayed();
  dispatch_Lbyte_code(state, table);
}


void InterpreterMacroAssembler::dispatch_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}


void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}


// common code to dispatch and dispatch_only
// dispatch value in Lbyte_code and increment Lbcp

void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
  verify_FPU(1, state);
  // %%%%% maybe implement +VerifyActivationFrameSize here
  //verify_thread(); //too slow; we will just verify on method entry & exit
  if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
#ifdef FAST_DISPATCH
  if (table == Interpreter::dispatch_table(state)) {
    // use IdispatchTables
    add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                      // add offset to correct dispatch table
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    ld_ptr(IdispatchTables, Lbyte_code, G3_scratch);  // get entry addr
  } else {
#endif
    // dispatch table to use
    AddressLiteral tbl(table);
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    set(tbl, G3_scratch);                             // compute addr of table
    ld_ptr(G3_scratch, Lbyte_code, G3_scratch);       // get entry addr
#ifdef FAST_DISPATCH
  }
#endif
  jmp( G3_scratch, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}


// Helpers for expression stack

// Longs and doubles are Category 2 computational types in the
// JVM specification (section 3.11.1) and take 2 expression stack or
// local slots.
// Aligning them on 32 bit with tagged stacks is hard because the code generated
// for the dup* bytecodes depends on what types are already on the stack.
// If the types are split into the two stack/local slots, that is much easier
// (and we can use 0 for non-reference tags).
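// The helpers below reflect this: on 32-bit, a category 2 value is moved as
// two single-word accesses at offset and offset + stackElementSize, while on
// _LP64 a single 64-bit access suffices.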
// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
  assert_not_delayed();

#ifdef _LP64
  ldf(FloatRegisterImpl::D, r1, offset, d);
#else
  ldf(FloatRegisterImpl::S, r1, offset, d);
  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stf(FloatRegisterImpl::D, d, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  stf(FloatRegisterImpl::S, d, r1, offset);
  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}


// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
  assert_not_delayed();
#ifdef _LP64
  ldx(r1, offset, rd);
#else
  ld(r1, offset, rd);
  ld(r1, offset + Interpreter::stackElementSize, rd->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stx(l, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  st(l, r1, offset);
  st(l->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}

void InterpreterMacroAssembler::pop_i(Register r) {
  assert_not_delayed();
  ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
  assert_not_delayed();
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  assert_not_delayed();
  load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
  assert_not_delayed();
  ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
  assert_not_delayed();
  load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::push_i(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  st(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  assert_not_delayed();
  st_ptr(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}
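// Note that the expression stack grows toward lower addresses: the pops
// above do inc(Lesp, ...) and the pushes do dec(Lesp, ...).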
// remember: our convention for longs in SPARC is:
// O0 (Otos_l1) has high-order part in first word,
// O1 (Otos_l2) has low-order part in second word

void InterpreterMacroAssembler::push_l(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_long(r, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_f(FloatRegister f) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  stf(FloatRegisterImpl::S, f, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_d(FloatRegister d) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_double(d, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  switch (state) {
  case atos: push_ptr();           break;
  case btos:                       // fall through
  case ztos:                       // fall through
  case ctos:                       // fall through
  case stos:                       // fall through
  case itos: push_i();             break;
  case ltos: push_l();             break;
  case ftos: push_f();             break;
  case dtos: push_d();             break;
  case vtos: /* nothing to do */   break;
  default  : ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();            break;
  case btos:                       // fall through
  case ztos:                       // fall through
  case ctos:                       // fall through
  case stos:                       // fall through
  case itos: pop_i();              break;
  case ltos: pop_l();              break;
  case ftos: pop_f();              break;
  case dtos: pop_d();              break;
  case vtos: /* nothing to do */   break;
  default  : ShouldNotReachHere();
  }
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
}
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
}


void InterpreterMacroAssembler::load_receiver(Register param_count,
                                              Register recv) {
  sll(param_count, Interpreter::logStackElementSize, param_count);
  ld_ptr(Lesp, param_count, recv);  // gets receiver oop
}
void InterpreterMacroAssembler::empty_expression_stack() {
  // Reset Lesp.
  sub( Lmonitors, wordSize, Lesp );

  // Reset SP by subtracting more space from Lesp.
  Label done;
  assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");

  // A native does not need to do this, since its callee does not change SP.
  ld(Lmethod, Method::access_flags_offset(), Gframe_size);  // Load access flags.
  btst(JVM_ACC_NATIVE, Gframe_size);
  br(Assembler::notZero, false, Assembler::pt, done);
  delayed()->nop();

  // Compute max expression stack+register save area
  ld_ptr(Lmethod, in_bytes(Method::const_offset()), Gframe_size);
  lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size);  // Load max stack.
  add(Gframe_size, frame::memory_parameter_word_sp_offset+Method::extra_stack_entries(), Gframe_size );

  //
  // now set up a stack frame with the size computed above
  //
  //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
  sll( Gframe_size, LogBytesPerWord, Gframe_size );
  sub( Lesp, Gframe_size, Gframe_size );
  and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary
  debug_only(verify_sp(Gframe_size, G4_scratch));
#ifdef _LP64
  sub(Gframe_size, STACK_BIAS, Gframe_size );
#endif
  mov(Gframe_size, SP);

  bind(done);
}


#ifdef ASSERT
void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
  Label Bad, OK;

  // Saved SP must be aligned.
#ifdef _LP64
  btst(2*BytesPerWord-1, Rsp);
#else
  btst(LongAlignmentMask, Rsp);
#endif
  br(Assembler::notZero, false, Assembler::pn, Bad);
  delayed()->nop();

  // Saved SP, plus register window size, must not be above FP.
  add(Rsp, frame::register_save_words * wordSize, Rtemp);
#ifdef _LP64
  sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
#endif
  cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);

  // Saved SP must not be ridiculously below current SP.
  size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
  set(maxstack, Rtemp);
  sub(SP, Rtemp, Rtemp);
#ifdef _LP64
  add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
#endif
  cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);

  ba_short(OK);

  bind(Bad);
  stop("on return to interpreted call, restored SP is corrupted");

  bind(OK);
}


void InterpreterMacroAssembler::verify_esp(Register Resp) {
  // about to read or write Resp[0]
  // make sure it is not in the monitors or the register save area
  Label OK1, OK2;

  cmp(Resp, Lmonitors);
  brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
  delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pops: Lesp points into monitor area");
  bind(OK1);
#ifdef _LP64
  sub(Resp, STACK_BIAS, Resp);
#endif
  cmp(Resp, SP);
  brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
  delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pushes: Lesp points into register window");
  bind(OK2);
}
#endif // ASSERT
// Load compiled (i2c) or interpreter entry when calling from interpreted and
// do the call. Centralized so that all interpreter calls will do the same actions.
// If jvmti single stepping is on for a thread we must not call compiled code.
void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {

  // Assume we want to go compiled if available

  ld_ptr(G5_method, in_bytes(Method::from_interpreted_offset()), target);

  if (JvmtiExport::can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    verify_thread();
    Label skip_compiled_code;

    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, scratch);
    cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
    delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
    bind(skip_compiled_code);
  }

  // the i2c_adapters need Method* in G5_method (right? %%%)
  // do the call
#ifdef ASSERT
  {
    Label ok;
    br_notnull_short(target, Assembler::pt, ok);
    stop("null entry point");
    bind(ok);
  }
#endif // ASSERT

  // Adjust Rret first so Llast_SP can be same as Rret
  add(Rret, -frame::pc_return_offset, O7);
  add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
  // Record SP so we can remove any stack space allocated by adapter transition
  jmp(target, 0);
  delayed()->mov(SP, Llast_SP);
}

void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
  assert_not_delayed();

  Label not_taken;
  if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
  else             br (cc, false, Assembler::pn, not_taken);
  delayed()->nop();

  TemplateTable::branch(false,false);

  bind(not_taken);

  profile_not_taken_branch(G3_scratch);
}


void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(
                                  int         bcp_offset,
                                  Register    Rtmp,
                                  Register    Rdst,
                                  signedOrNot is_signed,
                                  setCCOrNot  should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  switch (is_signed) {
  default: ShouldNotReachHere();

  case   Signed: ldsb( Lbcp, bcp_offset, Rdst );  break;  // high byte
  case Unsigned: ldub( Lbcp, bcp_offset, Rdst );  break;  // high byte
  }
  ldub( Lbcp, bcp_offset + 1, Rtmp );  // low byte
  sll( Rdst, BitsPerByte, Rdst);
  switch (should_set_CC ) {
  default: ShouldNotReachHere();

  case      set_CC: orcc( Rdst, Rtmp, Rdst ); break;
  case dont_set_CC: or3(  Rdst, Rtmp, Rdst ); break;
  }
}
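// Illustrative example (not generated code): for stream bytes { 0x12, 0x34 }
// at Lbcp + bcp_offset, the Unsigned variant assembles
// (0x12 << BitsPerByte) | 0x34 == 0x1234 into Rdst.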
void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
                                  int        bcp_offset,
                                  Register   Rtmp,
                                  Register   Rdst,
                                  setCCOrNot should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  add( Lbcp, bcp_offset, Rtmp);
  andcc( Rtmp, 3, G0);
  Label aligned;
  switch (should_set_CC ) {
  default: ShouldNotReachHere();

  case      set_CC: break;
  case dont_set_CC: break;
  }

  br(Assembler::zero, true, Assembler::pn, aligned);
#ifdef _LP64
  delayed()->ldsw(Rtmp, 0, Rdst);
#else
  delayed()->ld(Rtmp, 0, Rdst);
#endif

  ldub(Lbcp, bcp_offset + 3, Rdst);
  ldub(Lbcp, bcp_offset + 2, Rtmp);  sll(Rtmp,  8, Rtmp);  or3(Rtmp, Rdst, Rdst);
  ldub(Lbcp, bcp_offset + 1, Rtmp);  sll(Rtmp, 16, Rtmp);  or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
  ldsb(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#else
  // Unsigned load is faster than signed on some implementations
  ldub(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#endif
  or3(Rtmp, Rdst, Rdst );

  bind(aligned);
  if (should_set_CC == set_CC) tst(Rdst);
}

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register temp, Register index,
                                                       int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned);
  } else if (index_size == sizeof(u4)) {
    get_4_byte_integer_at_bcp(bcp_offset, temp, index);
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    xor3(index, -1, index);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    ldub(Lbcp, bcp_offset, index);
  } else {
    ShouldNotReachHere();
  }
}
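// Note: invokedynamic indices are stored in the bytecode stream as bitwise
// complements (see the decode_invokedynamic_index assert above), so the
// xor3 with -1 is simply ~index.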
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
                                                           int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
  // convert from field index to ConstantPoolCacheEntry index and from
  // word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  add(LcpoolCache, tmp, cache);
}


void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register temp,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
  ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
  const int shift_count = (1 + byte_no) * BitsPerByte;
  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
         "correct shift count");
  srl(bytecode, shift_count, bytecode);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode);
}


void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                               int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  } else {
    ShouldNotReachHere();  // other sizes not supported here
  }
  // convert from field index to ConstantPoolCacheEntry index
  // and from word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  // skip past the header
  add(tmp, in_bytes(ConstantPoolCache::base_offset()), tmp);
  // construct pointer to cache entry
  add(LcpoolCache, tmp, cache);
}


// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index) {
  assert_different_registers(result, index);
  assert_not_delayed();
  // convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a java object, it can be compressed
  Register tmp = index;  // reuse
  sll(index, LogBytesPerHeapOop, tmp);
  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
  // JNIHandles::resolve(result)
  ld_ptr(result, 0, result);
  // Add in the index
  add(result, tmp, result);
  load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
}


// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass.  Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Register Rsuper_klass,
                                                  Register Rtmp1,
                                                  Register Rtmp2,
                                                  Register Rtmp3,
                                                  Label &ok_is_subtype ) {
  Label not_subtype;

  // Profile the not-null value's klass.
  profile_typecheck(Rsub_klass, Rtmp1);

  check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2,
                                &ok_is_subtype, &not_subtype, NULL);

  check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
                                &ok_is_subtype, NULL);

  bind(not_subtype);
  profile_typecheck_failed(Rtmp1);
}

// Separate these two to allow for delay slot in middle.
// These are used to do a test and full jump to exception-throwing code.

// %%%%% Could possibly reoptimize this by testing to see if it could use
// a single conditional branch (i.e. if the span is small enough).
// If you go that route, then get rid of the split and give up
// on the delay-slot hack.

void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  br(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  bp( ok_condition, true, Assembler::xcc, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition,
                                                  Label&    ok ) {
  assert_not_delayed();
  brx(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_2( address  throw_entry_point,
                                                Register Rscratch,
                                                Label&   ok ) {
  assert(throw_entry_point != NULL, "entry point must be generated by now");
  AddressLiteral dest(throw_entry_point);
  jump_to(dest, Rscratch);
  delayed()->nop();
  bind(ok);
}
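// Typical split use (illustrative sketch only):
//   throw_if_not_1_icc(cc, ok);      // emits the conditional branch
//   delayed()->...;                  // caller fills the delay slot
//   throw_if_not_2(entry, tmp, ok);  // jump to the throw stub, bind ok
// as done by index_check_without_pop() below.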
// And if you cannot use the delay slot, here is a shorthand:

void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_icc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_xcc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition,
                                                address   throw_entry_point,
                                                Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_x( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}

// Check that index is in range for array, then shift index by index_shift,
// and put arrayOop + shifted_index into res.
// Note: res is still shy of address by array offset into object.

void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  verify_oop(array);
#ifdef _LP64
  // sign extend since tos (index) can be a 32bit value
  sra(index, G0, index);
#endif // _LP64

  // check array
  Label ptr_ok;
  tst(array);
  throw_if_not_1_x( notZero, ptr_ok );
  delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index
  throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);

  Label index_ok;
  cmp(index, tmp);
  throw_if_not_1_icc( lessUnsigned, index_ok );
  if (index_shift > 0)  delayed()->sll(index, index_shift, index);
  else                  delayed()->add(array, index, res); // addr - const offset in index
  // convention: move aberrant index into G3_scratch for exception message
  mov(index, G3_scratch);
  throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);

  // add offset if didn't do it in delay slot
  if (index_shift > 0)  add(array, index, res); // addr - const offset in index
}


void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  // pop array
  pop_ptr(array);

  // check array
  index_check_without_pop(array, index, index_shift, tmp, res);
}
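// Illustrative use (not generated code): for an int[] element access,
// index_check with index_shift = 2 leaves array + (index << 2) in res;
// the caller still adds arrayOopDesc::base_offset_in_bytes(T_INT) when
// forming the final address.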
void InterpreterMacroAssembler::get_const(Register Rdst) {
  ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
  get_const(Rdst);
  ld_ptr(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
  get_constant_pool(Rdst);
  ld_ptr(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
}


void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
  get_constant_pool(Rcpool);
  ld_ptr(Rcpool, ConstantPool::tags_offset_in_bytes(), Rtags);
}


// unlock if synchronized method
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
                                                              bool throw_monitor_exception,
                                                              bool install_monitor_exception) {
  Label unlocked, unlock, no_unlock;

  // get the value of _do_not_unlock_if_synchronized into G1_scratch
  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  ldbool(do_not_unlock_if_synchronized, G1_scratch);
  stbool(G0, do_not_unlock_if_synchronized); // reset the flag

  // check if synchronized method
  const Address access_flags(Lmethod, Method::access_flags_offset());
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  push(state); // save tos
  ld(access_flags, G3_scratch); // Load access flags.
  btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
  br(zero, false, pt, unlocked);
  delayed()->nop();

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);
  delayed()->nop();

  // BasicObjectLock will be first in list, since this is a synchronized method. However, need
  // to check that the object has not been unlocked by an explicit monitorexit bytecode.

  //Intel: if (throw_monitor_exception) ... else ...
  // Entry already unlocked, need to throw exception
  //...

  // pass top-most monitor elem
  add( top_most_monitor(), O1 );

  ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
  br_notnull_short(G3_scratch, pt, unlock);

  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw an exception
    MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    ba_short(unlocked);
  }

  bind(unlock);

  unlock_object(O1);

  bind(unlocked);

  // I0, I1: Might contain return value

  // Check that all monitors are unlocked
  { Label loop, exception, entry, restart;

    Register Rmptr   = O0;
    Register Rtemp   = O1;
    Register Rlimit  = Lmonitors;
    const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
    assert( (delta & LongAlignmentMask) == 0,
            "sizeof BasicObjectLock must be even number of doublewords");

#ifdef ASSERT
    add(top_most_monitor(), Rmptr, delta);
    { Label L;
      // ensure that Rmptr starts out above (or at) Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("monitor stack has negative size");
      bind(L);
    }
#endif
    bind(restart);
    ba(entry);
    delayed()->
    add(top_most_monitor(), Rmptr, delta);      // points to current entry, starting with bottom-most entry

    // Entry is still locked, need to throw exception
    bind(exception);
    if (throw_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame
      unlock_object(Rmptr);
      if (install_monitor_exception) {
        MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }
      ba_short(restart);
    }

    bind(loop);
    cmp(Rtemp, G0);                             // check if current entry is used
    brx(Assembler::notEqual, false, pn, exception);
    delayed()->
    dec(Rmptr, delta);                          // otherwise advance to next entry
#ifdef ASSERT
    { Label L;
      // ensure that Rmptr has not somehow stepped below Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("ran off the end of the monitor stack");
      bind(L);
    }
#endif
    bind(entry);
    cmp(Rmptr, Rlimit);                         // check if bottom reached
    brx(Assembler::notEqual, true, pn, loop);   // if not at bottom then check this entry
    delayed()->
    ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
  }

  bind(no_unlock);
  pop(state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}
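// narrow() below truncates the 32-bit result in 'result' to the method's
// declared return type: T_BOOLEAN masks to bit 0, T_BYTE sign-extends from
// bit 7, T_CHAR zero-extends from bit 15, and T_SHORT (the remaining case)
// sign-extends from bit 15.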
void InterpreterMacroAssembler::narrow(Register result) {

  ld_ptr(Address(Lmethod, Method::const_offset()), G3_scratch);
  ldub(G3_scratch, in_bytes(ConstMethod::result_type_offset()), G3_scratch);

  Label notBool, notByte, notChar, done;

  // common case first
  cmp(G3_scratch, T_INT);
  br(Assembler::equal, true, pn, done);
  delayed()->nop();

  cmp(G3_scratch, T_BOOLEAN);
  br(Assembler::notEqual, true, pn, notBool);
  delayed()->cmp(G3_scratch, T_BYTE);
  and3(result, 1, result);
  ba(done);
  delayed()->nop();

  bind(notBool);
  // cmp(G3_scratch, T_BYTE);
  br(Assembler::notEqual, true, pn, notByte);
  delayed()->cmp(G3_scratch, T_CHAR);
  sll(result, 24, result);
  sra(result, 24, result);
  ba(done);
  delayed()->nop();

  bind(notByte);
  // cmp(G3_scratch, T_CHAR);
  sll(result, 16, result);
  br(Assembler::notEqual, true, pn, done);
  delayed()->sra(result, 16, result);
  // sll(result, 16, result);
  srl(result, 16, result);

  // bind(notChar);
  // must be short, instructions already executed in delay slot
  // sll(result, 16, result);
  // sra(result, 16, result);

  bind(done);
}

// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception) {

  unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

  // save result (push state before jvmti call and pop it afterwards) and notify jvmti
  notify_method_exit(false, state, NotifyJVMTI);

  if (StackReservedPages > 0) {
    // testing if Stack Reserved Area needs to be re-enabled
    Label no_reserved_zone_enabling;
    ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G3_scratch);
    cmp_and_brx_short(SP, G3_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling);

    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_delayed_StackOverflowError), G2_thread);
    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }

  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  verify_thread();

  // return tos
  assert(Otos_l1 == Otos_i, "adjust code below");
  switch (state) {
#ifdef _LP64
  case ltos: mov(Otos_l, Otos_l->after_save()); break;   // O0 -> I0
#else
  case ltos: mov(Otos_l2, Otos_l2->after_save());        // fall through  // O1 -> I1
#endif
  case btos:                                      // fall through
  case ztos:                                      // fall through
  case ctos:                                      // fall through
  case stos:                                      // fall through
  case atos:                                      // fall through
  case itos: mov(Otos_l1, Otos_l1->after_save()); break; // O0 -> I0
  case ftos:                                      // fall through
  case dtos:                                      // fall through
  case vtos: /* nothing to do */                  break;
  default  : ShouldNotReachHere();
  }

#if defined(COMPILER2) && !defined(_LP64)
  if (state == ltos) {
    // C2 expects long results in G1; we can't tell if we're returning to
    // interpreted or compiled code, so just be safe and use G1 and O0/O1

    // Shift bits into high (msb) of G1
    sllx(Otos_l1->after_save(), 32, G1);
    // Zero extend low bits
    srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
    or3 (Otos_l2->after_save(), G1, G1);
  }
#endif /* COMPILER2 */

}
// Lock object
//
// Argument - lock_reg points to the BasicObjectLock to be used for locking,
//            it must be initialized with the object to lock
void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
  }
  else {
    Register obj_reg = Object;
    Register mark_reg = G4_scratch;
    Register temp_reg = G1_scratch;
    Address  lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
    Address  mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label    done;

    Label slow_case;

    assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);

    // load markOop from object into mark_reg
    ld_ptr(mark_addr, mark_reg);

    if (UseBiasedLocking) {
      biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
    }

    // get the address of basicLock on stack that will be stored in the object
    // we need a temporary register here as we do not want to clobber lock_reg
    // (cas clobbers the destination register)
    mov(lock_reg, temp_reg);
    // set mark reg to be (markOop of object | UNLOCK_VALUE)
    or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
    // initialize the box  (Must happen before we update the object mark!)
    st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
    // compare and exchange object_addr, markOop | 1, stack address of basicLock
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), mark_reg, temp_reg);

    // if the compare and exchange succeeded we are done (we saw an unlocked object)
    cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);

    // We did not see an unlocked object so try the fast recursive case

    // Check if owner is self by comparing the value in the markOop of object
    // with the stack pointer
    sub(temp_reg, SP, temp_reg);
#ifdef _LP64
    sub(temp_reg, STACK_BIAS, temp_reg);
#endif
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");

    // Composite "andcc" test:
    // (a) %sp -vs- markword proximity check, and,
    // (b) verify mark word LSBs == 0 (Stack-locked).
    //
    // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
    // Note that the page size used for %sp proximity testing is arbitrary and is
    // unrelated to the actual MMU page size.  We use a 'logical' page size of
    // 4096 bytes.   F..FFF003 is designed to fit conveniently in the SIMM13 immediate
    // field of the andcc instruction.
    andcc (temp_reg, 0xFFFFF003, G0) ;

    // if condition is true we are done and hence we can store 0 in the displaced
    // header indicating it is a recursive lock and be done
    brx(Assembler::zero, true, Assembler::pt, done);
    delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());

    // none of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);

    bind(done);
  }
}
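// The fast path above, sketched in pseudocode (illustrative only):
//   box->displaced_header = mark | unlocked_value;
//   if (CAS(&obj->mark, mark | unlocked_value, box) succeeded)
//     done;                              // we saw an unlocked object
//   else if (owner is self: (old mark - SP) is page-proximate, lock bits 0)
//     box->displaced_header = NULL;      // recursive stack lock
//   else
//     InterpreterRuntime::monitorenter;  // slow path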
// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument - lock_reg points to the BasicObjectLock for lock
// Throw IllegalMonitorException if object is not locked by current thread
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Register obj_reg = G3_scratch;
    Register mark_reg = G4_scratch;
    Register displaced_header_reg = G1_scratch;
    Address  lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
    Address  mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label    done;

    if (UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
      biased_locking_exit(mark_addr, mark_reg, done, true);
      st_ptr(G0, lockobj_addr);  // free entry
    }

    // Test first if we are in the fast recursive case
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
    ld_ptr(lock_addr, displaced_header_reg);
    br_null(displaced_header_reg, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // See if it is still a light weight lock, if so we just unlock
    // the object and we are done

    if (!UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
    }

    // we have the displaced header in displaced_header_reg
    // we expect to see the stack address of the basicLock in case the
    // lock is still a light weight lock (lock_reg)
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg);
    cmp(lock_reg, displaced_header_reg);
    brx(Assembler::equal, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);
  }
}

// Get the method data pointer from the Method* and set the
// specified register to its value.

void InterpreterMacroAssembler::set_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label get_continue;

  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(get_continue);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  bind(get_continue);
}

// Set the method data pointer for the current bcp.

void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label zero_continue;

  // Test MDO to avoid the call if it is NULL.
  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(zero_continue);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  add(ImethodDataPtr, O0, ImethodDataPtr);
  bind(zero_continue);
}
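// Note: InterpreterRuntime::bcp_to_di returns the data index for the current
// bcp in O0, which the code above adds to method_data() plus data_offset().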
// Test ImethodDataPtr. If it is null, continue at the specified label.

void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  test_method_data_pointer(verify_continue);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
  lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
  ld_ptr(Lmethod, Method::const_offset(), O5);
  add(G3_scratch, in_bytes(ConstMethod::codes_offset()), G3_scratch);
  add(G3_scratch, O5, G3_scratch);
  cmp(Lbcp, G3_scratch);
  brx(Assembler::equal, false, Assembler::pt, verify_continue);

  Register temp_reg = O5;
  delayed()->mov(ImethodDataPtr, temp_reg);
  // %%% should use call_VM_leaf here?
  //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
  save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
  stf(FloatRegisterImpl::D, Ftos_d, d_save);
  mov(temp_reg->after_save(), O2);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  ldf(FloatRegisterImpl::D, d_save, Ftos_d);
  restore();
  bind(verify_continue);
#endif // ASSERT
}

void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
                                                                Register method_counters,
                                                                Register Rtmp,
                                                                Label &profile_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Control will flow to "profile_continue" if the counter is less than the
  // limit or if we call profile_method()

  Label done;

  // if no method data exists, and the counter is high enough, make one
  br_notnull_short(ImethodDataPtr, Assembler::pn, done);

  // Test to see if we should create a method data oop
  Address profile_limit(method_counters, MethodCounters::interpreter_profile_limit_offset());
  ld(profile_limit, Rtmp);
  cmp(invocation_count, Rtmp);
  // Use long branches because call_VM() code and following code generated by
  // test_backedge_count_for_osr() is large in debug VM.
  br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
  delayed()->nop();

  // Build it now.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
  set_method_data_pointer_for_bcp();
  ba(profile_continue);
  delayed()->nop();
  bind(done);
}

// Store a value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  st_ptr(value, ImethodDataPtr, constant);
}
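// Note on the increment path below: DataLayout::counter_increment is 1, so
// addcc sets the carry flag exactly when the counter wraps to zero, and the
// following subc subtracts that carry, leaving a wrapped counter saturated
// at all ones.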
void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
                                                      Register bumped_count,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  // Load the counter.
  ld_ptr(counter, bumped_count);

  if (decrement) {
    // Decrement the register.  Set condition codes.
    subcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the decrement causes the counter to overflow, stay negative
    Label L;
    brx(Assembler::negative, true, Assembler::pn, L);

    // Store the decremented counter, if it is still negative.
    delayed()->st_ptr(bumped_count, counter);
    bind(L);
  } else {
    // Increment the register.  Set carry flag.
    addcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the increment causes the counter to overflow, pull back by 1.
    assert(DataLayout::counter_increment == 1, "subc works");
    subc(bumped_count, G0, bumped_count);

    // Store the incremented counter.
    st_ptr(bumped_count, counter);
  }
}

// Increment the value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
                                                      Register bumped_count,
                                                      bool decrement) {
  // Locate the counter at a fixed offset from the mdp:
  Address counter(ImethodDataPtr, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Increment the value at some non-fixed (reg + constant) offset from
// the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
                                                      int constant,
                                                      Register bumped_count,
                                                      Register scratch2,
                                                      bool decrement) {
  // Add the constant to reg to get the offset.
  add(ImethodDataPtr, reg, scratch2);
  Address counter(scratch2, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Set a flag value at the current method data pointer position.
// Updates a single byte of the header, to avoid races with other header bits.

void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
                                                Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the data header
  ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch);

  // Set the flag
  or3(scratch, flag_constant, scratch);

  // Store the modified header.
  stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset()));
}

// Test the location at some offset from the method data pointer.
// If it is not equal to value, branch to the not_equal_continue Label.
// Set condition codes to match the nullness of the loaded value.

void InterpreterMacroAssembler::test_mdp_data_at(int offset,
                                                 Register value,
                                                 Label& not_equal_continue,
                                                 Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset, scratch);
  cmp(value, scratch);
  brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue);
  delayed()->tst(scratch);
}

// Update the method data pointer by the displacement located at some fixed
// offset from the method data pointer.

void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset_of_disp, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}
// Update the method data pointer by the displacement located at the
// offset (reg + offset_of_disp).

void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
                                                     int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(reg, offset_of_disp, scratch);
  ld_ptr(ImethodDataPtr, scratch, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by a simple constant displacement.

void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(ImethodDataPtr, constant, ImethodDataPtr);
}

// Update the method data pointer for a _ret bytecode whose target
// was not among our cached targets.

void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
                                                   Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(state);
  st_ptr(return_bci, l_tmp); // protect return_bci, in case it is volatile
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
  ld_ptr(l_tmp, return_bci);
  pop(state);
}

// Count a taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are taking a branch. Increment the taken count.
    increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
    bind (profile_continue);
  }
}


// Count a not-taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are not taking a branch. Increment the not-taken count.
    increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);

    // The method data pointer needs to be updated to correspond to the
    // next bytecode.
    update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
    bind (profile_continue);
  }
}


// Count a non-virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
    bind (profile_continue);
  }
}
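// Note on the data layouts used above (sketch): JumpData holds a taken count
// plus a displacement cell; BranchData extends it with a not-taken count.
// Taken branches follow the displacement so the mdp tracks the target, while
// not-taken branches and plain calls simply step the mdp over their
// ProfileData to the next bytecode's profile.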
// Count a final call in the bytecodes.

void InterpreterMacroAssembler::profile_final_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind (profile_continue);
  }
}


// Count a virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register scratch,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);


    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      br_notnull_short(receiver, Assembler::pt, not_null);
      // We are making a call. Increment the count for null receiver.
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
      ba_short(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, scratch, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
#if INCLUDE_JVMCI
    if (MethodProfileWidth == 0) {
      update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    }
#else
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
#endif
    bind(profile_continue);
  }
}
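// Rough shape of the virtual-call profiling above (illustrative sketch only):
//
//   if (receiver == NULL)  count++;                            // bump the total only
//   else                   record_klass_in_profile(receiver);  // per-klass row
//   mdp += virtual_call_data_size;  // with JVMCI, only when MethodProfileWidth == 0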
#if INCLUDE_JVMCI
void InterpreterMacroAssembler::profile_called_method(Register method, Register scratch) {
  assert_different_registers(method, scratch);
  if (ProfileInterpreter && MethodProfileWidth > 0) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    Label done;
    record_item_in_profile_helper(method, scratch, 0, done, MethodProfileWidth,
      &VirtualCallData::method_offset, &VirtualCallData::method_count_offset, in_bytes(VirtualCallData::nonprofiled_receiver_count_offset()));
    bind(done);

    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}
#endif // INCLUDE_JVMCI

void InterpreterMacroAssembler::record_klass_in_profile_helper(Register receiver, Register scratch,
                                                               Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      increment_mdp_data_at(in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset()), scratch);
    }
#endif
  } else {
    int non_profiled_offset = -1;
    if (is_virtual_call) {
      non_profiled_offset = in_bytes(CounterData::count_offset());
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      non_profiled_offset = in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset());
    }
#endif

    record_item_in_profile_helper(receiver, scratch, 0, done, TypeProfileWidth,
      &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset, non_profiled_offset);
  }
}

void InterpreterMacroAssembler::record_item_in_profile_helper(Register item,
                                          Register scratch, int start_row, Label& done, int total_rows,
                                          OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
                                          int non_profiled_offset) {
  int last_row = total_rows - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the item and for null.
  // Take any of three different outcomes:
  //   1. found item => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
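  // Roughly, in C-like pseudo-code (illustrative only -- the generated code
  // interleaves these tests with delay slots and a recursive expansion):
  //
  //   for (row = start_row; row <= last_row; row++) {
  //     if (item[row] == item) { count[row]++; goto done; }           // case 1
  //   }
  //   if (item[start_row] == NULL) { item[start_row] = item;          // case 2
  //                                  count[start_row] = 1; goto done; }
  //   if (non_profiled_offset >= 0) mdp[non_profiled_offset]++;       // polymorphic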
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the item is item[n].
    int item_offset = in_bytes(item_offset_fn(row));
    test_mdp_data_at(item_offset, item, next_test, scratch);
    // delayed()->tst(scratch);

    // The receiver is item[n]. Increment count[n].
    int count_offset = in_bytes(item_count_offset_fn(row));
    increment_mdp_data_at(count_offset, scratch);
    ba_short(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on item[n]... Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (non_profiled_offset >= 0) {
          brx(Assembler::zero, false, Assembler::pn, found_null);
          delayed()->nop();
          // Item did not match any saved item and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(non_profiled_offset, scratch);
          ba_short(done);
          bind(found_null);
        } else {
          brx(Assembler::notZero, false, Assembler::pt, done);
          delayed()->nop();
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      brx(Assembler::zero, false, Assembler::pn, found_null);
      delayed()->nop();

      // Put all the "Case 3" tests here.
      record_item_in_profile_helper(item, scratch, start_row + 1, done, total_rows,
        item_offset_fn, item_count_offset_fn, non_profiled_offset);

      // Found a null. Keep searching for a matching item,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching item, but we
  // observed that item[start_row] is NULL.

  // Fill in the item field and increment the count.
  int item_offset = in_bytes(item_offset_fn(start_row));
  set_mdp_data_at(item_offset, item);
  int count_offset = in_bytes(item_count_offset_fn(start_row));
  mov(DataLayout::counter_increment, scratch);
  set_mdp_data_at(count_offset, scratch);
  if (start_row > 0) {
    ba_short(done);
  }
}

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register scratch, bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, scratch, done, is_virtual_call);

  bind (done);
}


// Count a ret in the bytecodes.

void InterpreterMacroAssembler::profile_ret(TosState state,
                                            Register return_bci,
                                            Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
                       return_bci, next_test, scratch);

      // return_bci is equal to bci[n]. Increment the count.
      increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
      ba_short(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(state, return_bci);

    bind (profile_continue);
  }
}
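// profile_ret() above tries the RetData's cached bci rows first; only a miss
// on every row falls back to the update_mdp_for_ret() runtime call. Sketch
// (illustrative only):
//
//   for (row = 0; row < row_limit; row++)
//     if (bci[row] == return_bci) { bci_count[row]++;
//                                   mdp += bci_displacement[row]; goto done; }
//   update_mdp_for_ret(return_bci);   // out-of-line slow path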
// Profile an unexpected null in the bytecodes.
void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(ReceiverTypeData::receiver_type_data_size());
    }
    update_mdp_by_constant(mdp_delta);

    bind (profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck(Register klass,
                                                  Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(ReceiverTypeData::receiver_type_data_size());

      // Record the object type.
      record_klass_in_profile(klass, scratch, false);
    }

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp_delta);

    bind (profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(ReceiverTypeData::receiver_type_data_size());

    // *Decrement* the counter. We expect to see zero or small negatives.
    increment_mdp_data_at(count_offset, scratch, true);

    bind (profile_continue);
  }
}

// Count the default case of a switch construct.

void InterpreterMacroAssembler::profile_switch_default(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the default case count
    increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
                          scratch);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(
                    in_bytes(MultiBranchData::default_displacement_offset()),
                    scratch);

    bind (profile_continue);
  }
}

// Count the index'th case of a switch construct.

void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register scratch,
                                                    Register scratch2,
                                                    Register scratch3) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes()
    set(in_bytes(MultiBranchData::per_case_size()), scratch);
    smul(index, scratch, scratch);
    add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch);

    // Update the case count
    increment_mdp_data_at(scratch,
                          in_bytes(MultiBranchData::relative_count_offset()),
                          scratch2,
                          scratch3);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(scratch,
                         in_bytes(MultiBranchData::relative_displacement_offset()),
                         scratch2);

    bind (profile_continue);
  }
}
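// profile_obj_type() below maintains a single TypeEntries cell that packs a
// Klass* with low status bits (null_seen, type_unknown). The xor trick makes
// "same klass as recorded" show up as zero under type_klass_mask. Sketch
// (illustrative only):
//
//   if (obj == NULL)                   cell |= null_seen;
//   else if (klass(obj) == cell.klass) ;                        // nothing to do
//   else if (cell & type_unknown)      ;                        // already given up
//   else if (cell.klass == 0)          cell |= klass(obj);      // first type seen
//   else                               cell |= type_unknown;    // conflicting types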
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr, Register tmp) {
  Label not_null, do_nothing, do_update;

  assert_different_registers(obj, mdo_addr.base(), tmp);

  verify_oop(obj);

  ld_ptr(mdo_addr, tmp);

  br_notnull_short(obj, pt, not_null);
  or3(tmp, TypeEntries::null_seen, tmp);
  ba_short(do_update);

  bind(not_null);
  load_klass(obj, obj);

  xor3(obj, tmp, obj);
  btst(TypeEntries::type_klass_mask, obj);
  // klass seen before, nothing to do. The unknown bit may have been
  // set already but no need to check.
  brx(zero, false, pt, do_nothing);
  delayed()->btst(TypeEntries::type_unknown, obj);

  // already unknown. Nothing to do anymore.
  brx(notZero, false, pt, do_nothing);
  delayed()->btst(TypeEntries::type_mask, tmp);

  brx(zero, true, pt, do_update);
  // first time here. Set profile type.
  delayed()->or3(tmp, obj, tmp);

  // different than before. Cannot keep accurate profile.
  or3(tmp, TypeEntries::type_unknown, tmp);

  bind(do_update);
  // update profile
  st_ptr(tmp, mdo_addr);

  bind(do_nothing);
}
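// profile_arguments_type() below walks the TypeEntriesAtCall area of a
// CallTypeData/VirtualCallTypeData: for each profiled argument it reads the
// stack slot recorded at MDO-creation time, fetches the actual argument from
// the expression stack, and hands it to profile_obj_type(). Sketch
// (illustrative only):
//
//   for (i = 0; i < TypeProfileArgsLimit && cells_left >= per_arg_count; i++) {
//     slot = mdp[stack_slot_offset(i)];
//     obj  = Lesp[(n_params - slot) << logStackElementSize];
//     profile_obj_type(obj, &mdp[argument_type_offset(i)]);
//   }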
void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  assert_different_registers(callee, tmp1, tmp2, ImethodDataPtr);

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    ldub(ImethodDataPtr, in_bytes(DataLayout::tag_offset()) - off_to_start, tmp1);
    cmp_and_br_short(tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag, notEqual, pn, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
      add(ImethodDataPtr, off_to_args, ImethodDataPtr);

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If return value type is profiled we may have no argument to profile
          ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);
          sub(tmp1, i*TypeStackSlotEntries::per_arg_count(), tmp1);
          cmp_and_br_short(tmp1, TypeStackSlotEntries::per_arg_count(), less, pn, done);
        }
        ld_ptr(Address(callee, Method::const_offset()), tmp1);
        lduh(Address(tmp1, ConstMethod::size_of_parameters_offset()), tmp1);
        // stack offset o (zero based) from the start of the argument
        // list, for n arguments translates into offset n - o - 1 from
        // the end of the argument list. But there's an extra slot at
        // the top of the stack. So the offset is n - o from Lesp.
        ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, tmp2);
        sub(tmp1, tmp2, tmp1);

        // Can't use MacroAssembler::argument_address() which needs Gargs to be set up
        sll(tmp1, Interpreter::logStackElementSize, tmp1);
        ld_ptr(Lesp, tmp1, tmp1);

        Address mdo_arg_addr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
        profile_obj_type(tmp1, mdo_arg_addr, tmp2);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        add(ImethodDataPtr, to_add, ImethodDataPtr);
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);
        sub(tmp1, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count(), tmp1);
      }

      bind(done);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp1 is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-null
        // if there's a return to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        sll(tmp1, exact_log2(DataLayout::cell_size), tmp1);
        add(ImethodDataPtr, tmp1, ImethodDataPtr);
      }
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one.

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
  assert_different_registers(ret, tmp1, tmp2);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length.
      Label do_profile;
      ldub(Lbcp, 0, tmp1);
      cmp_and_br_short(tmp1, Bytecodes::_invokedynamic, equal, pn, do_profile);
      cmp(tmp1, Bytecodes::_invokehandle);
      br(equal, false, pn, do_profile);
      delayed()->lduh(Lmethod, Method::intrinsic_id_offset_in_bytes(), tmp1);
      cmp_and_br_short(tmp1, vmIntrinsics::_compiledLambdaForm, notEqual, pt, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(ImethodDataPtr, -in_bytes(ReturnTypeEntry::size()));
    mov(ret, tmp1);
    profile_obj_type(tmp1, mdo_ret_addr, tmp2);

    bind(profile_continue);
  }
}
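// profile_parameters_type() below walks the ParametersTypeData array
// backwards, from the last parameter to the first. Sketch (illustrative
// only; 'off' is a byte offset into the array):
//
//   off = last_entry_slot_field;
//   do {
//     obj = Llocals[-mdp[off]];                    // parameter from the local area
//     profile_obj_type(obj, &mdp[off + (type_field - slot_field)]);
//     off -= per_arg_entry_size;
//   } while (off >= first_entry);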
void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters.
    lduw(ImethodDataPtr, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), tmp1);
    cmp_and_br_short(tmp1, 0, less, pn, profile_continue);

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameters. Collect profiling from last parameter down.
    // mdo start + parameters offset + array length - 1

    // Pointer to the parameter area in the MDO
    Register mdp = tmp1;
    add(ImethodDataPtr, tmp1, mdp);

    // offset of the current profile entry to update
    Register entry_offset = tmp2;
    // entry_offset = array len in number of cells
    ld_ptr(mdp, ArrayData::array_len_offset(), entry_offset);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");

    // entry_offset (number of cells) = array len - size of 1 entry + offset of the stack slot field
    sub(entry_offset, TypeStackSlotEntries::per_arg_count() - (off_base / DataLayout::cell_size), entry_offset);
    // entry_offset in bytes
    sll(entry_offset, exact_log2(DataLayout::cell_size), entry_offset);

    Label loop;
    bind(loop);

    // load offset on the stack from the slot for this parameter
    ld_ptr(mdp, entry_offset, tmp3);
    sll(tmp3, Interpreter::logStackElementSize, tmp3);
    neg(tmp3);
    // read the parameter from the local area
    ld_ptr(Llocals, tmp3, tmp3);

    // make entry_offset now point to the type field for this parameter
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    assert(type_base > off_base, "unexpected");
    add(entry_offset, type_base - off_base, entry_offset);

    // profile the parameter
    Address arg_type(mdp, entry_offset);
    profile_obj_type(tmp3, arg_type, tmp4);

    // go to next parameter
    sub(entry_offset, TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base), entry_offset);
    cmp_and_br_short(entry_offset, off_base, greaterEqual, pt, loop);

    bind(profile_continue);
  }
}
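// add_monitor_to_stack() below grows the frame by one BasicObjectLock and,
// unless the expression stack is empty, slides the existing stack contents
// down over the freed space. Sketch of the copy loop (illustrative only):
//
//   for (p = new_stack_base; p != new_monitor_base; p++)
//     *p = *(p + delta/wordSize);    // copy from low memory upwards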
// add a InterpMonitorElem to stack (see frame_sparc.hpp)

void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
                                                      Register Rtemp,
                                                      Register Rtemp2 ) {

  Register Rlimit = Lmonitors;
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  assert( (delta & LongAlignmentMask) == 0,
          "sizeof BasicObjectLock must be even number of doublewords");

  sub( SP,        delta, SP);
  sub( Lesp,      delta, Lesp);
  sub( Lmonitors, delta, Lmonitors);

  if (!stack_is_empty) {

    // must copy stack contents down

    Label start_copying, next;

    // untested("monitor stack expansion");
    compute_stack_base(Rtemp);
    ba(start_copying);
    delayed()->cmp(Rtemp, Rlimit); // done? duplicated below

    // note: must copy from low memory upwards
    // On entry to loop,
    // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
    // Loop mutates Rtemp

    bind( next);

    st_ptr(Rtemp2, Rtemp, 0);
    inc(Rtemp, wordSize);
    cmp(Rtemp, Rlimit); // are we done? (duplicated above)

    bind( start_copying );

    brx( notEqual, true, pn, next );
    delayed()->ld_ptr( Rtemp, delta, Rtemp2 );

    // done copying stack
  }
}

// Locals
void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld_ptr(index, 0, dst);
  // Note: index must hold the effective address--the iinc template uses it
}

// Just like access_local_ptr but the tag is a returnAddress
void InterpreterMacroAssembler::access_local_returnAddress(Register index,
                                                           Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld_ptr(index, 0, dst);
}

void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld(index, 0, dst);
  // Note: index must hold the effective address--the iinc template uses it
}


void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  // First half stored at index n+1 (which grows down from Llocals[n])
  load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
}


void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ldf(FloatRegisterImpl::S, index, 0, dst);
}


void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
}
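// The stores below mirror the loads above: the index register is turned into
// the effective address Llocals - (index << logStackElementSize), and debug
// builds first check that the store cannot stomp the SPARC register-save area.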
#ifdef ASSERT
void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
  Label L;

  assert(Rindex != Rscratch, "Registers cannot be same");
  assert(Rindex != Rscratch1, "Registers cannot be same");
  assert(Rlimit != Rscratch, "Registers cannot be same");
  assert(Rlimit != Rscratch1, "Registers cannot be same");
  assert(Rscratch1 != Rscratch, "Registers cannot be same");

  // untested("reg area corruption");
  add(Rindex, offset, Rscratch);
  add(Rlimit, 64 + STACK_BIAS, Rscratch1);
  cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
  stop("regsave area is being clobbered");
  bind(L);
}
#endif // ASSERT


void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
  st(src, index, 0);
}

void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
  st_ptr(src, index, 0);
}



void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
  st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
}

void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
  store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1
}


void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
  stf(FloatRegisterImpl::S, src, index, 0);
}


void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
  store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
}


int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
  return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS;
}


Address InterpreterMacroAssembler::top_most_monitor() {
  return Address(FP, top_most_monitor_byte_offset());
}


void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
  add( Lesp, wordSize, Rdest );
}

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register Rcounters,
                                                    Label& skip) {
  Label has_counters;
  Address method_counters(method, in_bytes(Method::method_counters_offset()));
  ld_ptr(method_counters, Rcounters);
  br_notnull_short(Rcounters, Assembler::pt, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ld_ptr(method_counters, Rcounters);
  br_null(Rcounters, false, Assembler::pn, skip); // No MethodCounters, OutOfMemory
  delayed()->nop();
  bind(has_counters);
}
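// The two counter helpers below implement, roughly (sketch; the backedge
// variant swaps the roles of the two counters):
//
//   inv += count_increment;  *inv_counter = inv;
//   Rtmp = inv + (be & count_mask_value);
//
// leaving the combined count in Rtmp for the caller's threshold tests.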
void InterpreterMacroAssembler::increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) {
  assert(UseCompiler || LogTouchedMethods, "incrementing must be useful");
  assert_different_registers(Rcounters, Rtmp, Rtmp2);

  Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() +
                                 InvocationCounter::counter_offset());
  Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() +
                                 InvocationCounter::counter_offset());
  int delta = InvocationCounter::count_increment;

  // Load each counter in a register
  ld( inv_counter, Rtmp );
  ld( be_counter, Rtmp2 );

  assert( is_simm13( delta ), " delta too large.");

  // Add the delta to the invocation counter and store the result
  add( Rtmp, delta, Rtmp );

  // Mask the backedge counter
  and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );

  // Store value
  st( Rtmp, inv_counter);

  // Add invocation counter + backedge counter
  add( Rtmp, Rtmp2, Rtmp);

  // Note that this macro must leave the backedge_count + invocation_count in Rtmp!
}

void InterpreterMacroAssembler::increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) {
  assert(UseCompiler, "incrementing must be useful");
  assert_different_registers(Rcounters, Rtmp, Rtmp2);

  Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() +
                                 InvocationCounter::counter_offset());
  Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() +
                                 InvocationCounter::counter_offset());

  int delta = InvocationCounter::count_increment;
  // Load each counter in a register
  ld( be_counter, Rtmp );
  ld( inv_counter, Rtmp2 );

  // Add the delta to the backedge counter
  add( Rtmp, delta, Rtmp );

  // Mask the invocation counter, add to backedge counter
  and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );

  // and store the result to memory
  st( Rtmp, be_counter );

  // Add backedge + invocation counter
  add( Rtmp, Rtmp2, Rtmp );

  // Note that this macro must leave backedge_count + invocation_count in Rtmp!
}
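// test_backedge_count_for_osr() below is the interpreter's OSR trigger.
// Sketch of the control flow (illustrative only):
//
//   if (backedge_count >= limit &&
//       (!ProfileInterpreter || (backedge_count & 1023) == 0)) {
//     nm = InterpreterRuntime::frequency_counter_overflow(branch_bcp);
//     if (nm != NULL && nm->is_in_use()) {
//       buf = SharedRuntime::OSR_migration_begin(thread);  // copy frame state out
//       <pop interpreter frame>; jump nm->osr_entry(buf);
//     }
//   }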
void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
                                                             Register method_counters,
                                                             Register branch_bcp,
                                                             Register Rtmp ) {
  Label did_not_overflow;
  Label overflow_with_error;
  assert_different_registers(backedge_count, Rtmp, branch_bcp);
  assert(UseOnStackReplacement, "Must UseOnStackReplacement to test_backedge_count_for_osr");

  Address limit(method_counters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
  ld(limit, Rtmp);
  cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);

  // When ProfileInterpreter is on, the backedge_count comes from the
  // MethodData*, which value does not get reset on the call to
  // frequency_counter_overflow(). To avoid excessive calls to the overflow
  // routine while the method is being compiled, add a second test to make sure
  // the overflow function is called only once every overflow_frequency.
  if (ProfileInterpreter) {
    const int overflow_frequency = 1024;
    andcc(backedge_count, overflow_frequency-1, Rtmp);
    brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
    delayed()->nop();
  }

  // overflow in loop, pass branch bytecode
  set(6, Rtmp);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);

  // Was an OSR adapter generated?
  // O0 = osr nmethod
  br_null_short(O0, Assembler::pn, overflow_with_error);

  // Has the nmethod been invalidated already?
  ldub(O0, nmethod::state_offset(), O2);
  cmp_and_br_short(O2, nmethod::in_use, Assembler::notEqual, Assembler::pn, overflow_with_error);

  // migrate the interpreter frame off of the stack

  mov(G2_thread, L7);
  // save nmethod
  mov(O0, L6);
  set_last_Java_frame(SP, noreg);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
  reset_last_Java_frame();
  mov(L7, G2_thread);

  // move OSR nmethod to I1
  mov(L6, I1);

  // OSR buffer to I0
  mov(O0, I0);

  // remove the interpreter frame
  restore(I5_savedSP, 0, SP);

  // Jump to the osr code.
  ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
  jmp(O2, G0);
  delayed()->nop();

  bind(overflow_with_error);

  bind(did_not_overflow);
}



void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) {
  if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); }
}


// local helper function for the verify_oop_or_return_address macro
static bool verify_return_address(Method* m, int bci) {
#ifndef PRODUCT
  address pc = (address)(m->constMethod())
             + in_bytes(ConstMethod::codes_offset()) + bci;
  // assume it is a valid return address if it is inside m and is preceded by a jsr
  if (!m->contains(pc))                                          return false;
  address jsr_pc;
  jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
  if (*jsr_pc == Bytecodes::_jsr   && jsr_pc >= m->code_base())  return true;
  jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
  if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base())  return true;
#endif // PRODUCT
  return false;
}


void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
  if (!VerifyOops) return;
  // the VM documentation for the astore[_wide] bytecode allows
  // the TOS to be not only an oop but also a return address
  Label test;
  Label skip;
  // See if it is an address (in the current method):

  mov(reg, Rtmp);
  const int log2_bytecode_size_limit = 16;
  srl(Rtmp, log2_bytecode_size_limit, Rtmp);
  br_notnull_short( Rtmp, pt, test );

  // %%% should use call_VM_leaf here?
  save_frame_and_mov(0, Lmethod, O0, reg, O1);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, verify_return_address), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  br_notnull( O0, false, pt, skip );
  delayed()->restore();

  // Perform a more elaborate out-of-line call
  // Not an address; verify it:
  bind(test);
  verify_oop(reg);
  bind(skip);
}


void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
  if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
}


// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
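// A sketch of what the helper emits (the store sits in the branch delay slot,
// so the bumped counter is written back whether or not the branch is taken):
//
//   scratch1 = *counter_addr + increment;
//   cc       = scratch1 & *mask_addr;
//   if (cc <cond> 0) goto *where;    // delay slot: *counter_addr = scratch1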
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask_addr,
                                                        Register scratch1, Register scratch2,
                                                        Condition cond, Label *where) {
  ld(counter_addr, scratch1);
  add(scratch1, increment, scratch1);
  ld(mask_addr, scratch2);
  andcc(scratch1, scratch2, G0);
  br(cond, false, Assembler::pn, *where);
  delayed()->st(scratch1, counter_addr);
}

// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   InterpreterRuntime::post_method_entry();
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_entry(method, receiver);
// }
// if (log_is_enabled(Trace, redefine, class, obsolete)) {
//   SharedRuntime::rc_trace_method_entry(method, receiver);
// }

void InterpreterMacroAssembler::notify_method_entry() {

  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, temp_reg);
    cmp_and_br_short(temp_reg, 0, equal, pt, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    Register temp_reg = O5;
    SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
    call_VM_leaf(noreg,
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
      G2_thread, Lmethod);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    call_VM_leaf(noreg,
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      G2_thread, Lmethod);
  }
}


// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   // save result
//   InterpreterRuntime::post_method_exit();
//   // restore result
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_exit(thread, method);
// }
//
// Native methods have their result stored in d_tmp and l_tmp
// Java methods have their result stored in the expression stack

void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
                                                   TosState state,
                                                   NotifyMethodExitMode mode) {

  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, temp_reg);
    cmp_and_br_short(temp_reg, 0, equal, pt, L);

    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. For
    // native methods it assumes the result registers are saved to
    // l_scratch and d_scratch. If this changes then the interpreter_frame_result
    // implementation will need to be updated too.

    save_return_value(state, is_native_method);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    restore_return_value(state, is_native_method);
    bind(L);
  }

  {
    Register temp_reg = O5;
    // Dtrace notification
    SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
    save_return_value(state, is_native_method);
    call_VM_leaf(
      noreg,
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
      G2_thread, Lmethod);
    restore_return_value(state, is_native_method);
  }
}

void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
  if (is_native_call) {
    stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
    stx(O0, l_tmp);
#else
    std(O0, l_tmp);
#endif
  } else {
    push(state);
  }
}

void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
  if (is_native_call) {
    ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
    ldx(l_tmp, O0);
#else
    ldd(l_tmp, O0);
#endif
  } else {
    pop(state);
  }
}