30 #include "oops/markOop.hpp" 31 #include "oops/methodData.hpp" 32 #include "oops/method.hpp" 33 #include "prims/jvmtiExport.hpp" 34 #include "prims/jvmtiRedefineClassesTrace.hpp" 35 #include "prims/jvmtiThreadState.hpp" 36 #include "runtime/basicLock.hpp" 37 #include "runtime/biasedLocking.hpp" 38 #include "runtime/sharedRuntime.hpp" 39 #include "runtime/thread.inline.hpp" 40 41 42 // Implementation of InterpreterMacroAssembler 43 #ifdef CC_INTERP 44 void InterpreterMacroAssembler::get_method(Register reg) { 45 movptr(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize))); 46 movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method))); 47 } 48 #endif // CC_INTERP 49 50 51 #ifndef CC_INTERP 52 void InterpreterMacroAssembler::call_VM_leaf_base( 53 address entry_point, 54 int number_of_arguments 55 ) { 56 // interpreter specific 57 // 58 // Note: No need to save/restore bcp & locals (rsi & rdi) pointer 59 // since these are callee saved registers and no blocking/ 60 // GC can happen in leaf calls. 61 // Further Note: DO NOT save/restore bcp/locals. If a caller has 62 // already saved them so that it can use rsi/rdi as temporaries 63 // then a save/restore here will DESTROY the copy the caller 64 // saved! There used to be a save_bcp() that only happened in 65 // the ASSERT path (no restore_bcp). Which caused bizarre failures 66 // when jvm built with ASSERTs. 67 #ifdef ASSERT 68 { Label L; 69 cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 70 jcc(Assembler::equal, L); 71 stop("InterpreterMacroAssembler::call_VM_leaf_base: last_sp != NULL"); 72 bind(L); 73 } 74 #endif 75 // super call 76 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); 77 // interpreter specific 78 79 // Used to ASSERT that rsi/rdi were equal to frame's bcp/locals 80 // but since they may not have been saved (and we don't want to 81 // save them here (see note above) the assert is invalid. 82 } 83 84 85 void InterpreterMacroAssembler::call_VM_base( 86 Register oop_result, 87 Register java_thread, 88 Register last_java_sp, 89 address entry_point, 90 int number_of_arguments, 91 bool check_exceptions 92 ) { 93 #ifdef ASSERT 94 { Label L; 95 cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 96 jcc(Assembler::equal, L); 97 stop("InterpreterMacroAssembler::call_VM_base: last_sp != NULL"); 98 bind(L); 99 } 100 #endif /* ASSERT */ 101 // interpreter specific 102 // 103 // Note: Could avoid restoring locals ptr (callee saved) - however doesn't 104 // really make a difference for these runtime calls, since they are 105 // slow anyway. Btw., bcp must be saved/restored since it may change 106 // due to GC. 107 assert(java_thread == noreg , "not expecting a precomputed java thread"); 108 save_bcp(); 109 // super call 110 MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exceptions); 111 // interpreter specific 112 restore_bcp(); 113 restore_locals(); 114 } 115 116 117 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) { 118 if (JvmtiExport::can_pop_frame()) { 119 Label L; 120 // Initiate popframe handling only if it is not already being processed. If the flag 121 // has the popframe_processing bit set, it means that this code is called *during* popframe 122 // handling - we don't want to reenter. 123 Register pop_cond = java_thread; // Not clear if any other register is available... 
    movl(pop_cond, Address(java_thread, JavaThread::popframe_condition_offset()));
    testl(pop_cond, JavaThread::popframe_pending_bit);
    jcc(Assembler::zero, L);
    testl(pop_cond, JavaThread::popframe_processing_bit);
    jcc(Assembler::notZero, L);
    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
    jmp(rax);
    bind(L);
    get_thread(java_thread);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  get_thread(rcx);
  movl(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset()));
  const Address tos_addr (rcx, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr (rcx, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr (rcx, JvmtiThreadState::earlyret_value_offset());
  const Address val_addr1(rcx, JvmtiThreadState::earlyret_value_offset()
                             + in_ByteSize(wordSize));
  switch (state) {
    case atos: movptr(rax, oop_addr);
               movptr(oop_addr, NULL_WORD);
               verify_oop(rax, state);       break;
    case ltos: movl(rdx, val_addr1);         // fall through
    case btos:                               // fall through
    case ctos:                               // fall through
    case stos:                               // fall through
    case itos: movl(rax, val_addr);          break;
    case ftos: fld_s(val_addr);              break;
    case dtos: fld_d(val_addr);              break;
    case vtos: /* nothing to do */           break;
    default  : ShouldNotReachHere();
  }
  // Clean up tos value in the thread object
  movl(tos_addr, (int32_t) ilgl);
  movptr(val_addr, NULL_WORD);
  NOT_LP64(movptr(val_addr1, NULL_WORD));
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register tmp = java_thread;
    movptr(tmp, Address(tmp, JavaThread::jvmti_thread_state_offset()));
    testptr(tmp, tmp);
    jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    movl(tmp, Address(tmp, JvmtiThreadState::earlyret_state_offset()));
    cmpl(tmp, JvmtiThreadState::earlyret_pending);
    jcc(Assembler::notEqual, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.
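    // The earlyret TosState (saved in the JvmtiThreadState) is passed to the
    // entry lookup as its single argument.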
    get_thread(java_thread);
    movptr(tmp, Address(java_thread, JavaThread::jvmti_thread_state_offset()));
    pushl(Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), 1);
    jmp(rax);
    bind(L);
    get_thread(java_thread);
  }
}


void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  load_unsigned_short(reg, Address(rsi, bcp_offset));
  bswapl(reg);
  shrl(reg, 16);
}


void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    load_unsigned_short(reg, Address(rsi, bcp_offset));
  } else if (index_size == sizeof(u4)) {
    movl(reg, Address(rsi, bcp_offset));
    // Check if the secondary index definition is still ~x, otherwise
    // we have to change the following assembler code to calculate the
    // plain index.
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    notl(reg);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    load_unsigned_byte(reg, Address(rsi, bcp_offset));
  } else {
    ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index,
                                                           int bcp_offset, size_t index_size) {
  assert_different_registers(cache, index);
  get_cache_index_at_bcp(index, bcp_offset, index_size);
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
  assert(exact_log2(in_words(ConstantPoolCacheEntry::size())) == 2, "else change next line");
  shlptr(index, 2);  // convert from field index to ConstantPoolCacheEntry index
}


void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register index,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
  movptr(bytecode, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
  const int shift_count = (1 + byte_no) * BitsPerByte;
  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
         "correct shift count");
  shrptr(bytecode, shift_count);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  andptr(bytecode, ConstantPoolCacheEntry::bytecode_1_mask);
}


void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                               int bcp_offset, size_t index_size) {
  assert(cache != tmp, "must use different register");
  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
  assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  // and from word offset to byte offset
  assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
  shll(tmp, 2 + LogBytesPerWord);
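  // tmp now holds the entry's byte offset from the start of the cache's
  // entry array (index * sizeof(ConstantPoolCacheEntry)).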
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  // skip past the header
  addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
  addptr(cache, tmp);  // construct pointer to cache entry
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index) {
  assert_different_registers(result, index);
  // convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a java object, it can be compressed
  Register tmp = index;  // reuse
  shll(tmp, LogBytesPerHeapOop);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  movptr(result, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
  // JNIHandles::resolve(obj);
  movptr(result, Address(result, 0));
  // Add in the index
  addptr(result, tmp);
  load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. EAX holds the super_klass. Blows ECX.
// Resets EDI to locals. Register sub_klass cannot be any of the above.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Label& ok_is_subtype) {
  assert(Rsub_klass != rax, "rax holds superklass");
  assert(Rsub_klass != rcx, "used as a temp");
  assert(Rsub_klass != rdi, "used as a temp, restored from locals");

  // Profile the not-null value's klass.
  profile_typecheck(rcx, Rsub_klass, rdi);  // blows rcx, reloads rdi

  // Do the check.
  check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype);  // blows rcx

  // Profile the failure of the check.
  profile_typecheck_failed(rcx);  // blows rcx
}

void InterpreterMacroAssembler::f2ieee() {
  if (IEEEPrecision) {
    fstp_s(Address(rsp, 0));
    fld_s(Address(rsp, 0));
  }
}


void InterpreterMacroAssembler::d2ieee() {
  if (IEEEPrecision) {
    fstp_d(Address(rsp, 0));
    fld_d(Address(rsp, 0));
  }
}

// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  pop(r);
}

void InterpreterMacroAssembler::pop_i(Register r) {
  pop(r);
}

void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
  pop(lo);
  pop(hi);
}

void InterpreterMacroAssembler::pop_f() {
  fld_s(Address(rsp, 0));
  addptr(rsp, 1 * wordSize);
}

void InterpreterMacroAssembler::pop_d() {
  fld_d(Address(rsp, 0));
  addptr(rsp, 2 * wordSize);
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr(rax);       break;
    case btos:                     // fall through
    case ctos:                     // fall through
    case stos:                     // fall through
    case itos: pop_i(rax);         break;
    case ltos: pop_l(rax, rdx);    break;
    case ftos: pop_f();            break;
    case dtos: pop_d();            break;
    case vtos: /* nothing to do */ break;
    default  : ShouldNotReachHere();
  }
  verify_oop(rax, state);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_i(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
  push(hi);
  push(lo);
}

void InterpreterMacroAssembler::push_f() {
  // Do not schedule for no AGI! Never write beyond rsp!
  subptr(rsp, 1 * wordSize);
  fstp_s(Address(rsp, 0));
}

void InterpreterMacroAssembler::push_d(Register r) {
  // Do not schedule for no AGI! Never write beyond rsp!
  subptr(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}


void InterpreterMacroAssembler::push(TosState state) {
  verify_oop(rax, state);
  switch (state) {
    case atos: push_ptr(rax);      break;
    case btos:                     // fall through
    case ctos:                     // fall through
    case stos:                     // fall through
    case itos: push_i(rax);        break;
    case ltos: push_l(rax, rdx);   break;
    case ftos: push_f();           break;
    case dtos: push_d(rax);        break;
    case vtos: /* nothing to do */ break;
    default  : ShouldNotReachHere();
  }
}


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
}

void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  lea(rsi, Address(rsp, wordSize));
  // record last_sp
  movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), rsi);
}


// Jump to the from_interpreted entry of a call unless single stepping is
// possible in this thread, in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
  prepare_to_jump_from_interpreted();

  if (JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    get_thread(temp);
    // interp_only is an int; on little endian it is sufficient to test the byte only
    // Is a cmpl faster?
    cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
    jccb(Assembler::zero, run_compiled_code);
    jmp(Address(method, Method::interpreter_entry_offset()));
    bind(run_compiled_code);
  }

  jmp(Address(method, Method::from_interpreted_offset()));
}


// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts. Intel does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
  // Nothing Intel-specific to be done here.
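  // (A port that used the split would emit the table load here and the
  // indirect jump in dispatch_epilog; on x86, dispatch_next does all the work.)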
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table,
                                              bool verifyoop) {
  verify_FPU(1, state);
  if (VerifyActivationFrameSize) {
    Label L;
    mov(rcx, rbp);
    subptr(rcx, rsp);
    int min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize;
    cmpptr(rcx, min_frame_size);
    jcc(Assembler::greaterEqual, L);
    stop("broken stack frame");
    bind(L);
  }
  if (verifyoop) verify_oop(rax, state);
  Address index(noreg, rbx, Address::times_ptr);
  ExternalAddress tbl((address)table);
  ArrayAddress dispatch(tbl, index);
  jump(dispatch);
}


void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}


void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state), false);
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int step) {
  // load next bytecode (load before advancing rsi to prevent AGI)
  load_unsigned_byte(rbx, Address(rsi, step));
  // advance rsi
  increment(rsi, step);
  dispatch_base(state, Interpreter::dispatch_table(state));
}


void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  load_unsigned_byte(rbx, Address(rsi, 0));
  dispatch_base(state, table);
}

// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_addr,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception,
                                                  bool notify_jvmdi) {
  // Note: Registers rax, rdx and FPU ST(0) may be in use for the result
  // check if synchronized method
  Label unlocked, unlock, no_unlock;

  get_thread(rcx);
  const Address do_not_unlock_if_synchronized(rcx,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));

  movbool(rbx, do_not_unlock_if_synchronized);
  mov(rdi, rbx);
  movbool(do_not_unlock_if_synchronized, false);  // reset the flag

  movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));  // get method access flags
  movl(rcx, Address(rbx, Method::access_flags_offset()));

  testl(rcx, JVM_ACC_SYNCHRONIZED);
  jcc(Assembler::zero, unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  mov(rcx, rdi);
  testbool(rcx);
  jcc(Assembler::notZero, no_unlock);

  // unlock monitor
  push(state);  // save result

  // The BasicObjectLock will be first in the list, since this is a synchronized
  // method. However, we need to check that the object has not been unlocked by
  // an explicit monitorexit bytecode.
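  // The first monitor slot sits just below the frame's initial SP, so it can
  // be addressed relative to rbp without depending on the current rsp.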
  const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
  lea(rdx, monitor);  // address of first monitor

  movptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
  testptr(rax, rax);
  jcc(Assembler::notZero, unlock);

  pop(state);
  if (throw_monitor_exception) {
    empty_FPU_stack();  // remove possible return value from FPU-stack, otherwise stack could overflow

    // Entry already unlocked, need to throw exception
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      empty_FPU_stack();  // remove possible return value from FPU-stack, otherwise stack could overflow
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    jmp(unlocked);
  }

  bind(unlock);
  unlock_object(rdx);
  pop(state);

  // Check for block-structured locking (i.e., that all locked objects have been unlocked)
  bind(unlocked);

  // rax, rdx: Might contain return value

  // Check that all monitors are unlocked
  {
    Label loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);

    bind(restart);
    movptr(rcx, monitor_block_top);  // points to current entry, starting with top-most entry
    lea(rbx, monitor_block_bot);     // points to word before bottom of monitor block
    jmp(entry);

    // Entry already locked, need to throw exception
    bind(exception);

    if (throw_monitor_exception) {
      empty_FPU_stack();  // remove possible return value from FPU-stack, otherwise stack could overflow

      // Throw exception
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception.
      // Unlock does not block, so we don't have to worry about the frame.

      push(state);
      mov(rdx, rcx);
      unlock_object(rdx);
      pop(state);

      if (install_monitor_exception) {
        empty_FPU_stack();  // remove possible return value from FPU-stack, otherwise stack could overflow
        call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }

      jmp(restart);
    }

    bind(loop);
    cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);  // check if current entry is used
    jcc(Assembler::notEqual, exception);

    addptr(rcx, entry_size);  // otherwise advance to next entry
    bind(entry);
    cmpptr(rcx, rbx);  // check if bottom reached
    jcc(Assembler::notEqual, loop);  // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);      // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI);  // preserve TOSCA
  }

  // remove activation
  movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));  // get sender sp
  leave();        // remove frame anchor
  pop(ret_addr);  // get return address
  mov(rsp, rbx);  // set sp to sender sp
  if (UseSSE) {
    // float and double are returned in xmm register in SSE-mode
    if (state == ftos && UseSSE >= 1) {
      subptr(rsp, wordSize);
      fstp_s(Address(rsp, 0));
      movflt(xmm0, Address(rsp, 0));
      addptr(rsp, wordSize);
    } else if (state == dtos && UseSSE >= 2) {
      subptr(rsp, 2*wordSize);
      fstp_d(Address(rsp, 0));
      movdbl(xmm0, Address(rsp, 0));
      addptr(rsp, 2*wordSize);
    }
  }
}

#endif /* !CC_INTERP */

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::notZero, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::zero, skip);  // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}


// Lock object
//
// Argument: rdx : Points to BasicObjectLock to be used for locking. Must
//                 be initialized with object to lock
void InterpreterMacroAssembler::lock_object(Register lock_reg) {
  assert(lock_reg == rdx, "The argument is only for looks. It must be rdx");
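  // Fast path: try to CAS a pointer to the BasicLock (whose displaced header
  // field holds the object's original mark word, with the "unlocked" bit set)
  // into the object's mark word. If the CAS fails, a recursive lock by this
  // thread is detected by checking whether the current mark is a pointer into
  // this thread's own stack.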
It must be rdx"); 687 688 if (UseHeavyMonitors) { 689 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg); 690 } else { 691 692 Label done; 693 694 const Register swap_reg = rax; // Must use rax, for cmpxchg instruction 695 const Register obj_reg = rcx; // Will contain the oop 696 697 const int obj_offset = BasicObjectLock::obj_offset_in_bytes(); 698 const int lock_offset = BasicObjectLock::lock_offset_in_bytes (); 699 const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes(); 700 701 Label slow_case; 702 703 // Load object pointer into obj_reg %rcx 704 movptr(obj_reg, Address(lock_reg, obj_offset)); 705 706 if (UseBiasedLocking) { 707 // Note: we use noreg for the temporary register since it's hard 708 // to come up with a free register on all incoming code paths 709 biased_locking_enter(lock_reg, obj_reg, swap_reg, noreg, false, done, &slow_case); 710 } 711 712 // Load immediate 1 into swap_reg %rax, 713 movptr(swap_reg, (int32_t)1); 714 715 // Load (object->mark() | 1) into swap_reg %rax, 716 orptr(swap_reg, Address(obj_reg, 0)); 717 718 // Save (object->mark() | 1) into BasicLock's displaced header 719 movptr(Address(lock_reg, mark_offset), swap_reg); 720 721 assert(lock_offset == 0, "displached header must be first word in BasicObjectLock"); 722 if (os::is_MP()) { 723 lock(); 724 } 725 cmpxchgptr(lock_reg, Address(obj_reg, 0)); 726 if (PrintBiasedLockingStatistics) { 727 cond_inc32(Assembler::zero, 728 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr())); 729 } 730 jcc(Assembler::zero, done); 731 732 // Test if the oopMark is an obvious stack pointer, i.e., 733 // 1) (mark & 3) == 0, and 734 // 2) rsp <= mark < mark + os::pagesize() 735 // 736 // These 3 tests can be done by evaluating the following 737 // expression: ((mark - rsp) & (3 - os::vm_page_size())), 738 // assuming both stack pointer and pagesize have their 739 // least significant 2 bits clear. 740 // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg 741 subptr(swap_reg, rsp); 742 andptr(swap_reg, 3 - os::vm_page_size()); 743 744 // Save the test result, for recursive case, the result is zero 745 movptr(Address(lock_reg, mark_offset), swap_reg); 746 747 if (PrintBiasedLockingStatistics) { 748 cond_inc32(Assembler::zero, 749 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr())); 750 } 751 jcc(Assembler::zero, done); 752 753 bind(slow_case); 754 755 // Call the runtime routine for slow case 756 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg); 757 758 bind(done); 759 } 760 } 761 762 763 // Unlocks an object. Used in monitorexit bytecode and remove_activation. 764 // 765 // Argument: rdx : Points to BasicObjectLock structure for lock 766 // Throw an IllegalMonitorException if object is not locked by current thread 767 // 768 // Uses: rax, rbx, rcx, rdx 769 void InterpreterMacroAssembler::unlock_object(Register lock_reg) { 770 assert(lock_reg == rdx, "The argument is only for looks. 
It must be rdx"); 771 772 if (UseHeavyMonitors) { 773 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); 774 } else { 775 Label done; 776 777 const Register swap_reg = rax; // Must use rax, for cmpxchg instruction 778 const Register header_reg = rbx; // Will contain the old oopMark 779 const Register obj_reg = rcx; // Will contain the oop 780 781 save_bcp(); // Save in case of exception 782 783 // Convert from BasicObjectLock structure to object and BasicLock structure 784 // Store the BasicLock address into %rax, 785 lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes())); 786 787 // Load oop into obj_reg(%rcx) 788 movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes ())); 789 790 // Free entry 791 movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD); 792 793 if (UseBiasedLocking) { 794 biased_locking_exit(obj_reg, header_reg, done); 795 } 796 797 // Load the old header from BasicLock structure 798 movptr(header_reg, Address(swap_reg, BasicLock::displaced_header_offset_in_bytes())); 799 800 // Test for recursion 801 testptr(header_reg, header_reg); 802 803 // zero for recursive case 804 jcc(Assembler::zero, done); 805 806 // Atomic swap back the old header 807 if (os::is_MP()) lock(); 808 cmpxchgptr(header_reg, Address(obj_reg, 0)); 809 810 // zero for recursive case 811 jcc(Assembler::zero, done); 812 813 // Call the runtime routine for slow case. 814 movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj 815 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); 816 817 bind(done); 818 819 restore_bcp(); 820 } 821 } 822 823 824 #ifndef CC_INTERP 825 826 // Test ImethodDataPtr. If it is null, continue at the specified label 827 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) { 828 assert(ProfileInterpreter, "must be profiling interpreter"); 829 movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize)); 830 testptr(mdp, mdp); 831 jcc(Assembler::zero, zero_continue); 832 } 833 834 835 // Set the method data pointer for the current bcp. 836 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { 837 assert(ProfileInterpreter, "must be profiling interpreter"); 838 Label set_mdp; 839 push(rax); 840 push(rbx); 841 842 get_method(rbx); 843 // Test MDO to avoid the call if it is NULL. 844 movptr(rax, Address(rbx, in_bytes(Method::method_data_offset()))); 845 testptr(rax, rax); 846 jcc(Assembler::zero, set_mdp); 847 // rbx,: method 848 // rsi: bcp 849 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, rsi); 850 // rax,: mdi 851 // mdo is guaranteed to be non-zero here, we checked for it before the call. 852 movptr(rbx, Address(rbx, in_bytes(Method::method_data_offset()))); 853 addptr(rbx, in_bytes(MethodData::data_offset())); 854 addptr(rax, rbx); 855 bind(set_mdp); 856 movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), rax); 857 pop(rbx); 858 pop(rax); 859 } 860 861 void InterpreterMacroAssembler::verify_method_data_pointer() { 862 assert(ProfileInterpreter, "must be profiling interpreter"); 863 #ifdef ASSERT 864 Label verify_continue; 865 push(rax); 866 push(rbx); 867 push(rcx); 868 push(rdx); 869 test_method_data_pointer(rcx, verify_continue); // If mdp is zero, continue 870 get_method(rbx); 871 872 // If the mdp is valid, it will point to a DataLayout header which is 873 // consistent with the bcp. 
  load_unsigned_short(rdx, Address(rcx, in_bytes(DataLayout::bci_offset())));
  addptr(rdx, Address(rbx, Method::const_offset()));
  lea(rdx, Address(rdx, ConstMethod::codes_offset()));
  cmpptr(rdx, rsi);
  jcc(Assembler::equal, verify_continue);
  // rbx: method
  // rsi: bcp
  // rcx: mdp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), rbx, rsi, rcx);
  bind(verify_continue);
  pop(rdx);
  pop(rcx);
  pop(rbx);
  pop(rax);
#endif // ASSERT
}


void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, int constant, Register value) {
  // %%% this seems to be used to store counter data which is surely 32bits
  // however 64bit side stores 64 bits which seems wrong
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, constant);
  movptr(data, value);
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int constant,
                                                      bool decrement) {
  // Counter address
  Address data(mdp_in, constant);

  increment_mdp_data_at(data, decrement);
}


void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
                                                      bool decrement) {
  assert(DataLayout::counter_increment == 1, "flow-free idiom only works with 1");
  assert(ProfileInterpreter, "must be profiling interpreter");

  // %%% 64bit treats this as 64 bit which seems unlikely
  if (decrement) {
    // Decrement the register. Set condition codes.
    addl(data, -DataLayout::counter_increment);
    // If the decrement causes the counter to overflow, stay negative
    Label L;
    jcc(Assembler::negative, L);
    addl(data, DataLayout::counter_increment);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Increment the register. Set carry flag.
    addl(data, DataLayout::counter_increment);
    // If the increment causes the counter to overflow, pull back by 1.
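    // (addl leaves CF set exactly when the counter wrapped to zero; sbbl then
    // subtracts that carry, saturating the counter at its maximum instead of
    // wrapping around.)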
    sbbl(data, 0);
  }
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      Register reg,
                                                      int constant,
                                                      bool decrement) {
  Address data(mdp_in, reg, Address::times_1, constant);

  increment_mdp_data_at(data, decrement);
}


void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in, int flag_byte_constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  int header_offset = in_bytes(DataLayout::header_offset());
  int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant);
  // Set the flag
  orl(Address(mdp_in, header_offset), header_bits);
}


void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    cmpptr(value, Address(mdp_in, offset));
  } else {
    // Put the test value into a register, so caller can use it:
    movptr(test_value_out, Address(mdp_in, offset));
    cmpptr(test_value_out, value);
  }
  jcc(Assembler::notEqual, not_equal_continue);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  addptr(mdp_in, constant);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(return_bci);  // save/restore across call_VM
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
  pop(return_bci);
}


void InterpreterMacroAssembler::profile_taken_branch(Register mdp, Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp.
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch. Increment the taken count.
    // We inline increment_mdp_data_at to return bumped_count in a register
    //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
    Address data(mdp, in_bytes(JumpData::taken_offset()));

    // %%% 64bit treats these cells as 64 bit but they seem to be 32 bit
    movl(bumped_count, data);
    assert(DataLayout::counter_increment == 1, "flow-free idiom only works with 1");
    addl(bumped_count, DataLayout::counter_increment);
    sbbl(bumped_count, 0);
    movl(data, bumped_count);  // Store back out

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are not taking the branch. Increment the not-taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to the next bytecode.
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_virtual_call(Register receiver, Register mdp,
                                                     Register reg2,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      testptr(receiver, receiver);
      jccb(Assembler::notZero, not_null);
      // We are making a call. Increment the count for the null receiver.
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
      jmp(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, mdp, reg2, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register mdp,
                                        Register reg2, int start_row,
                                        Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
    }
    return;
  }

  int last_row = VirtualCallData::row_limit() - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the receiver and for null.
  // Take any of three different outcomes:
  //   1. found receiver => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the receiver is receiver[n].
    int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
    test_mdp_data_at(mdp, recvr_offset, receiver,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
    // (Reg2 now contains the receiver from the CallData.)

    // The receiver is receiver[n]. Increment count[n].
    int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
    increment_mdp_data_at(mdp, count_offset);
    jmp(done);
    bind(next_test);

    if (row == start_row) {
      Label found_null;
      // Failed the equality check on receiver[n]... Test for null.
      testptr(reg2, reg2);
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (is_virtual_call) {
          jccb(Assembler::zero, found_null);
          // Receiver did not match any saved receiver and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
          jmp(done);
          bind(found_null);
        } else {
          jcc(Assembler::notZero, done);
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      jcc(Assembler::zero, found_null);

      // Put all the "Case 3" tests here.
      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);

      // Found a null. Keep searching for a matching receiver,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching receiver, but we
  // observed that receiver[start_row] is NULL.

  // Fill in the receiver field and increment the count.
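  // (The count starts at counter_increment rather than zero so that this
  // first observation of the receiver is itself recorded.)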
  int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
  set_mdp_data_at(mdp, recvr_offset, receiver);
  int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
  movptr(reg2, (intptr_t)DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg2);
  if (start_row > 0) {
    jmp(done);
  }
}

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp, Register reg2,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);

  bind(done);
}

void InterpreterMacroAssembler::profile_ret(Register return_bci, Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp, in_bytes(RetData::bci_offset(row)), return_bci,
                       noreg, next_test);

      // return_bci is equal to bci[n]. Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp, in_bytes(RetData::bci_displacement_offset(row)));
      jmp(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter. We expect to see zero or small negatives.
    increment_mdp_data_at(mdp, count_offset, true);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
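    // A typecheck uses a BitData cell unless TypeProfileCasts is on, in which
    // case the larger (Virtual)CallData layout also records the observed klass.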
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, reg2, false);
      assert(reg2 == rdi, "we know how to fix this blown reg");
      restore_locals();  // Restore EDI
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count
    increment_mdp_data_at(mdp, in_bytes(MultiBranchData::default_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp, in_bytes(MultiBranchData::default_displacement_offset()));

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_switch_case(Register index, Register mdp, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes()
    movptr(reg2, (intptr_t)in_bytes(MultiBranchData::per_case_size()));
    // index is positive and so should have a correct value if this code
    // were used on 64 bits
    imulptr(index, reg2);
    addptr(index, in_bytes(MultiBranchData::case_array_offset()));

    // Update the case count
    increment_mdp_data_at(mdp, index, in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp, index, in_bytes(MultiBranchData::relative_displacement_offset()));

    bind(profile_continue);
  }
}

#endif // !CC_INTERP


void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
  if (state == atos) MacroAssembler::verify_oop(reg);
}


#ifndef CC_INTERP
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
  if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
}

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask,
                                                        Register scratch, bool preloaded,
                                                        Condition cond, Label* where) {
  if (!preloaded) {
    movl(scratch, counter_addr);
  }
  incrementl(scratch, increment);
  movl(counter_addr, scratch);
  andl(scratch, mask);
  jcc(cond, *where);
}
#endif /* !CC_INTERP */


void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
  // track stack depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    get_thread(rcx);
    movl(rcx, Address(rcx, JavaThread::interp_only_mode_offset()));
    testl(rcx, rcx);
    jcc(Assembler::zero, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    SkipIfEqual skip_if(this, &DTraceMethodProbes, 0);
    get_thread(rcx);
    get_method(rbx);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), rcx, rbx);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    get_thread(rcx);
    get_method(rbx);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rcx, rbx);
  }
}


void InterpreterMacroAssembler::notify_method_exit(
    TosState state, NotifyMethodExitMode mode) {
  // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
  // track stack depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    // For the C++ interpreter the result is always stored at a known location
    // in the frame; the template interpreter will leave it on the top of the stack.
    NOT_CC_INTERP(push(state);)
    get_thread(rcx);
    movl(rcx, Address(rcx, JavaThread::interp_only_mode_offset()));
    testl(rcx, rcx);
    jcc(Assembler::zero, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    bind(L);
    NOT_CC_INTERP(pop(state);)
  }

  {
    SkipIfEqual skip_if(this, &DTraceMethodProbes, 0);
    NOT_CC_INTERP(push(state));
    get_thread(rbx);
    get_method(rcx);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
      rbx, rcx);
    NOT_CC_INTERP(pop(state));
  }
}
There used to be a save_bcp() that only happened in 62 // the ASSERT path (no restore_bcp). Which caused bizarre failures 63 // when jvm built with ASSERTs. 64 #ifdef ASSERT 65 { 66 Label L; 67 cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 68 jcc(Assembler::equal, L); 69 stop("InterpreterMacroAssembler::call_VM_leaf_base:" 70 " last_sp != NULL"); 71 bind(L); 72 } 73 #endif 74 // super call 75 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); 76 // interpreter specific 77 // Used to ASSERT that r13/r14 were equal to frame's bcp/locals 78 // but since they may not have been saved (and we don't want to 79 // save them here (see note above) the assert is invalid. 80 } 81 82 void InterpreterMacroAssembler::call_VM_base(Register oop_result, 83 Register java_thread, 84 Register last_java_sp, 85 address entry_point, 86 int number_of_arguments, 87 bool check_exceptions) { 88 // interpreter specific 89 // 90 // Note: Could avoid restoring locals ptr (callee saved) - however doesn't 91 // really make a difference for these runtime calls, since they are 92 // slow anyway. Btw., bcp must be saved/restored since it may change 93 // due to GC. 94 NOT_LP64(assert(java_thread == noreg , "not expecting a precomputed java thread");) 95 save_bcp(); 96 #ifdef ASSERT 97 { 98 Label L; 99 cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 100 jcc(Assembler::equal, L); 101 stop("InterpreterMacroAssembler::call_VM_leaf_base:" 102 " last_sp != NULL"); 103 bind(L); 104 } 105 #endif /* ASSERT */ 106 // super call 107 MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp, 108 entry_point, number_of_arguments, 109 check_exceptions); 110 // interpreter specific 111 restore_bcp(); 112 restore_locals(); 113 } 114 115 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) { 116 if (JvmtiExport::can_pop_frame()) { 117 Label L; 118 // Initiate popframe handling only if it is not already being 119 // processed. If the flag has the popframe_processing bit set, it 120 // means that this code is called *during* popframe handling - we 121 // don't want to reenter. 122 // This method is only called just after the call into the vm in 123 // call_VM_base, so the arg registers are available. 124 Register pop_cond = NOT_LP64(java_thread) // Not clear if any other register is available on 32 bit 125 LP64_ONLY(c_rarg0); 126 movl(pop_cond, Address(java_thread, JavaThread::popframe_condition_offset())); 127 testl(pop_cond, JavaThread::popframe_pending_bit); 128 jcc(Assembler::zero, L); 129 testl(pop_cond, JavaThread::popframe_processing_bit); 130 jcc(Assembler::notZero, L); 131 // Call Interpreter::remove_activation_preserving_args_entry() to get the 132 // address of the same-named entrypoint in the generated interpreter code. 
133 call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry)); 134 jmp(rax); 135 bind(L); 136 NOT_LP64(get_thread(java_thread);) 137 } 138 } 139 140 void InterpreterMacroAssembler::load_earlyret_value(TosState state) { 141 Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx); 142 NOT_LP64(get_thread(thread);) 143 movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset())); 144 const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset()); 145 const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset()); 146 const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset()); 147 #ifdef _LP64 148 switch (state) { 149 case atos: movptr(rax, oop_addr); 150 movptr(oop_addr, (int32_t)NULL_WORD); 151 verify_oop(rax, state); break; 152 case ltos: movptr(rax, val_addr); break; 153 case btos: // fall through 154 case ctos: // fall through 155 case stos: // fall through 156 case itos: movl(rax, val_addr); break; 157 case ftos: movflt(xmm0, val_addr); break; 158 case dtos: movdbl(xmm0, val_addr); break; 159 case vtos: /* nothing to do */ break; 160 default : ShouldNotReachHere(); 161 } 162 // Clean up tos value in the thread object 163 movl(tos_addr, (int) ilgl); 164 movl(val_addr, (int32_t) NULL_WORD); 165 #else 166 const Address val_addr1(rcx, JvmtiThreadState::earlyret_value_offset() 167 + in_ByteSize(wordSize)); 168 switch (state) { 169 case atos: movptr(rax, oop_addr); 170 movptr(oop_addr, NULL_WORD); 171 verify_oop(rax, state); break; 172 case ltos: 173 movl(rdx, val_addr1); // fall through 174 case btos: // fall through 175 case ctos: // fall through 176 case stos: // fall through 177 case itos: movl(rax, val_addr); break; 178 case ftos: fld_s(val_addr); break; 179 case dtos: fld_d(val_addr); break; 180 case vtos: /* nothing to do */ break; 181 default : ShouldNotReachHere(); 182 } 183 #endif // _LP64 184 // Clean up tos value in the thread object 185 movl(tos_addr, (int32_t) ilgl); 186 movptr(val_addr, NULL_WORD); 187 NOT_LP64(movptr(val_addr1, NULL_WORD);) 188 } 189 190 191 void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) { 192 if (JvmtiExport::can_force_early_return()) { 193 Label L; 194 Register tmp = LP64_ONLY(c_rarg0) NOT_LP64(java_thread); 195 Register rthread = LP64_ONLY(r15_thread) NOT_LP64(java_thread); 196 197 movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset())); 198 testptr(tmp, tmp); 199 jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit; 200 201 // Initiate earlyret handling only if it is not already being processed. 202 // If the flag has the earlyret_processing bit set, it means that this code 203 // is called *during* earlyret handling - we don't want to reenter. 204 movl(tmp, Address(tmp, JvmtiThreadState::earlyret_state_offset())); 205 cmpl(tmp, JvmtiThreadState::earlyret_pending); 206 jcc(Assembler::notEqual, L); 207 208 // Call Interpreter::remove_activation_early_entry() to get the address of the 209 // same-named entrypoint in the generated interpreter code. 
    NOT_LP64(get_thread(java_thread);)
    movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
#ifdef _LP64
    movl(tmp, Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), tmp);
#else
    pushl(Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), 1);
#endif // _LP64
    jmp(rax);
    bind(L);
    NOT_LP64(get_thread(java_thread);)
  }
}

void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  // The index is stored big-endian in the bytecode stream; bswap brings
  // the two loaded bytes to the top of the register in corrected order,
  // shrl moves them back down.
  load_unsigned_short(reg, Address(_bcp_register, bcp_offset));
  bswapl(reg);
  shrl(reg, 16);
}

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
                                                       int bcp_offset,
                                                       size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    load_unsigned_short(index, Address(_bcp_register, bcp_offset));
  } else if (index_size == sizeof(u4)) {
    movl(index, Address(_bcp_register, bcp_offset));
    // Check if the secondary index definition is still ~x, otherwise
    // we have to change the following assembler code to calculate the
    // plain index.
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    notl(index); // convert to plain index
  } else if (index_size == sizeof(u1)) {
    load_unsigned_byte(index, Address(_bcp_register, bcp_offset));
  } else {
    ShouldNotReachHere();
  }
}

void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                           Register index,
                                                           int bcp_offset,
                                                           size_t index_size) {
  assert_different_registers(cache, index);
  get_cache_index_at_bcp(index, bcp_offset, index_size);
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  assert(exact_log2(in_words(ConstantPoolCacheEntry::size())) == 2, "else change next line");
  shll(index, 2);
}
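
// With entries of 4 machine words, the scaled index above makes a later
// Address(cache, index, Address::times_ptr, base_offset) resolve, roughly
// (illustrative arithmetic only):
//   entry = cache + base_offset + (field_index << 2) * wordSize;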
void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register index,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
  // We use a 32-bit load here since the layout of 64-bit words on
  // little-endian machines allows us to do that.
  movl(bytecode, Address(cache, index, Address::times_ptr,
                         ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
  const int shift_count = (1 + byte_no) * BitsPerByte;
  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
         "correct shift count");
  shrl(bytecode, shift_count);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  andl(bytecode, ConstantPoolCacheEntry::bytecode_1_mask);
}

void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                               Register tmp,
                                                               int bcp_offset,
                                                               size_t index_size) {
  assert(cache != tmp, "must use different register");
  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  // and from word offset to byte offset
  assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
  shll(tmp, 2 + LogBytesPerWord);
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  // skip past the header
  addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
  addptr(cache, tmp); // construct pointer to cache entry
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                                  Register result, Register index) {
  assert_different_registers(result, index);
  // convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a java object, it can be compressed
  Register tmp = index; // reuse
  shll(tmp, LogBytesPerHeapOop);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  movptr(result, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
  // JNIHandles::resolve(obj);
  movptr(result, Address(result, 0));
  // Add in the index
  addptr(result, tmp);
  load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}


// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
// Args:
//   rax: superklass
//   Rsub_klass: subklass
//
// Kills:
//   rcx, rdi
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  assert(Rsub_klass != rax, "rax holds superklass");
  LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
  LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
  assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
  assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi

  // Do the check.
  check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx

  // Profile the failure of the check.
  profile_typecheck_failed(rcx); // blows rcx
}
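
// The resulting control flow is, schematically (an illustrative pseudo-C
// sketch only):
//   profile the observed klass;                       // profile_typecheck
//   if (Rsub_klass is a subtype of rax) goto ok_is_subtype;
//   decrement the typecheck counter;                  // profile_typecheck_failed
//   /* fall through: the caller emits the failure path here */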
#ifndef _LP64
void InterpreterMacroAssembler::f2ieee() {
  if (IEEEPrecision) {
    fstp_s(Address(rsp, 0));
    fld_s(Address(rsp, 0));
  }
}


void InterpreterMacroAssembler::d2ieee() {
  if (IEEEPrecision) {
    fstp_d(Address(rsp, 0));
    fld_d(Address(rsp, 0));
  }
}
#endif // !_LP64

// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  pop(r);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_i(Register r) {
  push(r);
}

#ifdef _LP64
void InterpreterMacroAssembler::pop_i(Register r) {
  // XXX can't use pop currently, upper half non clean
  movl(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void InterpreterMacroAssembler::pop_l(Register r) {
  movq(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void InterpreterMacroAssembler::pop_d(XMMRegister r) {
  movdbl(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_l(Register r) {
  subptr(rsp, 2 * wordSize);
  movq(Address(rsp, 0), r);
}

void InterpreterMacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}

void InterpreterMacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}

void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr();                 break;
    case btos:
    case ctos:
    case stos:
    case itos: pop_i();                   break;
    case ltos: pop_l();                   break;
    case ftos: pop_f();                   break;
    case dtos: pop_d();                   break;
    case vtos: /* nothing to do */        break;
    default:   ShouldNotReachHere();
  }
  verify_oop(rax, state);
}

void InterpreterMacroAssembler::push(TosState state) {
  verify_oop(rax, state);
  switch (state) {
    case atos: push_ptr();                break;
    case btos:
    case ctos:
    case stos:
    case itos: push_i();                  break;
    case ltos: push_l();                  break;
    case ftos: push_f();                  break;
    case dtos: push_d();                  break;
    case vtos: /* nothing to do */        break;
    default  : ShouldNotReachHere();
  }
}
#else
void InterpreterMacroAssembler::pop_i(Register r) {
  pop(r);
}

void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
  pop(lo);
  pop(hi);
}

void InterpreterMacroAssembler::pop_f() {
  fld_s(Address(rsp, 0));
  addptr(rsp, 1 * wordSize);
}

void InterpreterMacroAssembler::pop_d() {
  fld_d(Address(rsp, 0));
  addptr(rsp, 2 * wordSize);
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr(rax);              break;
    case btos:                            // fall through
    case ctos:                            // fall through
    case stos:                            // fall through
    case itos: pop_i(rax);                break;
    case ltos: pop_l(rax, rdx);           break;
    case ftos: pop_f();                   break;
    case dtos: pop_d();                   break;
    case vtos: /* nothing to do */        break;
    default  : ShouldNotReachHere();
  }
  verify_oop(rax, state);
}


void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
  push(hi);
  push(lo);
}

void InterpreterMacroAssembler::push_f() {
  // Do not schedule for no AGI! Never write beyond rsp!
  subptr(rsp, 1 * wordSize);
  fstp_s(Address(rsp, 0));
}

void InterpreterMacroAssembler::push_d(Register r) {
  // Do not schedule for no AGI! Never write beyond rsp!
  subptr(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}


void InterpreterMacroAssembler::push(TosState state) {
  verify_oop(rax, state);
  switch (state) {
    case atos: push_ptr(rax);             break;
    case btos:                            // fall through
    case ctos:                            // fall through
    case stos:                            // fall through
    case itos: push_i(rax);               break;
    case ltos: push_l(rax, rdx);          break;
    case ftos: push_f();                  break;
    case dtos: push_d(rax);               break;
    case vtos: /* nothing to do */        break;
    default  : ShouldNotReachHere();
  }
}
#endif // _LP64


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
}


void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  lea(_bcp_register, Address(rsp, wordSize));
  // record last_sp
  movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), _bcp_register);
}


// Jump to the from_interpreted entry of a call. If single stepping is
// possible in this thread, we must call the i2i entry instead.
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
  prepare_to_jump_from_interpreted();

  if (JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    // interp_only is an int; on little endian it is sufficient to test the byte only.
    // Is a cmpl faster?
    LP64_ONLY(temp = r15_thread;)
    NOT_LP64(get_thread(temp);)
    cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
    jccb(Assembler::zero, run_compiled_code);
    jmp(Address(method, Method::interpreter_entry_offset()));
    bind(run_compiled_code);
  }

  jmp(Address(method, Method::from_interpreted_offset()));
}
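
// The decision above corresponds roughly to this sketch (illustrative
// pseudo-C only, not part of the build):
//   if (events_possible && thread->interp_only_mode != 0) {
//     goto method->interpreter_entry();   // i2i: stay in the interpreter
//   } else {
//     goto method->from_interpreted();    // may dispatch into compiled code
//   }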
// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts. x86 does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
  // Nothing x86 specific to be done here
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}

void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              address* table,
                                              bool verifyoop) {
  verify_FPU(1, state);
  if (VerifyActivationFrameSize) {
    Label L;
    mov(rcx, rbp);
    subptr(rcx, rsp);
    int32_t min_frame_size =
      (frame::link_offset - frame::interpreter_frame_initial_sp_offset) *
      wordSize;
    cmpptr(rcx, (int32_t)min_frame_size);
    jcc(Assembler::greaterEqual, L);
    stop("broken stack frame");
    bind(L);
  }
  if (verifyoop) {
    verify_oop(rax, state);
  }
#ifdef _LP64
  lea(rscratch1, ExternalAddress((address)table));
  jmp(Address(rscratch1, rbx, Address::times_8));
#else
  Address index(noreg, rbx, Address::times_ptr);
  ExternalAddress tbl((address)table);
  ArrayAddress dispatch(tbl, index);
  jump(dispatch);
#endif // _LP64
}

void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}

void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state), false);
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int step) {
  // load next bytecode (load before advancing _bcp_register to prevent AGI)
  load_unsigned_byte(rbx, Address(_bcp_register, step));
  // advance _bcp_register
  increment(_bcp_register, step);
  dispatch_base(state, Interpreter::dispatch_table(state));
}

void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  load_unsigned_byte(rbx, Address(_bcp_register, 0));
  dispatch_base(state, table);
}
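
// In effect, a dispatch step is (an illustrative pseudo-C sketch only):
//   rbx = bcp[step];                               // fetch next opcode first...
//   bcp += step;                                   // ...then advance (avoids AGI)
//   goto Interpreter::dispatch_table(state)[rbx];  // indirect jump through table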
// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(
        TosState state,
        Register ret_addr,
        bool throw_monitor_exception,
        bool install_monitor_exception,
        bool notify_jvmdi) {
  // Note: Registers rdx and xmm0 may be in use for the result
  //       if this is a synchronized method.
  Label unlocked, unlock, no_unlock;

  const Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  const Register robj    = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
  const Register rmon    = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
                              // monitor pointers need different register
                              // because rdx may have the result in it
  NOT_LP64(get_thread(rcx);)

  // get the value of _do_not_unlock_if_synchronized into rbx
  const Address do_not_unlock_if_synchronized(rthread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  movbool(rbx, do_not_unlock_if_synchronized);
  movbool(do_not_unlock_if_synchronized, false); // reset the flag

  // get method access flags
  movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
  movl(rcx, Address(rcx, Method::access_flags_offset()));
  testl(rcx, JVM_ACC_SYNCHRONIZED);
  jcc(Assembler::zero, unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  testbool(rbx);
  jcc(Assembler::notZero, no_unlock);

  // unlock monitor
  push(state); // save result

  // BasicObjectLock will be first in list, since this is a
  // synchronized method. However, need to check that the object has
  // not been unlocked by an explicit monitorexit bytecode.
  const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
                        wordSize - (int) sizeof(BasicObjectLock));
  // We use c_rarg1/rdx so that if we go slow path it will be the correct
  // register for unlock_object to pass to VM directly
  lea(robj, monitor); // address of first monitor

  movptr(rax, Address(robj, BasicObjectLock::obj_offset_in_bytes()));
  testptr(rax, rax);
  jcc(Assembler::notZero, unlock);

  pop(state);
  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw exception
    NOT_LP64(empty_FPU_stack();)  // remove possible return value from FPU-stack, otherwise stack could overflow
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll. If requested,
    // install an illegal_monitor_state_exception. Continue with
    // stack unrolling.
    if (install_monitor_exception) {
      NOT_LP64(empty_FPU_stack();)
      call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    jmp(unlocked);
  }

  bind(unlock);
  unlock_object(robj);
  pop(state);

  // Check for block-structured locking, i.e., that all locked
  // objects have been unlocked.
  bind(unlocked);

  // rax, rdx: Might contain return value

  // Check that all monitors are unlocked
  {
    Label loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);

    bind(restart);
    // We use c_rarg1 so that if we go slow path it will be the correct
    // register for unlock_object to pass to VM directly
    movptr(rmon, monitor_block_top); // points to current entry, starting
                                     // with top-most entry
    lea(rbx, monitor_block_bot);     // points to word before bottom of
                                     // monitor block
    jmp(entry);

    // Entry already locked, need to throw exception
    bind(exception);

    if (throw_monitor_exception) {
      // Throw exception
      NOT_LP64(empty_FPU_stack();)
      MacroAssembler::call_VM(noreg,
                              CAST_FROM_FN_PTR(address, InterpreterRuntime::
                                   throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      // We don't have to preserve c_rarg1 since we are going to throw an exception.

      push(state);
      mov(robj, rmon); // nop if robj and rmon are the same
      unlock_object(robj);
      pop(state);

      if (install_monitor_exception) {
        NOT_LP64(empty_FPU_stack();)
        call_VM(noreg, CAST_FROM_FN_PTR(address,
                                        InterpreterRuntime::
                                        new_illegal_monitor_state_exception));
      }

      jmp(restart);
    }

    bind(loop);
    // check if current entry is used
    cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
    jcc(Assembler::notEqual, exception);

    addptr(rmon, entry_size); // otherwise advance to next entry
    bind(entry);
    cmpptr(rmon, rbx); // check if bottom reached
    jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  // get sender sp
  movptr(rbx,
         Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
  leave();       // remove frame anchor
  pop(ret_addr); // get return address
  mov(rsp, rbx); // set sp to sender sp
#ifndef _LP64
  if (UseSSE) {
    // float and double are returned in xmm register in SSE-mode
    if (state == ftos && UseSSE >= 1) {
      subptr(rsp, wordSize);
      fstp_s(Address(rsp, 0));
      movflt(xmm0, Address(rsp, 0));
      addptr(rsp, wordSize);
    } else if (state == dtos && UseSSE >= 2) {
      subptr(rsp, 2*wordSize);
      fstp_d(Address(rsp, 0));
      movdbl(xmm0, Address(rsp, 0));
      addptr(rsp, 2*wordSize);
    }
  }
#endif // !_LP64
}
#endif /* !CC_INTERP */
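
// The unlock loop above walks the frame's variable-sized monitor block,
// roughly (an illustrative pseudo-C sketch; entries live between the
// monitor_block_top and monitor_block_bot slots of the frame):
//   restart:
//   for (entry = *monitor_block_top; entry != monitor_block_bot;
//        entry += entry_size) {
//     if (entry->obj != NULL) {  // still locked
//       throw, or unlock it and install the exception, then restart;
//     }
//   }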
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::notZero, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}


// Lock object
//
// Args:
//      rdx, c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
//      rax
//      rscratch1 (scratch regs)
void InterpreterMacroAssembler::lock_object(Register lock_reg) {
  assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
         "The argument is only for looks. It must be c_rarg1");

  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
  } else {
    Label done;

    const Register swap_reg = rax; // Must use rax for cmpxchg instruction
    const Register obj_reg  = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop

    const int obj_offset  = BasicObjectLock::obj_offset_in_bytes();
    const int lock_offset = BasicObjectLock::lock_offset_in_bytes();
    const int mark_offset = lock_offset +
                            BasicLock::displaced_header_offset_in_bytes();

    Label slow_case;

    // Load object pointer into obj_reg
    movptr(obj_reg, Address(lock_reg, obj_offset));

    if (UseBiasedLocking) {
      biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, done, &slow_case);
    }

    // Load immediate 1 into swap_reg %rax
    movl(swap_reg, (int32_t)1);

    // Load (object->mark() | 1) into swap_reg %rax
    orptr(swap_reg, Address(obj_reg, 0));

    // Save (object->mark() | 1) into BasicLock's displaced header
    movptr(Address(lock_reg, mark_offset), swap_reg);

    assert(lock_offset == 0,
           "displaced header must be first word in BasicObjectLock");

    if (os::is_MP()) lock();
    cmpxchgptr(lock_reg, Address(obj_reg, 0));
    if (PrintBiasedLockingStatistics) {
      cond_inc32(Assembler::zero,
                 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
    }
    jcc(Assembler::zero, done);

    const int zero_bits = LP64_ONLY(7) NOT_LP64(3);

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & zero_bits) == 0, and
    //  2) rsp <= mark < mark + os::pagesize()
    //
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
    // assuming both stack pointer and pagesize have their
    // least significant bits clear.
    // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
    subptr(swap_reg, rsp);
    andptr(swap_reg, zero_bits - os::vm_page_size());

    // Save the test result, for recursive case, the result is zero
    movptr(Address(lock_reg, mark_offset), swap_reg);

    if (PrintBiasedLockingStatistics) {
      cond_inc32(Assembler::zero,
                 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
    }
    jcc(Assembler::zero, done);

    bind(slow_case);

    // Call the runtime routine for slow case
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);

    bind(done);
  }
}
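
// The recursion test above packs both conditions into one expression;
// roughly (illustrative only, with 'mark' the header left in rax by
// cmpxchg):
//   recursive = ((mark - rsp) & (zero_bits - os::vm_page_size())) == 0;
// This is zero exactly when the low bits of mark are clear and mark lies
// within a page of our own rsp, i.e. the header is a BasicLock on this
// thread's stack and the lock is already held (recursive enter).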
// Unlocks an object. Used in monitorexit bytecode and
// remove_activation. Throws an IllegalMonitorStateException if the object
// is not locked by the current thread.
//
// Args:
//      rdx, c_rarg1: BasicObjectLock for lock
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
//      rscratch1, rscratch2 (scratch regs)
//      rax, rbx, rcx, rdx
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
         "The argument is only for looks. It must be c_rarg1");

  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
            lock_reg);
  } else {
    Label done;

    const Register swap_reg   = rax;  // Must use rax for cmpxchg instruction
    const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx);  // Will contain the old oopMark
    const Register obj_reg    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);  // Will contain the oop

    save_bcp(); // Save in case of exception

    // Convert from BasicObjectLock structure to object and BasicLock
    // structure. Store the BasicLock address into %rax
    lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));

    // Load oop into obj_reg(%c_rarg3)
    movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));

    // Free entry
    movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);

    if (UseBiasedLocking) {
      biased_locking_exit(obj_reg, header_reg, done);
    }

    // Load the old header from BasicLock structure
    movptr(header_reg, Address(swap_reg,
                               BasicLock::displaced_header_offset_in_bytes()));

    // Test for recursion
    testptr(header_reg, header_reg);

    // zero for recursive case
    jcc(Assembler::zero, done);

    // Atomic swap back the old header
    if (os::is_MP()) lock();
    cmpxchgptr(header_reg, Address(obj_reg, 0));

    // zero for recursive case
    jcc(Assembler::zero, done);

    // Call the runtime routine for slow case.
    movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()),
           obj_reg); // restore obj
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
            lock_reg);

    bind(done);

    restore_bcp();
  }
}

#ifndef CC_INTERP
void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                         Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
  testptr(mdp, mdp);
  jcc(Assembler::zero, zero_continue);
}


// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label set_mdp;
  push(rax);
  push(rbx);

  get_method(rbx);
  // Test MDO to avoid the call if it is NULL.
  movptr(rax, Address(rbx, in_bytes(Method::method_data_offset())));
  testptr(rax, rax);
  jcc(Assembler::zero, set_mdp);
  // rbx: method
  // _bcp_register: bcp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, _bcp_register);
  // rax: mdi
  // mdo is guaranteed to be non-zero here, we checked for it before the call.
  movptr(rbx, Address(rbx, in_bytes(Method::method_data_offset())));
  addptr(rbx, in_bytes(MethodData::data_offset()));
  addptr(rax, rbx);
  bind(set_mdp);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), rax);
  pop(rbx);
  pop(rax);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  push(rax);
  push(rbx);
  Register arg3_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
  Register arg2_reg = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
  push(arg3_reg);
  push(arg2_reg);
  test_method_data_pointer(arg3_reg, verify_continue); // If mdp is zero, continue
  get_method(rbx);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp. The converse is highly probable also.
  load_unsigned_short(arg2_reg,
                      Address(arg3_reg, in_bytes(DataLayout::bci_offset())));
  addptr(arg2_reg, Address(rbx, Method::const_offset()));
  lea(arg2_reg, Address(arg2_reg, ConstMethod::codes_offset()));
  cmpptr(arg2_reg, _bcp_register);
  jcc(Assembler::equal, verify_continue);
  // rbx: method
  // _bcp_register: bcp
  // c_rarg3: mdp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
               rbx, _bcp_register, arg3_reg);
  bind(verify_continue);
  pop(arg2_reg);
  pop(arg3_reg);
  pop(rbx);
  pop(rax);
#endif // ASSERT
}


void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
                                                int constant,
                                                Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, constant);
  movptr(data, value);
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int constant,
                                                      bool decrement) {
  // Counter address
  Address data(mdp_in, constant);

  increment_mdp_data_at(data, decrement);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // %%% this does 64-bit counters; at best it is wasting space,
  // at worst it is a rare bug when counters overflow

  if (decrement) {
    // Decrement the counter. Set condition codes.
    addptr(data, (int32_t) -DataLayout::counter_increment);
    // If the decrement causes the counter to overflow, stay negative
    Label L;
    jcc(Assembler::negative, L);
    addptr(data, (int32_t) DataLayout::counter_increment);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Increment the counter. Set carry flag.
    addptr(data, DataLayout::counter_increment);
    // If the increment causes the counter to overflow, pull back by 1.
    sbbptr(data, (int32_t)0);
  }
}
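
// The branch-free saturating increment above is, in effect (an
// illustrative sketch, assuming counter_increment == 1):
//   data += 1;        // addptr sets the carry flag if the add wraps
//   data -= carry;    // sbbptr subtracts the carry, undoing a wrap
// so a counter that would overflow sticks at its maximum value instead.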
void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      Register reg,
                                                      int constant,
                                                      bool decrement) {
  Address data(mdp_in, reg, Address::times_1, constant);

  increment_mdp_data_at(data, decrement);
}

void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                int flag_byte_constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  int header_offset = in_bytes(DataLayout::header_offset());
  int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant);
  // Set the flag
  orl(Address(mdp_in, header_offset), header_bits);
}


void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    cmpptr(value, Address(mdp_in, offset));
  } else {
    // Put the test value into a register, so caller can use it:
    movptr(test_value_out, Address(mdp_in, offset));
    cmpptr(test_value_out, value);
  }
  jcc(Assembler::notEqual, not_equal_continue);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     Register reg,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
                                                       int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  addptr(mdp_in, constant);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(return_bci); // save/restore across call_VM
  call_VM(noreg,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
          return_bci);
  pop(return_bci);
}
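
// An mdp update through a displacement cell amounts to (illustrative
// pseudo-C only):
//   mdp += *(intptr_t*)(mdp + offset_of_disp);  // hop to the next cell group
//   frame.interpreter_frame_mdp = mdp;          // store back into the frame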
void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
                                                     Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch. Increment the taken count.
    // We inline increment_mdp_data_at to return bumped_count in a register
    //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
    Address data(mdp, in_bytes(JumpData::taken_offset()));
    movptr(bumped_count, data);
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    addptr(bumped_count, DataLayout::counter_increment);
    sbbptr(bumped_count, 0);
    movptr(data, bumped_count); // Store back out

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are not taking a branch. Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register mdp,
                                                     Register reg2,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      testptr(receiver, receiver);
      jccb(Assembler::notZero, not_null);
      // We are making a call. Increment the count for null receiver.
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
      jmp(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, mdp, reg2, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
    bind(profile_continue);
  }
}

// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows. At the same time, it remembers
// the location of the first empty row. (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree. Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register mdp,
                                        Register reg2, int start_row,
                                        Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
    }
    return;
  }

  int last_row = VirtualCallData::row_limit() - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the receiver and for null.
  // Take any of three different outcomes:
  //   1. found receiver => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the receiver is receiver[n].
    int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
    test_mdp_data_at(mdp, recvr_offset, receiver,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
    // (Reg2 now contains the receiver from the CallData.)

    // The receiver is receiver[n]. Increment count[n].
    int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
    increment_mdp_data_at(mdp, count_offset);
    jmp(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on receiver[n]... Test for null.
      testptr(reg2, reg2);
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (is_virtual_call) {
          jccb(Assembler::zero, found_null);
          // Receiver did not match any saved receiver and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
          jmp(done);
          bind(found_null);
        } else {
          jcc(Assembler::notZero, done);
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      jcc(Assembler::zero, found_null);

      // Put all the "Case 3" tests here.
      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);

      // Found a null. Keep searching for a matching receiver,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching receiver, but we
  // observed the receiver[start_row] is NULL.

  // Fill in the receiver field and increment the count.
  int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
  set_mdp_data_at(mdp, recvr_offset, receiver);
  int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
  movl(reg2, DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg2);
  if (start_row > 0) {
    jmp(done);
  }
}

// Example state machine code for three profile rows:
//   // main copy of decision tree, rooted at row[0]
//   if (row[0].rec == rec) { row[0].incr(); goto done; }
//   if (row[0].rec != NULL) {
//     // inner copy of decision tree, rooted at row[1]
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[1].rec != NULL) {
//       // degenerate decision tree, rooted at row[2]
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
//       row[2].init(rec); goto done;
//     } else {
//       // remember row[1] is empty
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       row[1].init(rec); goto done;
//     }
//   } else {
//     // remember row[0] is empty
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[2].rec == rec) { row[2].incr(); goto done; }
//     row[0].init(rec); goto done;
//   }
//   done:

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp, Register reg2,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);

  bind(done);
}

void InterpreterMacroAssembler::profile_ret(Register return_bci,
                                            Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp,
                       in_bytes(RetData::bci_offset(row)),
                       return_bci, noreg,
                       next_test);

      // return_bci is equal to bci[n]. Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp,
                           in_bytes(RetData::bci_displacement_offset(row)));
      jmp(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter. We expect to see zero or small negatives.
    increment_mdp_data_at(mdp, count_offset, true);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, reg2, false);
      NOT_LP64(assert(reg2 == rdi, "we know how to fix this blown reg");)
      NOT_LP64(restore_locals();) // Restore EDI
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count
    increment_mdp_data_at(mdp,
                          in_bytes(MultiBranchData::default_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         in_bytes(MultiBranchData::
                                  default_displacement_offset()));

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register mdp,
                                                    Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base (index * per_case_size_in_bytes()) +
    // case_array_offset_in_bytes()
    movl(reg2, in_bytes(MultiBranchData::per_case_size()));
    imulptr(index, reg2); // XXX l ?
    addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?

    // Update the case count
    increment_mdp_data_at(mdp,
                          index,
                          in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         index,
                         in_bytes(MultiBranchData::
                                  relative_displacement_offset()));

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
  if (state == atos) {
    MacroAssembler::verify_oop(reg);
  }
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
#ifndef _LP64
  if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
#endif
}

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask,
                                                        Register scratch, bool preloaded,
                                                        Condition cond, Label* where) {
  if (!preloaded) {
    movl(scratch, counter_addr);
  }
  incrementl(scratch, increment);
  movl(counter_addr, scratch);
  andl(scratch, mask);
  jcc(cond, *where);
}
#endif /* !CC_INTERP */

void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
  // track stack depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    NOT_LP64(get_thread(rthread);)
    movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 rthread, rarg);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rthread, rarg);
  }
}
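
// increment_mask_and_jump above implements, schematically (an illustrative
// pseudo-C sketch; 'cond' is typically Assembler::zero):
//   scratch = *counter_addr + increment;
//   *counter_addr = scratch;
//   if ((scratch & mask) == 0) goto *where;   // for cond == zero
// i.e. the branch fires whenever the masked bits of the new counter value
// are all zero.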
void InterpreterMacroAssembler::notify_method_exit(
    TosState state, NotifyMethodExitMode mode) {
  // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
  // track stack depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    // For the c++ interpreter the result is always stored at a known location
    // in the frame; the template interpreter will leave it on the top of the stack.
    NOT_CC_INTERP(push(state);)
    NOT_LP64(get_thread(rthread);)
    movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    bind(L);
    NOT_CC_INTERP(pop(state);)
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    NOT_CC_INTERP(push(state);)
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                 rthread, rarg);
    NOT_CC_INTERP(pop(state);)
  }
}