/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interp_masm_x86.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"

// Implementation of InterpreterMacroAssembler

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  jump(RuntimeAddress(entry));
}

void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  Label update, next, none;

  verify_oop(obj);

  testptr(obj, obj);
  jccb(Assembler::notZero, update);
  orptr(mdo_addr, TypeEntries::null_seen);
  jmpb(next);

  bind(update);
  load_klass(obj, obj);

  xorptr(obj, mdo_addr);
  testptr(obj, TypeEntries::type_klass_mask);
  jccb(Assembler::zero, next); // klass seen before, nothing to
                               // do. The unknown bit may have been
                               // set already but no need to check.

  testptr(obj, TypeEntries::type_unknown);
  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

  cmpptr(mdo_addr, 0);
  jccb(Assembler::equal, none);
  cmpptr(mdo_addr, TypeEntries::null_seen);
  jccb(Assembler::equal, none);
  // There is a chance that the checks above (re-reading profiling
  // data from memory) fail if another thread has just set the
  // profiling to this obj's klass
  xorptr(obj, mdo_addr);
  testptr(obj, TypeEntries::type_klass_mask);
  jccb(Assembler::zero, next);

  // different than before. Cannot keep accurate profile.
  orptr(mdo_addr, TypeEntries::type_unknown);
  jmpb(next);

  bind(none);
  // first time here. Set profile type.
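  // Note: obj currently holds klass ^ [mdo_addr], and [mdo_addr] is either 0 or
  // null_seen (checked above), so this store records the klass while preserving
  // a null_seen bit that may already be set.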
  movptr(mdo_addr, obj);

  bind(next);
}

void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
    jcc(Assembler::notEqual, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
      addptr(mdp, off_to_args);

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If return value type is profiled we may have no argument to profile
          movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
          subl(tmp, i*TypeStackSlotEntries::per_arg_count());
          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
          jcc(Assembler::less, done);
        }
        movptr(tmp, Address(callee, Method::const_offset()));
        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
        // stack offset o (zero based) from the start of the argument
        // list, for n arguments translates into offset n - o - 1 from
        // the end of the argument list
        subptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
        subl(tmp, 1);
        Address arg_addr = argument_address(tmp);
        movptr(tmp, arg_addr);

        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
        profile_obj_type(tmp, mdo_arg_addr);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        addptr(mdp, to_add);
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
        subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
      }

      bind(done);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
        // if there's a return to profile.
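        // Scale that remaining cell count to a byte offset below so that mdp
        // ends up just past the return type cell(s), if any.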
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        shll(tmp, exact_log2(DataLayout::cell_size));
        addptr(mdp, tmp);
      }
      movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  assert_different_registers(mdp, ret, tmp, _bcp_register);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length
      Label do_profile;
      cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
      jcc(Assembler::equal, do_profile);
      cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
      jcc(Assembler::equal, do_profile);
      get_method(tmp);
      cmpw(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
      jcc(Assembler::notEqual, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
    mov(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters
    movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
    testl(tmp1, tmp1);
    jcc(Assembler::negative, profile_continue);

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameters. Collect profiling from last parameter down.
    // mdo start + parameters offset + array length - 1
    addptr(mdp, tmp1);
    movptr(tmp1, Address(mdp, ArrayData::array_len_offset()));
    decrement(tmp1, TypeStackSlotEntries::per_arg_count());

    Label loop;
    bind(loop);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    Address::ScaleFactor per_arg_scale = Address::times(DataLayout::cell_size);
    Address arg_off(mdp, tmp1, per_arg_scale, off_base);
    Address arg_type(mdp, tmp1, per_arg_scale, type_base);

    // load offset on the stack from the slot for this parameter
    movptr(tmp2, arg_off);
    negptr(tmp2);
    // read the parameter from the local area
    movptr(tmp2, Address(_locals_register, tmp2, Interpreter::stackElementScale()));

    // profile the parameter
    profile_obj_type(tmp2, arg_type);

    // go to next parameter
    decrement(tmp1, TypeStackSlotEntries::per_arg_count());
    jcc(Assembler::positive, loop);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
                                                  int number_of_arguments) {
  // interpreter specific
  //
  // Note: No need to save/restore bcp & locals registers
  //       since these are callee saved registers and no blocking/
  //       GC can happen in leaf calls.
  // Further Note: DO NOT save/restore bcp/locals. If a caller has
  //       already saved them so that it can use rsi/rdi as temporaries
  //       then a save/restore here will DESTROY the copy the caller
  //       saved! There used to be a save_bcp() that only happened in
  //       the ASSERT path (no restore_bcp), which caused bizarre failures
  //       when the JVM was built with ASSERTs.
#ifdef ASSERT
  {
    Label L;
    cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    stop("InterpreterMacroAssembler::call_VM_leaf_base:"
         " last_sp != NULL");
    bind(L);
  }
#endif
  // super call
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
  // interpreter specific
  // LP64: Used to ASSERT that r13/r14 were equal to the frame's bcp/locals,
  // but since they may not have been saved (and we don't want to save them
  // here, see note above) the assert is invalid.
}

void InterpreterMacroAssembler::call_VM_base(Register oop_result,
                                             Register java_thread,
                                             Register last_java_sp,
                                             address entry_point,
                                             int number_of_arguments,
                                             bool check_exceptions) {
  // interpreter specific
  //
  // Note: Could avoid restoring locals ptr (callee saved) - however doesn't
  //       really make a difference for these runtime calls, since they are
  //       slow anyway. Btw., bcp must be saved/restored since it may change
  //       due to GC.
  NOT_LP64(assert(java_thread == noreg , "not expecting a precomputed java thread");)
  save_bcp();
#ifdef ASSERT
  {
    Label L;
    cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    stop("InterpreterMacroAssembler::call_VM_base:"
         " last_sp != NULL");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
                               entry_point, number_of_arguments,
                               check_exceptions);
  // interpreter specific
  restore_bcp();
  restore_locals();
}

void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;
    // Initiate popframe handling only if it is not already being
    // processed. If the flag has the popframe_processing bit set, it
    // means that this code is called *during* popframe handling - we
    // don't want to reenter.
    // This method is only called just after the call into the vm in
    // call_VM_base, so the arg registers are available.
    Register pop_cond = NOT_LP64(java_thread) // Not clear if any other register is available on 32 bit
                        LP64_ONLY(c_rarg0);
    movl(pop_cond, Address(java_thread, JavaThread::popframe_condition_offset()));
    testl(pop_cond, JavaThread::popframe_pending_bit);
    jcc(Assembler::zero, L);
    testl(pop_cond, JavaThread::popframe_processing_bit);
    jcc(Assembler::notZero, L);
    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
    jmp(rax);
    bind(L);
    NOT_LP64(get_thread(java_thread);)
  }
}

void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  NOT_LP64(get_thread(thread);)
  movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset());
#ifdef _LP64
  switch (state) {
    case atos: movptr(rax, oop_addr);
               movptr(oop_addr, (int32_t)NULL_WORD);
               verify_oop(rax, state);          break;
    case ltos: movptr(rax, val_addr);           break;
    case btos:                                  // fall through
    case ztos:                                  // fall through
    case ctos:                                  // fall through
    case stos:                                  // fall through
    case itos: movl(rax, val_addr);             break;
    case ftos: load_float(val_addr);            break;
    case dtos: load_double(val_addr);           break;
    case vtos: /* nothing to do */              break;
    default  : ShouldNotReachHere();
  }
  // Clean up tos value in the thread object
  movl(tos_addr, (int) ilgl);
  movl(val_addr, (int32_t) NULL_WORD);
#else
  const Address val_addr1(rcx, JvmtiThreadState::earlyret_value_offset()
                             + in_ByteSize(wordSize));
  switch (state) {
    case atos: movptr(rax, oop_addr);
               movptr(oop_addr, NULL_WORD);
               verify_oop(rax, state);          break;
    case ltos:
               movl(rdx, val_addr1);            // fall through
    case btos:                                  // fall through
    case ztos:                                  // fall through
    case ctos:                                  // fall through
    case stos:                                  // fall through
    case itos: movl(rax, val_addr);             break;
    case ftos: load_float(val_addr);            break;
    case dtos: load_double(val_addr);           break;
    case vtos: /* nothing to do */              break;
    default  : ShouldNotReachHere();
  }
#endif // _LP64
  // Clean up tos value in the thread object
  movl(tos_addr, (int32_t) ilgl);
  movptr(val_addr, NULL_WORD);
  NOT_LP64(movptr(val_addr1, NULL_WORD);)
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register tmp = LP64_ONLY(c_rarg0) NOT_LP64(java_thread);
    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(java_thread);

    movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
    testptr(tmp, tmp);
    jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    movl(tmp, Address(tmp, JvmtiThreadState::earlyret_state_offset()));
    cmpl(tmp, JvmtiThreadState::earlyret_pending);
    jcc(Assembler::notEqual, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.
    NOT_LP64(get_thread(java_thread);)
    movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
#ifdef _LP64
    movl(tmp, Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), tmp);
#else
    pushl(Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), 1);
#endif // _LP64
    jmp(rax);
    bind(L);
    NOT_LP64(get_thread(java_thread);)
  }
}

void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  load_unsigned_short(reg, Address(_bcp_register, bcp_offset));
  bswapl(reg);
  shrl(reg, 16);
}

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
                                                       int bcp_offset,
                                                       size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    load_unsigned_short(index, Address(_bcp_register, bcp_offset));
  } else if (index_size == sizeof(u4)) {
    movl(index, Address(_bcp_register, bcp_offset));
    // Check if the secondary index definition is still ~x, otherwise
    // we have to change the following assembler code to calculate the
    // plain index.
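    // Invokedynamic indices are stored as the bitwise complement of the plain
    // index (see the assert below), so a single notl recovers the original value.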
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    notl(index);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    load_unsigned_byte(index, Address(_bcp_register, bcp_offset));
  } else {
    ShouldNotReachHere();
  }
}

void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                           Register index,
                                                           int bcp_offset,
                                                           size_t index_size) {
  assert_different_registers(cache, index);
  get_cache_index_at_bcp(index, bcp_offset, index_size);
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  assert(exact_log2(in_words(ConstantPoolCacheEntry::size())) == 2, "else change next line");
  shll(index, 2);
}

void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register index,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
  // We use a 32-bit load here since the layout of 64-bit words on
  // little-endian machines allows us to do that.
  movl(bytecode, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
  const int shift_count = (1 + byte_no) * BitsPerByte;
  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
         "correct shift count");
  shrl(bytecode, shift_count);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  andl(bytecode, ConstantPoolCacheEntry::bytecode_1_mask);
}

void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                               Register tmp,
                                                               int bcp_offset,
                                                               size_t index_size) {
  assert(cache != tmp, "must use different register");
  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  // and from word offset to byte offset
  assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
  shll(tmp, 2 + LogBytesPerWord);
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  // skip past the header
  addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
  addptr(cache, tmp);  // construct pointer to cache entry
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index) {
  assert_different_registers(result, index);
  // convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a java object, it can be compressed
  Register tmp = index;  // reuse
  shll(tmp, LogBytesPerHeapOop);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  movptr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
  movptr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
  resolve_oop_handle(result);
  // Add in the index
  addptr(result, tmp);
  load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_index(Register cpool,
                                                             Register index, Register klass) {
  movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
  Register resolved_klasses = cpool;
  movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes()));
  movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
}

// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
// Args:
//      rax: superklass
//      Rsub_klass: subklass
//
// Kills:
//      rcx, rdi
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  assert(Rsub_klass != rax, "rax holds superklass");
  LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
  LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
  assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
  assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi

  // Do the check.
  check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx

  // Profile the failure of the check.
  profile_typecheck_failed(rcx); // blows rcx
}


#ifndef _LP64
void InterpreterMacroAssembler::f2ieee() {
  if (IEEEPrecision) {
    fstp_s(Address(rsp, 0));
    fld_s(Address(rsp, 0));
  }
}


void InterpreterMacroAssembler::d2ieee() {
  if (IEEEPrecision) {
    fstp_d(Address(rsp, 0));
    fld_d(Address(rsp, 0));
  }
}
#endif // _LP64

// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  pop(r);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_i(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}

void InterpreterMacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void InterpreterMacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}

void InterpreterMacroAssembler::pop_d(XMMRegister r) {
  movdbl(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

#ifdef _LP64
void InterpreterMacroAssembler::pop_i(Register r) {
  // XXX can't use pop currently, upper half non clean
  movl(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void InterpreterMacroAssembler::pop_l(Register r) {
  movq(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_l(Register r) {
  subptr(rsp, 2 * wordSize);
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(0)), r);
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(1)), NULL_WORD);
}

void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr();                 break;
    case btos:
    case ztos:
    case ctos:
    case stos:
    case itos: pop_i();                   break;
    case ltos: pop_l();                   break;
    case ftos: pop_f(xmm0);               break;
    case dtos: pop_d(xmm0);               break;
    case vtos: /* nothing to do */        break;
    default:   ShouldNotReachHere();
  }
  verify_oop(rax, state);
}

void InterpreterMacroAssembler::push(TosState state) {
  verify_oop(rax, state);
  switch (state) {
    case atos: push_ptr();                break;
    case btos:
    case ztos:
    case ctos:
    case stos:
    case itos: push_i();                  break;
    case ltos: push_l();                  break;
    case ftos: push_f(xmm0);              break;
    case dtos: push_d(xmm0);              break;
    case vtos: /* nothing to do */        break;
    default  : ShouldNotReachHere();
  }
}
#else
void InterpreterMacroAssembler::pop_i(Register r) {
  pop(r);
}

void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
  pop(lo);
  pop(hi);
}

void InterpreterMacroAssembler::pop_f() {
  fld_s(Address(rsp, 0));
  addptr(rsp, 1 * wordSize);
}

void InterpreterMacroAssembler::pop_d() {
  fld_d(Address(rsp, 0));
  addptr(rsp, 2 * wordSize);
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr(rax);                 break;
    case btos:                               // fall through
    case ztos:                               // fall through
    case ctos:                               // fall through
    case stos:                               // fall through
    case itos: pop_i(rax);                   break;
    case ltos: pop_l(rax, rdx);              break;
    case ftos:
      if (UseSSE >= 1) {
        pop_f(xmm0);
      } else {
        pop_f();
      }
      break;
    case dtos:
      if (UseSSE >= 2) {
        pop_d(xmm0);
      } else {
        pop_d();
      }
      break;
    case vtos: /* nothing to do */           break;
    default  : ShouldNotReachHere();
  }
  verify_oop(rax, state);
}


void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
  push(hi);
  push(lo);
}

void InterpreterMacroAssembler::push_f() {
  // Do not schedule for no AGI! Never write beyond rsp!
  subptr(rsp, 1 * wordSize);
  fstp_s(Address(rsp, 0));
}

void InterpreterMacroAssembler::push_d() {
  // Do not schedule for no AGI! Never write beyond rsp!
  subptr(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}


void InterpreterMacroAssembler::push(TosState state) {
  verify_oop(rax, state);
  switch (state) {
    case atos: push_ptr(rax);                break;
    case btos:                               // fall through
    case ztos:                               // fall through
    case ctos:                               // fall through
    case stos:                               // fall through
    case itos: push_i(rax);                  break;
    case ltos: push_l(rax, rdx);             break;
    case ftos:
      if (UseSSE >= 1) {
        push_f(xmm0);
      } else {
        push_f();
      }
      break;
    case dtos:
      if (UseSSE >= 2) {
        push_d(xmm0);
      } else {
        push_d();
      }
      break;
    case vtos: /* nothing to do */           break;
    default  : ShouldNotReachHere();
  }
}
#endif // _LP64


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
}


void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  lea(_bcp_register, Address(rsp, wordSize));
  // record last_sp
  movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), _bcp_register);
}


// Jump to from_interpreted entry of a call unless single stepping is possible
// in this thread in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
  prepare_to_jump_from_interpreted();

  if (JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    // interp_only is an int, on little endian it is sufficient to test the byte only
    // Is a cmpl faster?
    LP64_ONLY(temp = r15_thread;)
    NOT_LP64(get_thread(temp);)
    cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
    jccb(Assembler::zero, run_compiled_code);
    jmp(Address(method, Method::interpreter_entry_offset()));
    bind(run_compiled_code);
  }

  jmp(Address(method, Method::from_interpreted_offset()));
}

// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts.  x86 does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
  // Nothing x86 specific to be done here
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}

void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              address* table,
                                              bool verifyoop,
                                              bool generate_poll) {
  verify_FPU(1, state);
  if (VerifyActivationFrameSize) {
    Label L;
    mov(rcx, rbp);
    subptr(rcx, rsp);
    int32_t min_frame_size =
      (frame::link_offset - frame::interpreter_frame_initial_sp_offset) *
      wordSize;
    cmpptr(rcx, (int32_t)min_frame_size);
    jcc(Assembler::greaterEqual, L);
    stop("broken stack frame");
    bind(L);
  }
  if (verifyoop) {
    verify_oop(rax, state);
  }
#ifdef _LP64

  Label no_safepoint, dispatch;
  address* const safepoint_table = Interpreter::safept_table(state);
  if (SafepointMechanism::uses_thread_local_poll() && table != safepoint_table && generate_poll) {
    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));

    testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());

    jccb(Assembler::zero, no_safepoint);
    lea(rscratch1, ExternalAddress((address)safepoint_table));
    jmpb(dispatch);
  }

  bind(no_safepoint);
  lea(rscratch1, ExternalAddress((address)table));
  bind(dispatch);
  jmp(Address(rscratch1, rbx, Address::times_8));

#else
  Address index(noreg, rbx, Address::times_ptr);
  ExternalAddress tbl((address)table);
  ArrayAddress dispatch(tbl, index);
  jump(dispatch);
#endif // _LP64
}

void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state), false);
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
  // load next bytecode (load before advancing _bcp_register to prevent AGI)
  load_unsigned_byte(rbx, Address(_bcp_register, step));
  // advance _bcp_register
  increment(_bcp_register, step);
  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  load_unsigned_byte(rbx, Address(_bcp_register, 0));
  dispatch_base(state, table);
}

void InterpreterMacroAssembler::narrow(Register result) {

  // Get method->_constMethod->_result_type
  movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
  movptr(rcx, Address(rcx, Method::const_offset()));
  load_unsigned_byte(rcx, Address(rcx, ConstMethod::result_type_offset()));

  Label done, notBool, notByte, notChar;

  // common case first
  cmpl(rcx, T_INT);
  jcc(Assembler::equal, done);

  // mask integer result to narrower return type.
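  // For example, a method declared to return boolean may leave an arbitrary int
  // in rax; only bit 0 is significant for T_BOOLEAN, so it is masked to 0 or 1.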
  cmpl(rcx, T_BOOLEAN);
  jcc(Assembler::notEqual, notBool);
  andl(result, 0x1);
  jmp(done);

  bind(notBool);
  cmpl(rcx, T_BYTE);
  jcc(Assembler::notEqual, notByte);
  LP64_ONLY(movsbl(result, result);)
  NOT_LP64(shll(result, 24);)      // truncate upper 24 bits
  NOT_LP64(sarl(result, 24);)      // and sign-extend byte
  jmp(done);

  bind(notByte);
  cmpl(rcx, T_CHAR);
  jcc(Assembler::notEqual, notChar);
  LP64_ONLY(movzwl(result, result);)
  NOT_LP64(andl(result, 0xFFFF);)  // truncate upper 16 bits
  jmp(done);

  bind(notChar);
  // cmpl(rcx, T_SHORT);  // all that's left
  // jcc(Assembler::notEqual, done);
  LP64_ONLY(movswl(result, result);)
  NOT_LP64(shll(result, 16);)      // truncate upper 16 bits
  NOT_LP64(sarl(result, 16);)      // and sign-extend short

  // Nothing to do for T_INT
  bind(done);
}

// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(
        TosState state,
        Register ret_addr,
        bool throw_monitor_exception,
        bool install_monitor_exception,
        bool notify_jvmdi) {
  // Note: Registers rdx and xmm0 may be in use for the result
  //       if this is a synchronized method
  Label unlocked, unlock, no_unlock;

  const Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  const Register robj    = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
  const Register rmon    = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
  // monitor pointers need different register
  // because rdx may have the result in it
  NOT_LP64(get_thread(rcx);)

  // get the value of _do_not_unlock_if_synchronized into rdx
  const Address do_not_unlock_if_synchronized(rthread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  movbool(rbx, do_not_unlock_if_synchronized);
  movbool(do_not_unlock_if_synchronized, false); // reset the flag

  // get method access flags
  movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
  movl(rcx, Address(rcx, Method::access_flags_offset()));
  testl(rcx, JVM_ACC_SYNCHRONIZED);
  jcc(Assembler::zero, unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  testbool(rbx);
  jcc(Assembler::notZero, no_unlock);

  // unlock monitor
  push(state); // save result

  // BasicObjectLock will be first in list, since this is a
  // synchronized method. However, need to check that the object has
  // not been unlocked by an explicit monitorexit bytecode.
  const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
                        wordSize - (int) sizeof(BasicObjectLock));
  // We use c_rarg1/rdx so that if we go slow path it will be the correct
  // register for unlock_object to pass to VM directly
  lea(robj, monitor); // address of first monitor

  movptr(rax, Address(robj, BasicObjectLock::obj_offset_in_bytes()));
  testptr(rax, rax);
  jcc(Assembler::notZero, unlock);

  pop(state);
  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw exception
    NOT_LP64(empty_FPU_stack();)  // remove possible return value from FPU-stack, otherwise stack could overflow
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll. If requested,
    // install an illegal_monitor_state_exception.  Continue with
    // stack unrolling.
    if (install_monitor_exception) {
      NOT_LP64(empty_FPU_stack();)
      call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    jmp(unlocked);
  }

  bind(unlock);
  unlock_object(robj);
  pop(state);

  // Check for block-structured locking (i.e., that all locked
  // objects have been unlocked)
  bind(unlocked);

  // rax, rdx: Might contain return value

  // Check that all monitors are unlocked
  {
    Label loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);

    bind(restart);
    // We use c_rarg1 so that if we go slow path it will be the correct
    // register for unlock_object to pass to VM directly
    movptr(rmon, monitor_block_top); // points to current entry, starting
                                     // with top-most entry
    lea(rbx, monitor_block_bot);     // points to word before bottom of
                                     // monitor block
    jmp(entry);

    // Entry already locked, need to throw exception
    bind(exception);

    if (throw_monitor_exception) {
      // Throw exception
      NOT_LP64(empty_FPU_stack();)
      MacroAssembler::call_VM(noreg,
                              CAST_FROM_FN_PTR(address, InterpreterRuntime::
                                               throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      // We don't have to preserve c_rarg1 since we are going to throw an exception.

      push(state);
      mov(robj, rmon); // nop if robj and rmon are the same
      unlock_object(robj);
      pop(state);

      if (install_monitor_exception) {
        NOT_LP64(empty_FPU_stack();)
        call_VM(noreg, CAST_FROM_FN_PTR(address,
                       InterpreterRuntime::
                       new_illegal_monitor_state_exception));
      }

      jmp(restart);
    }

    bind(loop);
    // check if current entry is used
    cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
    jcc(Assembler::notEqual, exception);

    addptr(rmon, entry_size); // otherwise advance to next entry
    bind(entry);
    cmpptr(rmon, rbx); // check if bottom reached
    jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  // get sender sp
  movptr(rbx,
         Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
  if (StackReservedPages > 0) {
    // testing if reserved zone needs to be re-enabled
    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
    Label no_reserved_zone_enabling;

    NOT_LP64(get_thread(rthread);)

    cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_enabled);
    jcc(Assembler::equal, no_reserved_zone_enabling);

    cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
    jcc(Assembler::lessEqual, no_reserved_zone_enabling);

    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_delayed_StackOverflowError));
    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }
  leave();                           // remove frame anchor
  pop(ret_addr);                     // get return address
  mov(rsp, rbx);                     // set sp to sender sp
}

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::notZero, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}


// Lock object
//
// Args:
//      rdx, c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
//      rax, rbx
void InterpreterMacroAssembler::lock_object(Register lock_reg) {
  assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
         "The argument is only for looks. It must be c_rarg1");

  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
  } else {
    Label done;

    const Register swap_reg = rax; // Must use rax for cmpxchg instruction
    const Register tmp_reg = rbx;  // Will be passed to biased_locking_enter to avoid a
                                   // problematic case where tmp_reg = no_reg.
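    // Overview of the fast path below: save (object->mark() | 1) as the displaced
    // header in the BasicLock, then CAS the BasicLock address into the object's
    // mark word. If the CAS fails, test whether the mark word already points into
    // the current thread's stack (a recursive lock) before calling the runtime.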
    const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop

    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
    const int lock_offset = BasicObjectLock::lock_offset_in_bytes();
    const int mark_offset = lock_offset +
                            BasicLock::displaced_header_offset_in_bytes();

    Label slow_case;

    // Load object pointer into obj_reg
    movptr(obj_reg, Address(lock_reg, obj_offset));

    if (UseBiasedLocking) {
      biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, false, done, &slow_case);
    }

    // Load immediate 1 into swap_reg %rax
    movl(swap_reg, (int32_t)1);

    // Load (object->mark() | 1) into swap_reg %rax
    orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

    // Save (object->mark() | 1) into BasicLock's displaced header
    movptr(Address(lock_reg, mark_offset), swap_reg);

    assert(lock_offset == 0,
           "displaced header must be first word in BasicObjectLock");

    if (os::is_MP()) lock();
    cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    if (PrintBiasedLockingStatistics) {
      cond_inc32(Assembler::zero,
                 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
    }
    jcc(Assembler::zero, done);

    const int zero_bits = LP64_ONLY(7) NOT_LP64(3);

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & zero_bits) == 0, and
    //  2) rsp <= mark < mark + os::pagesize()
    //
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
    // assuming both the stack pointer and the page size have their
    // least significant bits clear.
    // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
    subptr(swap_reg, rsp);
    andptr(swap_reg, zero_bits - os::vm_page_size());

    // Save the test result, for recursive case, the result is zero
    movptr(Address(lock_reg, mark_offset), swap_reg);

    if (PrintBiasedLockingStatistics) {
      cond_inc32(Assembler::zero,
                 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
    }
    jcc(Assembler::zero, done);

    bind(slow_case);

    // Call the runtime routine for slow case
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);

    bind(done);
  }
}


// Unlocks an object. Used in monitorexit bytecode and
// remove_activation. Throws an IllegalMonitorStateException if the
// object is not locked by the current thread.
//
// Args:
//      rdx, c_rarg1: BasicObjectLock for lock
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
//      rscratch1 (scratch reg)
//      rax, rbx, rcx, rdx
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
         "The argument is only for looks. It must be c_rarg1");
It must be c_rarg1"); 1242 1243 if (UseHeavyMonitors) { 1244 call_VM(noreg, 1245 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), 1246 lock_reg); 1247 } else { 1248 Label done; 1249 1250 const Register swap_reg = rax; // Must use rax for cmpxchg instruction 1251 const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx); // Will contain the old oopMark 1252 const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop 1253 1254 save_bcp(); // Save in case of exception 1255 1256 // Convert from BasicObjectLock structure to object and BasicLock 1257 // structure Store the BasicLock address into %rax 1258 lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes())); 1259 1260 // Load oop into obj_reg(%c_rarg3) 1261 movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); 1262 1263 // Free entry 1264 movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); 1265 1266 if (UseBiasedLocking) { 1267 biased_locking_exit(obj_reg, header_reg, done); 1268 } 1269 1270 // Load the old header from BasicLock structure 1271 movptr(header_reg, Address(swap_reg, 1272 BasicLock::displaced_header_offset_in_bytes())); 1273 1274 // Test for recursion 1275 testptr(header_reg, header_reg); 1276 1277 // zero for recursive case 1278 jcc(Assembler::zero, done); 1279 1280 // Atomic swap back the old header 1281 if (os::is_MP()) lock(); 1282 cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); 1283 1284 // zero for simple unlock of a stack-lock case 1285 jcc(Assembler::zero, done); 1286 1287 // Call the runtime routine for slow case. 1288 movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), 1289 obj_reg); // restore obj 1290 call_VM(noreg, 1291 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), 1292 lock_reg); 1293 1294 bind(done); 1295 1296 restore_bcp(); 1297 } 1298 } 1299 1300 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, 1301 Label& zero_continue) { 1302 assert(ProfileInterpreter, "must be profiling interpreter"); 1303 movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize)); 1304 testptr(mdp, mdp); 1305 jcc(Assembler::zero, zero_continue); 1306 } 1307 1308 1309 // Set the method data pointer for the current bcp. 1310 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { 1311 assert(ProfileInterpreter, "must be profiling interpreter"); 1312 Label set_mdp; 1313 push(rax); 1314 push(rbx); 1315 1316 get_method(rbx); 1317 // Test MDO to avoid the call if it is NULL. 1318 movptr(rax, Address(rbx, in_bytes(Method::method_data_offset()))); 1319 testptr(rax, rax); 1320 jcc(Assembler::zero, set_mdp); 1321 // rbx: method 1322 // _bcp_register: bcp 1323 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, _bcp_register); 1324 // rax: mdi 1325 // mdo is guaranteed to be non-zero here, we checked for it before the call. 
  movptr(rbx, Address(rbx, in_bytes(Method::method_data_offset())));
  addptr(rbx, in_bytes(MethodData::data_offset()));
  addptr(rax, rbx);
  bind(set_mdp);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), rax);
  pop(rbx);
  pop(rax);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  push(rax);
  push(rbx);
  Register arg3_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
  Register arg2_reg = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
  push(arg3_reg);
  push(arg2_reg);
  test_method_data_pointer(arg3_reg, verify_continue); // If mdp is zero, continue
  get_method(rbx);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp. The converse is highly probable also.
  load_unsigned_short(arg2_reg,
                      Address(arg3_reg, in_bytes(DataLayout::bci_offset())));
  addptr(arg2_reg, Address(rbx, Method::const_offset()));
  lea(arg2_reg, Address(arg2_reg, ConstMethod::codes_offset()));
  cmpptr(arg2_reg, _bcp_register);
  jcc(Assembler::equal, verify_continue);
  // rbx: method
  // _bcp_register: bcp
  // c_rarg3: mdp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
               rbx, _bcp_register, arg3_reg);
  bind(verify_continue);
  pop(arg2_reg);
  pop(arg3_reg);
  pop(rbx);
  pop(rax);
#endif // ASSERT
}


void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
                                                int constant,
                                                Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, constant);
  movptr(data, value);
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int constant,
                                                      bool decrement) {
  // Counter address
  Address data(mdp_in, constant);

  increment_mdp_data_at(data, decrement);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // %%% this does 64-bit counters; at best it is wasting space,
  // at worst it is a rare bug when counters overflow

  if (decrement) {
    // Decrement the register. Set condition codes.
    addptr(data, (int32_t) -DataLayout::counter_increment);
    // If the decrement causes the counter to overflow, stay negative
    Label L;
    jcc(Assembler::negative, L);
    addptr(data, (int32_t) DataLayout::counter_increment);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Increment the register. Set carry flag.
    addptr(data, DataLayout::counter_increment);
    // If the increment causes the counter to overflow, pull back by 1.
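    // (addptr sets the carry flag on wrap-around; sbbptr then subtracts that
    // carry, so a counter that has reached its maximum value stays there.)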
    sbbptr(data, (int32_t)0);
  }
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      Register reg,
                                                      int constant,
                                                      bool decrement) {
  Address data(mdp_in, reg, Address::times_1, constant);

  increment_mdp_data_at(data, decrement);
}

void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                int flag_byte_constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  int header_offset = in_bytes(DataLayout::header_offset());
  int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant);
  // Set the flag
  orl(Address(mdp_in, header_offset), header_bits);
}



void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    cmpptr(value, Address(mdp_in, offset));
  } else {
    // Put the test value into a register, so caller can use it:
    movptr(test_value_out, Address(mdp_in, offset));
    cmpptr(test_value_out, value);
  }
  jcc(Assembler::notEqual, not_equal_continue);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     Register reg,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
                                                       int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  addptr(mdp_in, constant);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(return_bci); // save/restore across call_VM
  call_VM(noreg,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
          return_bci);
  pop(return_bci);
}


void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
                                                     Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch. Increment the taken count.
    // We inline increment_mdp_data_at to return bumped_count in a register
    //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
    Address data(mdp, in_bytes(JumpData::taken_offset()));
    movptr(bumped_count, data);
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    addptr(bumped_count, DataLayout::counter_increment);
    sbbptr(bumped_count, 0);
    movptr(data, bumped_count); // Store back out

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The branch was not taken. Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register mdp,
                                                     Register reg2,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      testptr(receiver, receiver);
      jccb(Assembler::notZero, not_null);
      // We are making a call. Increment the count for null receiver.
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
      jmp(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, mdp, reg2, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
#if INCLUDE_JVMCI
    if (MethodProfileWidth == 0) {
      update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    }
#else // INCLUDE_JVMCI
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
#endif // INCLUDE_JVMCI
    bind(profile_continue);
  }
}

#if INCLUDE_JVMCI
void InterpreterMacroAssembler::profile_called_method(Register method, Register mdp, Register reg2) {
  assert_different_registers(method, mdp, reg2);
  if (ProfileInterpreter && MethodProfileWidth > 0) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label done;
    record_item_in_profile_helper(method, mdp, reg2, 0, done, MethodProfileWidth,
      &VirtualCallData::method_offset, &VirtualCallData::method_count_offset, in_bytes(VirtualCallData::nonprofiled_receiver_count_offset()));
    bind(done);

    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}
#endif // INCLUDE_JVMCI

// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows.  At the same time, it remembers
// the location of the first empty row.  (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree.  Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register mdp,
                                        Register reg2, int start_row,
                                        Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      increment_mdp_data_at(mdp, in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset()));
    }
#endif // INCLUDE_JVMCI
  } else {
    int non_profiled_offset = -1;
    if (is_virtual_call) {
      non_profiled_offset = in_bytes(CounterData::count_offset());
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      non_profiled_offset = in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset());
    }
#endif // INCLUDE_JVMCI

    record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
      &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset, non_profiled_offset);
  }
}

void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
                                        Register reg2, int start_row, Label& done, int total_rows,
                                        OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
                                        int non_profiled_offset) {
  int last_row = total_rows - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the item and for null.
  // Take any of three different outcomes:
  //   1. found item => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the item is item[n].
    int item_offset = in_bytes(item_offset_fn(row));
    test_mdp_data_at(mdp, item_offset, item,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
    // (Reg2 now contains the item from the CallData.)

    // The item is item[n].  Increment count[n].
    int count_offset = in_bytes(item_count_offset_fn(row));
    increment_mdp_data_at(mdp, count_offset);
    jmp(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on item[n]...  Test for null.
      testptr(reg2, reg2);
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (non_profiled_offset >= 0) {
          jccb(Assembler::zero, found_null);
          // Item did not match any saved item and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(mdp, non_profiled_offset);
          jmp(done);
          bind(found_null);
        } else {
          jcc(Assembler::notZero, done);
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      jcc(Assembler::zero, found_null);

      // Put all the "Case 3" tests here.
      record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
        item_offset_fn, item_count_offset_fn, non_profiled_offset);

      // Found a null.  Keep searching for a matching item,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching item, but we
  // observed that item[start_row] is NULL.

  // Fill in the item field and increment the count.
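  // (When start_row == 0 this fall-through lands directly on the point where
  // the caller binds "done", so the trailing jmp(done) below is only emitted
  // for the recursive copies with start_row > 0.)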
  int item_offset = in_bytes(item_offset_fn(start_row));
  set_mdp_data_at(mdp, item_offset, item);
  int count_offset = in_bytes(item_count_offset_fn(start_row));
  movl(reg2, DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg2);
  if (start_row > 0) {
    jmp(done);
  }
}

// Example state machine code for three profile rows:
//   // main copy of decision tree, rooted at row[0]
//   if (row[0].rec == rec) { row[0].incr(); goto done; }
//   if (row[0].rec != NULL) {
//     // inner copy of decision tree, rooted at row[1]
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[1].rec != NULL) {
//       // degenerate decision tree, rooted at row[2]
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
//       row[2].init(rec); goto done;
//     } else {
//       // remember row[1] is empty
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       row[1].init(rec); goto done;
//     }
//   } else {
//     // remember row[0] is empty
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[2].rec == rec) { row[2].incr(); goto done; }
//     row[0].init(rec); goto done;
//   }
// done:

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp, Register reg2,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);

  bind (done);
}

void InterpreterMacroAssembler::profile_ret(Register return_bci,
                                            Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp,
                       in_bytes(RetData::bci_offset(row)),
                       return_bci, noreg,
                       next_test);

      // return_bci is equal to bci[n].  Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp,
                           in_bytes(RetData::bci_displacement_offset(row)));
      jmp(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
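    // When casts are type-profiled, the checkcast/instanceof site carries a
    // receiver-type record the same size as a virtual-call record, so skip
    // that larger size; otherwise only a BitData cell was laid down.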
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter.  We expect to see zero or small negatives.
    increment_mdp_data_at(mdp, count_offset, true);

    bind (profile_continue);
  }
}


void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, reg2, false);
      NOT_LP64(assert(reg2 == rdi, "we know how to fix this blown reg");)
      NOT_LP64(restore_locals();)  // Restore EDI
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count
    increment_mdp_data_at(mdp,
                          in_bytes(MultiBranchData::default_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         in_bytes(MultiBranchData::
                                  default_displacement_offset()));

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register mdp,
                                                    Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base (index * per_case_size_in_bytes()) +
    // case_array_offset_in_bytes()
    movl(reg2, in_bytes(MultiBranchData::per_case_size()));
    imulptr(index, reg2); // XXX l ?
    addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?

    // Update the case count
    increment_mdp_data_at(mdp,
                          index,
                          in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
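    // index already holds case_array_offset + case * per_case_size, so the
    // taken case's displacement is read from
    //   [mdp + index + relative_displacement_offset].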
    update_mdp_by_offset(mdp,
                         index,
                         in_bytes(MultiBranchData::
                                  relative_displacement_offset()));

    bind(profile_continue);
  }
}



void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
  if (state == atos) {
    MacroAssembler::verify_oop(reg);
  }
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
#ifndef _LP64
  if ((state == ftos && UseSSE < 1) ||
      (state == dtos && UseSSE < 2)) {
    MacroAssembler::verify_FPU(stack_depth);
  }
#endif
}

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask,
                                                        Register scratch, bool preloaded,
                                                        Condition cond, Label* where) {
  if (!preloaded) {
    movl(scratch, counter_addr);
  }
  incrementl(scratch, increment);
  movl(counter_addr, scratch);
  andl(scratch, mask);
  jcc(cond, *where);
}

void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit
  // events are sent to track stack depth.  If it is possible to enter
  // interp_only_mode we add the code to check if the event should be sent.
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    NOT_LP64(get_thread(rthread);)
    movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 rthread, rarg);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rthread, rarg);
  }
}


void InterpreterMacroAssembler::notify_method_exit(
    TosState state, NotifyMethodExitMode mode) {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit
  // events are sent to track stack depth.  If it is possible to enter
  // interp_only_mode we add the code to check if the event should be sent.
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit.  If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    // template interpreter will leave the result on the top of the stack.
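    // push(state)/pop(state) save and restore the top-of-stack value(s)
    // around the runtime call, since call_VM may clobber the registers
    // holding the method result.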
    push(state);
    NOT_LP64(get_thread(rthread);)
    movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    bind(L);
    pop(state);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    push(state);
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                 rthread, rarg);
    pop(state);
  }
}