1 /* 2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "interp_masm_x86.hpp" 27 #include "interpreter/interpreter.hpp" 28 #include "interpreter/interpreterRuntime.hpp" 29 #include "logging/log.hpp" 30 #include "oops/arrayOop.hpp" 31 #include "oops/markOop.hpp" 32 #include "oops/methodData.hpp" 33 #include "oops/method.hpp" 34 #include "prims/jvmtiExport.hpp" 35 #include "prims/jvmtiThreadState.hpp" 36 #include "runtime/basicLock.hpp" 37 #include "runtime/biasedLocking.hpp" 38 #include "runtime/sharedRuntime.hpp" 39 #include "runtime/thread.inline.hpp" 40 41 // Implementation of InterpreterMacroAssembler 42 43 void InterpreterMacroAssembler::jump_to_entry(address entry) { 44 assert(entry, "Entry must have been generated by now"); 45 jump(RuntimeAddress(entry)); 46 } 47 48 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) { 49 Label update, next, none; 50 51 verify_oop(obj); 52 53 testptr(obj, obj); 54 jccb(Assembler::notZero, update); 55 orptr(mdo_addr, TypeEntries::null_seen); 56 jmpb(next); 57 58 bind(update); 59 load_klass(obj, obj); 60 61 xorptr(obj, mdo_addr); 62 testptr(obj, TypeEntries::type_klass_mask); 63 jccb(Assembler::zero, next); // klass seen before, nothing to 64 // do. The unknown bit may have been 65 // set already but no need to check. 66 67 testptr(obj, TypeEntries::type_unknown); 68 jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. 69 70 cmpptr(mdo_addr, 0); 71 jccb(Assembler::equal, none); 72 cmpptr(mdo_addr, TypeEntries::null_seen); 73 jccb(Assembler::equal, none); 74 // There is a chance that the checks above (re-reading profiling 75 // data from memory) fail if another thread has just set the 76 // profiling to this obj's klass 77 xorptr(obj, mdo_addr); 78 testptr(obj, TypeEntries::type_klass_mask); 79 jccb(Assembler::zero, next); 80 81 // different than before. Cannot keep accurate profile. 82 orptr(mdo_addr, TypeEntries::type_unknown); 83 jmpb(next); 84 85 bind(none); 86 // first time here. Set profile type. 87 movptr(mdo_addr, obj); 88 89 bind(next); 90 } 91 92 void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) { 93 if (!ProfileInterpreter) { 94 return; 95 } 96 97 if (MethodData::profile_arguments() || MethodData::profile_return()) { 98 Label profile_continue; 99 100 test_method_data_pointer(mdp, profile_continue); 101 102 int off_to_start = is_virtual ? 
in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size()); 103 104 cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag); 105 jcc(Assembler::notEqual, profile_continue); 106 107 if (MethodData::profile_arguments()) { 108 Label done; 109 int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset()); 110 addptr(mdp, off_to_args); 111 112 for (int i = 0; i < TypeProfileArgsLimit; i++) { 113 if (i > 0 || MethodData::profile_return()) { 114 // If return value type is profiled we may have no argument to profile 115 movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args)); 116 subl(tmp, i*TypeStackSlotEntries::per_arg_count()); 117 cmpl(tmp, TypeStackSlotEntries::per_arg_count()); 118 jcc(Assembler::less, done); 119 } 120 movptr(tmp, Address(callee, Method::const_offset())); 121 load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset())); 122 // stack offset o (zero based) from the start of the argument 123 // list, for n arguments translates into offset n - o - 1 from 124 // the end of the argument list 125 subptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args)); 126 subl(tmp, 1); 127 Address arg_addr = argument_address(tmp); 128 movptr(tmp, arg_addr); 129 130 Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args); 131 profile_obj_type(tmp, mdo_arg_addr); 132 133 int to_add = in_bytes(TypeStackSlotEntries::per_arg_size()); 134 addptr(mdp, to_add); 135 off_to_args += to_add; 136 } 137 138 if (MethodData::profile_return()) { 139 movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args)); 140 subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count()); 141 } 142 143 bind(done); 144 145 if (MethodData::profile_return()) { 146 // We're right after the type profile for the last 147 // argument. tmp is the number of cells left in the 148 // CallTypeData/VirtualCallTypeData to reach its end. Non null 149 // if there's a return to profile. 150 assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type"); 151 shll(tmp, exact_log2(DataLayout::cell_size)); 152 addptr(mdp, tmp); 153 } 154 movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp); 155 } else { 156 assert(MethodData::profile_return(), "either profile call args or call ret"); 157 update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size())); 158 } 159 160 // mdp points right after the end of the 161 // CallTypeData/VirtualCallTypeData, right after the cells for the 162 // return value type if there's one 163 164 bind(profile_continue); 165 } 166 } 167 168 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) { 169 assert_different_registers(mdp, ret, tmp, _bcp_register); 170 if (ProfileInterpreter && MethodData::profile_return()) { 171 Label profile_continue, done; 172 173 test_method_data_pointer(mdp, profile_continue); 174 175 if (MethodData::profile_return_jsr292_only()) { 176 assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2"); 177 178 // If we don't profile all invoke bytecodes we must make sure 179 // it's a bytecode we indeed profile. 
      // We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length.
      Label do_profile;
      cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
      jcc(Assembler::equal, do_profile);
      cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
      jcc(Assembler::equal, do_profile);
      get_method(tmp);
      cmpw(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
      jcc(Assembler::notEqual, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
    mov(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters.
    movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
    testl(tmp1, tmp1);
    jcc(Assembler::negative, profile_continue);

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameter. Collect profiling from the last parameter down.
    // mdo start + parameters offset + array length - 1
    addptr(mdp, tmp1);
    movptr(tmp1, Address(mdp, ArrayData::array_len_offset()));
    decrement(tmp1, TypeStackSlotEntries::per_arg_count());

    Label loop;
    bind(loop);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    Address::ScaleFactor per_arg_scale = Address::times(DataLayout::cell_size);
    Address arg_off(mdp, tmp1, per_arg_scale, off_base);
    Address arg_type(mdp, tmp1, per_arg_scale, type_base);

    // load offset on the stack from the slot for this parameter
    movptr(tmp2, arg_off);
    negptr(tmp2);
    // read the parameter from the local area
    movptr(tmp2, Address(_locals_register, tmp2, Interpreter::stackElementScale()));

    // profile the parameter
    profile_obj_type(tmp2, arg_type);

    // go to next parameter
    decrement(tmp1, TypeStackSlotEntries::per_arg_count());
    jcc(Assembler::positive, loop);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
                                                  int number_of_arguments) {
  // interpreter specific
  //
  // Note: No need to save/restore bcp & locals registers
  //       since these are callee saved registers and no blocking/
  //       GC can happen in leaf calls.
  // Further Note: DO NOT save/restore bcp/locals. If a caller has
  // already saved them so that it can use rsi/rdi as temporaries
  // then a save/restore here will DESTROY the copy the caller
  // saved! There used to be a save_bcp() that only happened in
  // the ASSERT path (no restore_bcp), which caused bizarre failures
  // when the JVM was built with ASSERTs.
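  // Debug-only sanity check: the block below verifies that the frame's
  // interpreter_frame_last_sp slot is still NULL_WORD before the leaf call
  // and stops the VM with an error message otherwise.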
262 #ifdef ASSERT 263 { 264 Label L; 265 cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 266 jcc(Assembler::equal, L); 267 stop("InterpreterMacroAssembler::call_VM_leaf_base:" 268 " last_sp != NULL"); 269 bind(L); 270 } 271 #endif 272 // super call 273 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); 274 // interpreter specific 275 // LP64: Used to ASSERT that r13/r14 were equal to frame's bcp/locals 276 // but since they may not have been saved (and we don't want to 277 // save them here (see note above) the assert is invalid. 278 } 279 280 void InterpreterMacroAssembler::call_VM_base(Register oop_result, 281 Register java_thread, 282 Register last_java_sp, 283 address entry_point, 284 int number_of_arguments, 285 bool check_exceptions) { 286 // interpreter specific 287 // 288 // Note: Could avoid restoring locals ptr (callee saved) - however doesn't 289 // really make a difference for these runtime calls, since they are 290 // slow anyway. Btw., bcp must be saved/restored since it may change 291 // due to GC. 292 NOT_LP64(assert(java_thread == noreg , "not expecting a precomputed java thread");) 293 save_bcp(); 294 #ifdef ASSERT 295 { 296 Label L; 297 cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 298 jcc(Assembler::equal, L); 299 stop("InterpreterMacroAssembler::call_VM_base:" 300 " last_sp != NULL"); 301 bind(L); 302 } 303 #endif /* ASSERT */ 304 // super call 305 MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp, 306 entry_point, number_of_arguments, 307 check_exceptions); 308 // interpreter specific 309 restore_bcp(); 310 restore_locals(); 311 } 312 313 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) { 314 if (JvmtiExport::can_pop_frame()) { 315 Label L; 316 // Initiate popframe handling only if it is not already being 317 // processed. If the flag has the popframe_processing bit set, it 318 // means that this code is called *during* popframe handling - we 319 // don't want to reenter. 320 // This method is only called just after the call into the vm in 321 // call_VM_base, so the arg registers are available. 322 Register pop_cond = NOT_LP64(java_thread) // Not clear if any other register is available on 32 bit 323 LP64_ONLY(c_rarg0); 324 movl(pop_cond, Address(java_thread, JavaThread::popframe_condition_offset())); 325 testl(pop_cond, JavaThread::popframe_pending_bit); 326 jcc(Assembler::zero, L); 327 testl(pop_cond, JavaThread::popframe_processing_bit); 328 jcc(Assembler::notZero, L); 329 // Call Interpreter::remove_activation_preserving_args_entry() to get the 330 // address of the same-named entrypoint in the generated interpreter code. 
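    // The leaf call below returns the entry address in rax; jmp(rax) then
    // transfers control to that entry. On 32-bit, java_thread was reused as
    // pop_cond above, so it is reloaded after the label.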
331 call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry)); 332 jmp(rax); 333 bind(L); 334 NOT_LP64(get_thread(java_thread);) 335 } 336 } 337 338 void InterpreterMacroAssembler::load_earlyret_value(TosState state) { 339 Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx); 340 NOT_LP64(get_thread(thread);) 341 movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset())); 342 const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset()); 343 const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset()); 344 const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset()); 345 #ifdef _LP64 346 switch (state) { 347 case qtos: // fall through 348 case atos: movptr(rax, oop_addr); 349 movptr(oop_addr, (int32_t)NULL_WORD); 350 verify_oop(rax, state); break; 351 case ltos: movptr(rax, val_addr); break; 352 case btos: // fall through 353 case ztos: // fall through 354 case ctos: // fall through 355 case stos: // fall through 356 case itos: movl(rax, val_addr); break; 357 case ftos: load_float(val_addr); break; 358 case dtos: load_double(val_addr); break; 359 case vtos: /* nothing to do */ break; 360 default : ShouldNotReachHere(); 361 } 362 // Clean up tos value in the thread object 363 movl(tos_addr, (int) ilgl); 364 movl(val_addr, (int32_t) NULL_WORD); 365 #else 366 const Address val_addr1(rcx, JvmtiThreadState::earlyret_value_offset() 367 + in_ByteSize(wordSize)); 368 switch (state) { 369 case qtos: // fall through 370 case atos: movptr(rax, oop_addr); 371 movptr(oop_addr, NULL_WORD); 372 verify_oop(rax, state); break; 373 case ltos: 374 movl(rdx, val_addr1); // fall through 375 case btos: // fall through 376 case ztos: // fall through 377 case ctos: // fall through 378 case stos: // fall through 379 case itos: movl(rax, val_addr); break; 380 case ftos: load_float(val_addr); break; 381 case dtos: load_double(val_addr); break; 382 case vtos: /* nothing to do */ break; 383 default : ShouldNotReachHere(); 384 } 385 #endif // _LP64 386 // Clean up tos value in the thread object 387 movl(tos_addr, (int32_t) ilgl); 388 movptr(val_addr, NULL_WORD); 389 NOT_LP64(movptr(val_addr1, NULL_WORD);) 390 } 391 392 393 void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) { 394 if (JvmtiExport::can_force_early_return()) { 395 Label L; 396 Register tmp = LP64_ONLY(c_rarg0) NOT_LP64(java_thread); 397 Register rthread = LP64_ONLY(r15_thread) NOT_LP64(java_thread); 398 399 movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset())); 400 testptr(tmp, tmp); 401 jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit; 402 403 // Initiate earlyret handling only if it is not already being processed. 404 // If the flag has the earlyret_processing bit set, it means that this code 405 // is called *during* earlyret handling - we don't want to reenter. 406 movl(tmp, Address(tmp, JvmtiThreadState::earlyret_state_offset())); 407 cmpl(tmp, JvmtiThreadState::earlyret_pending); 408 jcc(Assembler::notEqual, L); 409 410 // Call Interpreter::remove_activation_early_entry() to get the address of the 411 // same-named entrypoint in the generated interpreter code. 
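    // The earlyret TOS state is reloaded from the JvmtiThreadState and passed
    // to remove_activation_early_entry(); the entry address comes back in rax
    // and is jumped to.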
412 NOT_LP64(get_thread(java_thread);) 413 movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset())); 414 #ifdef _LP64 415 movl(tmp, Address(tmp, JvmtiThreadState::earlyret_tos_offset())); 416 call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), tmp); 417 #else 418 pushl(Address(tmp, JvmtiThreadState::earlyret_tos_offset())); 419 call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), 1); 420 #endif // _LP64 421 jmp(rax); 422 bind(L); 423 NOT_LP64(get_thread(java_thread);) 424 } 425 } 426 427 void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) { 428 assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode"); 429 load_unsigned_short(reg, Address(_bcp_register, bcp_offset)); 430 bswapl(reg); 431 shrl(reg, 16); 432 } 433 434 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index, 435 int bcp_offset, 436 size_t index_size) { 437 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); 438 if (index_size == sizeof(u2)) { 439 load_unsigned_short(index, Address(_bcp_register, bcp_offset)); 440 } else if (index_size == sizeof(u4)) { 441 movl(index, Address(_bcp_register, bcp_offset)); 442 // Check if the secondary index definition is still ~x, otherwise 443 // we have to change the following assembler code to calculate the 444 // plain index. 445 assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line"); 446 notl(index); // convert to plain index 447 } else if (index_size == sizeof(u1)) { 448 load_unsigned_byte(index, Address(_bcp_register, bcp_offset)); 449 } else { 450 ShouldNotReachHere(); 451 } 452 } 453 454 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, 455 Register index, 456 int bcp_offset, 457 size_t index_size) { 458 assert_different_registers(cache, index); 459 get_cache_index_at_bcp(index, bcp_offset, index_size); 460 movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); 461 assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); 462 // convert from field index to ConstantPoolCacheEntry index 463 assert(exact_log2(in_words(ConstantPoolCacheEntry::size())) == 2, "else change next line"); 464 shll(index, 2); 465 } 466 467 void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache, 468 Register index, 469 Register bytecode, 470 int byte_no, 471 int bcp_offset, 472 size_t index_size) { 473 get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size); 474 // We use a 32-bit load here since the layout of 64-bit words on 475 // little-endian machines allow us that. 
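  // What the load/shift/mask below computes, in effect:
  //   bytecode = (indices >> ((1 + byte_no) * BitsPerByte)) & bytecode_1_mask
  // i.e. the resolved bytecodes for f1 and f2 are stored one byte apart in
  // the indices field of the cache entry.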
476 movl(bytecode, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset())); 477 const int shift_count = (1 + byte_no) * BitsPerByte; 478 assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) || 479 (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift), 480 "correct shift count"); 481 shrl(bytecode, shift_count); 482 assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask"); 483 andl(bytecode, ConstantPoolCacheEntry::bytecode_1_mask); 484 } 485 486 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, 487 Register tmp, 488 int bcp_offset, 489 size_t index_size) { 490 assert(cache != tmp, "must use different register"); 491 get_cache_index_at_bcp(tmp, bcp_offset, index_size); 492 assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); 493 // convert from field index to ConstantPoolCacheEntry index 494 // and from word offset to byte offset 495 assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line"); 496 shll(tmp, 2 + LogBytesPerWord); 497 movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); 498 // skip past the header 499 addptr(cache, in_bytes(ConstantPoolCache::base_offset())); 500 addptr(cache, tmp); // construct pointer to cache entry 501 } 502 503 // Load object from cpool->resolved_references(index) 504 void InterpreterMacroAssembler::load_resolved_reference_at_index( 505 Register result, Register index) { 506 assert_different_registers(result, index); 507 // convert from field index to resolved_references() index and from 508 // word index to byte offset. Since this is a java object, it can be compressed 509 Register tmp = index; // reuse 510 shll(tmp, LogBytesPerHeapOop); 511 512 get_constant_pool(result); 513 // load pointer for resolved_references[] objArray 514 movptr(result, Address(result, ConstantPool::cache_offset_in_bytes())); 515 movptr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes())); 516 // JNIHandles::resolve(obj); 517 movptr(result, Address(result, 0)); 518 // Add in the index 519 addptr(result, tmp); 520 load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 521 } 522 523 // load cpool->resolved_klass_at(index) 524 void InterpreterMacroAssembler::load_resolved_klass_at_index(Register cpool, 525 Register index, Register klass) { 526 movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool))); 527 Register resolved_klasses = cpool; 528 movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); 529 movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes())); 530 } 531 532 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a 533 // subtype of super_klass. 
534 // 535 // Args: 536 // rax: superklass 537 // Rsub_klass: subklass 538 // 539 // Kills: 540 // rcx, rdi 541 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, 542 Label& ok_is_subtype) { 543 assert(Rsub_klass != rax, "rax holds superklass"); 544 LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");) 545 LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");) 546 assert(Rsub_klass != rcx, "rcx holds 2ndary super array length"); 547 assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr"); 548 549 // Profile the not-null value's klass. 550 profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi 551 552 // Do the check. 553 check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx 554 555 // Profile the failure of the check. 556 profile_typecheck_failed(rcx); // blows rcx 557 } 558 559 560 #ifndef _LP64 561 void InterpreterMacroAssembler::f2ieee() { 562 if (IEEEPrecision) { 563 fstp_s(Address(rsp, 0)); 564 fld_s(Address(rsp, 0)); 565 } 566 } 567 568 569 void InterpreterMacroAssembler::d2ieee() { 570 if (IEEEPrecision) { 571 fstp_d(Address(rsp, 0)); 572 fld_d(Address(rsp, 0)); 573 } 574 } 575 #endif // _LP64 576 577 // Java Expression Stack 578 579 void InterpreterMacroAssembler::pop_ptr(Register r) { 580 pop(r); 581 } 582 583 void InterpreterMacroAssembler::push_ptr(Register r) { 584 push(r); 585 } 586 587 void InterpreterMacroAssembler::push_i(Register r) { 588 push(r); 589 } 590 591 void InterpreterMacroAssembler::push_f(XMMRegister r) { 592 subptr(rsp, wordSize); 593 movflt(Address(rsp, 0), r); 594 } 595 596 void InterpreterMacroAssembler::pop_f(XMMRegister r) { 597 movflt(r, Address(rsp, 0)); 598 addptr(rsp, wordSize); 599 } 600 601 void InterpreterMacroAssembler::push_d(XMMRegister r) { 602 subptr(rsp, 2 * wordSize); 603 movdbl(Address(rsp, 0), r); 604 } 605 606 void InterpreterMacroAssembler::pop_d(XMMRegister r) { 607 movdbl(r, Address(rsp, 0)); 608 addptr(rsp, 2 * Interpreter::stackElementSize); 609 } 610 611 #ifdef _LP64 612 void InterpreterMacroAssembler::pop_i(Register r) { 613 // XXX can't use pop currently, upper half non clean 614 movl(r, Address(rsp, 0)); 615 addptr(rsp, wordSize); 616 } 617 618 void InterpreterMacroAssembler::pop_l(Register r) { 619 movq(r, Address(rsp, 0)); 620 addptr(rsp, 2 * Interpreter::stackElementSize); 621 } 622 623 void InterpreterMacroAssembler::push_l(Register r) { 624 subptr(rsp, 2 * wordSize); 625 movptr(Address(rsp, Interpreter::expr_offset_in_bytes(0)), r ); 626 movptr(Address(rsp, Interpreter::expr_offset_in_bytes(1)), NULL_WORD ); 627 } 628 629 void InterpreterMacroAssembler::pop(TosState state) { 630 switch (state) { 631 case qtos: // Fall through 632 case atos: pop_ptr(); break; 633 case btos: 634 case ztos: 635 case ctos: 636 case stos: 637 case itos: pop_i(); break; 638 case ltos: pop_l(); break; 639 case ftos: pop_f(xmm0); break; 640 case dtos: pop_d(xmm0); break; 641 case vtos: /* nothing to do */ break; 642 default: ShouldNotReachHere(); 643 } 644 verify_oop(rax, state); 645 } 646 647 void InterpreterMacroAssembler::push(TosState state) { 648 verify_oop(rax, state); 649 switch (state) { 650 case qtos: // Fall through 651 case atos: push_ptr(); break; 652 case btos: 653 case ztos: 654 case ctos: 655 case stos: 656 case itos: push_i(); break; 657 case ltos: push_l(); break; 658 case ftos: push_f(xmm0); break; 659 case dtos: push_d(xmm0); break; 660 case vtos: /* nothing to do */ break; 661 default : ShouldNotReachHere(); 662 } 663 } 664 #else 665 void 
InterpreterMacroAssembler::pop_i(Register r) { 666 pop(r); 667 } 668 669 void InterpreterMacroAssembler::pop_l(Register lo, Register hi) { 670 pop(lo); 671 pop(hi); 672 } 673 674 void InterpreterMacroAssembler::pop_f() { 675 fld_s(Address(rsp, 0)); 676 addptr(rsp, 1 * wordSize); 677 } 678 679 void InterpreterMacroAssembler::pop_d() { 680 fld_d(Address(rsp, 0)); 681 addptr(rsp, 2 * wordSize); 682 } 683 684 685 void InterpreterMacroAssembler::pop(TosState state) { 686 switch (state) { 687 case qtos: // fall through 688 case atos: pop_ptr(rax); break; 689 case btos: // fall through 690 case ztos: // fall through 691 case ctos: // fall through 692 case stos: // fall through 693 case itos: pop_i(rax); break; 694 case ltos: pop_l(rax, rdx); break; 695 case ftos: 696 if (UseSSE >= 1) { 697 pop_f(xmm0); 698 } else { 699 pop_f(); 700 } 701 break; 702 case dtos: 703 if (UseSSE >= 2) { 704 pop_d(xmm0); 705 } else { 706 pop_d(); 707 } 708 break; 709 case vtos: /* nothing to do */ break; 710 default : ShouldNotReachHere(); 711 } 712 verify_oop(rax, state); 713 } 714 715 716 void InterpreterMacroAssembler::push_l(Register lo, Register hi) { 717 push(hi); 718 push(lo); 719 } 720 721 void InterpreterMacroAssembler::push_f() { 722 // Do not schedule for no AGI! Never write beyond rsp! 723 subptr(rsp, 1 * wordSize); 724 fstp_s(Address(rsp, 0)); 725 } 726 727 void InterpreterMacroAssembler::push_d() { 728 // Do not schedule for no AGI! Never write beyond rsp! 729 subptr(rsp, 2 * wordSize); 730 fstp_d(Address(rsp, 0)); 731 } 732 733 734 void InterpreterMacroAssembler::push(TosState state) { 735 verify_oop(rax, state); 736 switch (state) { 737 case qtos: // fall through 738 case atos: push_ptr(rax); break; 739 case btos: // fall through 740 case ztos: // fall through 741 case ctos: // fall through 742 case stos: // fall through 743 case itos: push_i(rax); break; 744 case ltos: push_l(rax, rdx); break; 745 case ftos: 746 if (UseSSE >= 1) { 747 push_f(xmm0); 748 } else { 749 push_f(); 750 } 751 break; 752 case dtos: 753 if (UseSSE >= 2) { 754 push_d(xmm0); 755 } else { 756 push_d(); 757 } 758 break; 759 case vtos: /* nothing to do */ break; 760 default : ShouldNotReachHere(); 761 } 762 } 763 #endif // _LP64 764 765 766 // Helpers for swap and dup 767 void InterpreterMacroAssembler::load_ptr(int n, Register val) { 768 movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n))); 769 } 770 771 void InterpreterMacroAssembler::store_ptr(int n, Register val) { 772 movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val); 773 } 774 775 776 void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() { 777 // set sender sp 778 lea(_bcp_register, Address(rsp, wordSize)); 779 // record last_sp 780 movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), _bcp_register); 781 } 782 783 784 // Jump to from_interpreted entry of a call unless single stepping is possible 785 // in this thread in which case we must call the i2i entry 786 void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) { 787 prepare_to_jump_from_interpreted(); 788 789 if (JvmtiExport::can_post_interpreter_events()) { 790 Label run_compiled_code; 791 // JVMTI events, such as single-stepping, are implemented partly by avoiding running 792 // compiled code in threads for which the event is enabled. Check here for 793 // interp_only_mode if these events CAN be enabled. 794 // interp_only is an int, on little endian it is sufficient to test the byte only 795 // Is a cmpl faster? 
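    // If interp_only_mode is set for this thread, dispatch through the
    // method's interpreter entry (i2i) instead of the from_interpreted entry.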
796 LP64_ONLY(temp = r15_thread;) 797 NOT_LP64(get_thread(temp);) 798 cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0); 799 jccb(Assembler::zero, run_compiled_code); 800 jmp(Address(method, Method::interpreter_entry_offset())); 801 bind(run_compiled_code); 802 } 803 804 jmp(Address(method, Method::from_interpreted_offset())); 805 } 806 807 // The following two routines provide a hook so that an implementation 808 // can schedule the dispatch in two parts. x86 does not do this. 809 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) { 810 // Nothing x86 specific to be done here 811 } 812 813 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) { 814 dispatch_next(state, step); 815 } 816 817 void InterpreterMacroAssembler::dispatch_base(TosState state, 818 address* table, 819 bool verifyoop) { 820 verify_FPU(1, state); 821 if (VerifyActivationFrameSize) { 822 Label L; 823 mov(rcx, rbp); 824 subptr(rcx, rsp); 825 int32_t min_frame_size = 826 (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * 827 wordSize; 828 cmpptr(rcx, (int32_t)min_frame_size); 829 jcc(Assembler::greaterEqual, L); 830 stop("broken stack frame"); 831 bind(L); 832 } 833 if (verifyoop) { 834 verify_oop(rax, state); 835 } 836 #ifdef _LP64 837 lea(rscratch1, ExternalAddress((address)table)); 838 jmp(Address(rscratch1, rbx, Address::times_8)); 839 #else 840 Address index(noreg, rbx, Address::times_ptr); 841 ExternalAddress tbl((address)table); 842 ArrayAddress dispatch(tbl, index); 843 jump(dispatch); 844 #endif // _LP64 845 } 846 847 void InterpreterMacroAssembler::dispatch_only(TosState state) { 848 dispatch_base(state, Interpreter::dispatch_table(state)); 849 } 850 851 void InterpreterMacroAssembler::dispatch_only_normal(TosState state) { 852 dispatch_base(state, Interpreter::normal_table(state)); 853 } 854 855 void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) { 856 dispatch_base(state, Interpreter::normal_table(state), false); 857 } 858 859 860 void InterpreterMacroAssembler::dispatch_next(TosState state, int step) { 861 // load next bytecode (load before advancing _bcp_register to prevent AGI) 862 load_unsigned_byte(rbx, Address(_bcp_register, step)); 863 // advance _bcp_register 864 increment(_bcp_register, step); 865 dispatch_base(state, Interpreter::dispatch_table(state)); 866 } 867 868 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) { 869 // load current bytecode 870 load_unsigned_byte(rbx, Address(_bcp_register, 0)); 871 dispatch_base(state, table); 872 } 873 874 void InterpreterMacroAssembler::narrow(Register result) { 875 876 // Get method->_constMethod->_result_type 877 movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); 878 movptr(rcx, Address(rcx, Method::const_offset())); 879 load_unsigned_byte(rcx, Address(rcx, ConstMethod::result_type_offset())); 880 881 Label done, notBool, notByte, notChar; 882 883 // common case first 884 cmpl(rcx, T_INT); 885 jcc(Assembler::equal, done); 886 887 // mask integer result to narrower return type. 
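  // T_BOOLEAN keeps only bit 0; T_BYTE and T_SHORT are sign-extended from
  // 8/16 bits; T_CHAR is zero-extended from 16 bits; T_INT is left as is.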
  cmpl(rcx, T_BOOLEAN);
  jcc(Assembler::notEqual, notBool);
  andl(result, 0x1);
  jmp(done);

  bind(notBool);
  cmpl(rcx, T_BYTE);
  jcc(Assembler::notEqual, notByte);
  LP64_ONLY(movsbl(result, result);)
  NOT_LP64(shll(result, 24);)    // truncate upper 24 bits
  NOT_LP64(sarl(result, 24);)    // and sign-extend byte
  jmp(done);

  bind(notByte);
  cmpl(rcx, T_CHAR);
  jcc(Assembler::notEqual, notChar);
  LP64_ONLY(movzwl(result, result);)
  NOT_LP64(andl(result, 0xFFFF);)  // truncate upper 16 bits
  jmp(done);

  bind(notChar);
  // cmpl(rcx, T_SHORT);  // all that's left
  // jcc(Assembler::notEqual, done);
  LP64_ONLY(movswl(result, result);)
  NOT_LP64(shll(result, 16);)    // truncate upper 16 bits
  NOT_LP64(sarl(result, 16);)    // and sign-extend short

  // Nothing to do for T_INT
  bind(done);
}

// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(
        TosState state,
        Register ret_addr,
        bool throw_monitor_exception,
        bool install_monitor_exception,
        bool notify_jvmdi,
        bool load_values) {
  // Note: Registers rdx and xmm0 may be in use for the
  // result check if this is a synchronized method.
  Label unlocked, unlock, no_unlock;

  const Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  const Register robj    = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
  const Register rmon    = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
  // monitor pointers need different register
  // because rdx may have the result in it
  NOT_LP64(get_thread(rcx);)

  // get the value of _do_not_unlock_if_synchronized into rbx
  const Address do_not_unlock_if_synchronized(rthread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  movbool(rbx, do_not_unlock_if_synchronized);
  movbool(do_not_unlock_if_synchronized, false); // reset the flag

  // get method access flags
  movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
  movl(rcx, Address(rcx, Method::access_flags_offset()));
  testl(rcx, JVM_ACC_SYNCHRONIZED);
  jcc(Assembler::zero, unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  testbool(rbx);
  jcc(Assembler::notZero, no_unlock);

  // unlock monitor
  push(state); // save result

  // BasicObjectLock will be first in list, since this is a
  // synchronized method. However, need to check that the object has
  // not been unlocked by an explicit monitorexit bytecode.
  const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
                        wordSize - (int) sizeof(BasicObjectLock));
  // We use c_rarg1/rdx so that if we go slow path it will be the correct
  // register for unlock_object to pass to VM directly
  lea(robj, monitor); // address of first monitor

  movptr(rax, Address(robj, BasicObjectLock::obj_offset_in_bytes()));
  testptr(rax, rax);
  jcc(Assembler::notZero, unlock);

  pop(state);
  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw exception
    NOT_LP64(empty_FPU_stack();)  // remove possible return value from FPU-stack, otherwise stack could overflow
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll. If requested,
    // install an illegal_monitor_state_exception. Continue with
    // stack unrolling.
    if (install_monitor_exception) {
      NOT_LP64(empty_FPU_stack();)
      call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    jmp(unlocked);
  }

  bind(unlock);
  unlock_object(robj);
  pop(state);

  // Check for block-structured locking (i.e., that all locked
  // objects have been unlocked).
  bind(unlocked);

  // rax, rdx: Might contain return value

  // Check that all monitors are unlocked
  {
    Label loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);

    bind(restart);
    // We use c_rarg1 so that if we go slow path it will be the correct
    // register for unlock_object to pass to VM directly
    movptr(rmon, monitor_block_top); // points to current entry, starting
                                     // with top-most entry
    lea(rbx, monitor_block_bot);     // points to word before bottom of
                                     // monitor block
    jmp(entry);

    // Entry already locked, need to throw exception
    bind(exception);

    if (throw_monitor_exception) {
      // Throw exception
      NOT_LP64(empty_FPU_stack();)
      MacroAssembler::call_VM(noreg,
                              CAST_FROM_FN_PTR(address, InterpreterRuntime::
                                               throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      // We don't have to preserve c_rarg1 since we are going to throw an exception.
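      // The offending entry is unlocked here, the exception is optionally
      // installed, and the scan restarts from the top of the monitor block.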
1044 1045 push(state); 1046 mov(robj, rmon); // nop if robj and rmon are the same 1047 unlock_object(robj); 1048 pop(state); 1049 1050 if (install_monitor_exception) { 1051 NOT_LP64(empty_FPU_stack();) 1052 call_VM(noreg, CAST_FROM_FN_PTR(address, 1053 InterpreterRuntime:: 1054 new_illegal_monitor_state_exception)); 1055 } 1056 1057 jmp(restart); 1058 } 1059 1060 bind(loop); 1061 // check if current entry is used 1062 cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL); 1063 jcc(Assembler::notEqual, exception); 1064 1065 addptr(rmon, entry_size); // otherwise advance to next entry 1066 bind(entry); 1067 cmpptr(rmon, rbx); // check if bottom reached 1068 jcc(Assembler::notEqual, loop); // if not at bottom then check this entry 1069 } 1070 1071 bind(no_unlock); 1072 1073 // jvmti support 1074 if (notify_jvmdi) { 1075 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA 1076 } else { 1077 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA 1078 } 1079 1080 // remove activation 1081 // get sender sp 1082 movptr(rbx, 1083 Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); 1084 if (StackReservedPages > 0) { 1085 // testing if reserved zone needs to be re-enabled 1086 Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx); 1087 Label no_reserved_zone_enabling; 1088 1089 NOT_LP64(get_thread(rthread);) 1090 1091 cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_enabled); 1092 jcc(Assembler::equal, no_reserved_zone_enabling); 1093 1094 cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset())); 1095 jcc(Assembler::lessEqual, no_reserved_zone_enabling); 1096 1097 call_VM_leaf( 1098 CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread); 1099 push(rthread); 1100 call_VM(noreg, CAST_FROM_FN_PTR(address, 1101 InterpreterRuntime::throw_delayed_StackOverflowError)); 1102 should_not_reach_here(); 1103 1104 bind(no_reserved_zone_enabling); 1105 } 1106 if (load_values) { 1107 // We are returning a value type, load its fields into registers 1108 super_call_VM_leaf(StubRoutines::load_value_type_fields_in_regs()); 1109 1110 // call above kills the value in rbx. Reload it. 1111 movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); 1112 } 1113 leave(); // remove frame anchor 1114 pop(ret_addr); // get return address 1115 mov(rsp, rbx); // set sp to sender sp 1116 } 1117 1118 void InterpreterMacroAssembler::get_method_counters(Register method, 1119 Register mcs, Label& skip) { 1120 Label has_counters; 1121 movptr(mcs, Address(method, Method::method_counters_offset())); 1122 testptr(mcs, mcs); 1123 jcc(Assembler::notZero, has_counters); 1124 call_VM(noreg, CAST_FROM_FN_PTR(address, 1125 InterpreterRuntime::build_method_counters), method); 1126 movptr(mcs, Address(method,Method::method_counters_offset())); 1127 testptr(mcs, mcs); 1128 jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory 1129 bind(has_counters); 1130 } 1131 1132 1133 // Lock object 1134 // 1135 // Args: 1136 // rdx, c_rarg1: BasicObjectLock to be used for locking 1137 // 1138 // Kills: 1139 // rax, rbx 1140 void InterpreterMacroAssembler::lock_object(Register lock_reg) { 1141 assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx), 1142 "The argument is only for looks. 
It must be c_rarg1"); 1143 1144 if (UseHeavyMonitors) { 1145 call_VM(noreg, 1146 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), 1147 lock_reg); 1148 } else { 1149 Label done; 1150 1151 const Register swap_reg = rax; // Must use rax for cmpxchg instruction 1152 const Register tmp_reg = rbx; // Will be passed to biased_locking_enter to avoid a 1153 // problematic case where tmp_reg = no_reg. 1154 const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop 1155 1156 const int obj_offset = BasicObjectLock::obj_offset_in_bytes(); 1157 const int lock_offset = BasicObjectLock::lock_offset_in_bytes (); 1158 const int mark_offset = lock_offset + 1159 BasicLock::displaced_header_offset_in_bytes(); 1160 1161 Label slow_case; 1162 1163 // Load object pointer into obj_reg 1164 movptr(obj_reg, Address(lock_reg, obj_offset)); 1165 1166 if (UseBiasedLocking) { 1167 biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, false, done, &slow_case); 1168 } 1169 1170 // Load immediate 1 into swap_reg %rax 1171 movl(swap_reg, (int32_t)1); 1172 1173 // Load (object->mark() | 1) into swap_reg %rax 1174 orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); 1175 1176 // Save (object->mark() | 1) into BasicLock's displaced header 1177 movptr(Address(lock_reg, mark_offset), swap_reg); 1178 1179 assert(lock_offset == 0, 1180 "displaced header must be first word in BasicObjectLock"); 1181 1182 if (os::is_MP()) lock(); 1183 cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); 1184 if (PrintBiasedLockingStatistics) { 1185 cond_inc32(Assembler::zero, 1186 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr())); 1187 } 1188 jcc(Assembler::zero, done); 1189 1190 const int zero_bits = LP64_ONLY(7) NOT_LP64(3); 1191 1192 // Test if the oopMark is an obvious stack pointer, i.e., 1193 // 1) (mark & zero_bits) == 0, and 1194 // 2) rsp <= mark < mark + os::pagesize() 1195 // 1196 // These 3 tests can be done by evaluating the following 1197 // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())), 1198 // assuming both stack pointer and pagesize have their 1199 // least significant bits clear. 1200 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg 1201 subptr(swap_reg, rsp); 1202 andptr(swap_reg, zero_bits - os::vm_page_size()); 1203 1204 // Save the test result, for recursive case, the result is zero 1205 movptr(Address(lock_reg, mark_offset), swap_reg); 1206 1207 if (PrintBiasedLockingStatistics) { 1208 cond_inc32(Assembler::zero, 1209 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr())); 1210 } 1211 jcc(Assembler::zero, done); 1212 1213 bind(slow_case); 1214 1215 // Call the runtime routine for slow case 1216 call_VM(noreg, 1217 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), 1218 lock_reg); 1219 1220 bind(done); 1221 } 1222 } 1223 1224 1225 // Unlocks an object. Used in monitorexit bytecode and 1226 // remove_activation. Throws an IllegalMonitorException if object is 1227 // not locked by current thread. 1228 // 1229 // Args: 1230 // rdx, c_rarg1: BasicObjectLock for lock 1231 // 1232 // Kills: 1233 // rax 1234 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs) 1235 // rscratch1 (scratch reg) 1236 // rax, rbx, rcx, rdx 1237 void InterpreterMacroAssembler::unlock_object(Register lock_reg) { 1238 assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx), 1239 "The argument is only for looks. 
It must be c_rarg1"); 1240 1241 if (UseHeavyMonitors) { 1242 call_VM(noreg, 1243 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), 1244 lock_reg); 1245 } else { 1246 Label done; 1247 1248 const Register swap_reg = rax; // Must use rax for cmpxchg instruction 1249 const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx); // Will contain the old oopMark 1250 const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop 1251 1252 save_bcp(); // Save in case of exception 1253 1254 // Convert from BasicObjectLock structure to object and BasicLock 1255 // structure Store the BasicLock address into %rax 1256 lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes())); 1257 1258 // Load oop into obj_reg(%c_rarg3) 1259 movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); 1260 1261 // Free entry 1262 movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); 1263 1264 if (UseBiasedLocking) { 1265 biased_locking_exit(obj_reg, header_reg, done); 1266 } 1267 1268 // Load the old header from BasicLock structure 1269 movptr(header_reg, Address(swap_reg, 1270 BasicLock::displaced_header_offset_in_bytes())); 1271 1272 // Test for recursion 1273 testptr(header_reg, header_reg); 1274 1275 // zero for recursive case 1276 jcc(Assembler::zero, done); 1277 1278 // Atomic swap back the old header 1279 if (os::is_MP()) lock(); 1280 cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); 1281 1282 // zero for simple unlock of a stack-lock case 1283 jcc(Assembler::zero, done); 1284 1285 // Call the runtime routine for slow case. 1286 movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), 1287 obj_reg); // restore obj 1288 call_VM(noreg, 1289 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), 1290 lock_reg); 1291 1292 bind(done); 1293 1294 restore_bcp(); 1295 } 1296 } 1297 1298 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, 1299 Label& zero_continue) { 1300 assert(ProfileInterpreter, "must be profiling interpreter"); 1301 movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize)); 1302 testptr(mdp, mdp); 1303 jcc(Assembler::zero, zero_continue); 1304 } 1305 1306 1307 // Set the method data pointer for the current bcp. 1308 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { 1309 assert(ProfileInterpreter, "must be profiling interpreter"); 1310 Label set_mdp; 1311 push(rax); 1312 push(rbx); 1313 1314 get_method(rbx); 1315 // Test MDO to avoid the call if it is NULL. 1316 movptr(rax, Address(rbx, in_bytes(Method::method_data_offset()))); 1317 testptr(rax, rax); 1318 jcc(Assembler::zero, set_mdp); 1319 // rbx: method 1320 // _bcp_register: bcp 1321 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, _bcp_register); 1322 // rax: mdi 1323 // mdo is guaranteed to be non-zero here, we checked for it before the call. 
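  // rax now holds the data index (di) returned by bcp_to_di; adding the MDO
  // base plus MethodData::data_offset() below turns it into an absolute mdp.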
1324 movptr(rbx, Address(rbx, in_bytes(Method::method_data_offset()))); 1325 addptr(rbx, in_bytes(MethodData::data_offset())); 1326 addptr(rax, rbx); 1327 bind(set_mdp); 1328 movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), rax); 1329 pop(rbx); 1330 pop(rax); 1331 } 1332 1333 void InterpreterMacroAssembler::verify_method_data_pointer() { 1334 assert(ProfileInterpreter, "must be profiling interpreter"); 1335 #ifdef ASSERT 1336 Label verify_continue; 1337 push(rax); 1338 push(rbx); 1339 Register arg3_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); 1340 Register arg2_reg = LP64_ONLY(c_rarg2) NOT_LP64(rdx); 1341 push(arg3_reg); 1342 push(arg2_reg); 1343 test_method_data_pointer(arg3_reg, verify_continue); // If mdp is zero, continue 1344 get_method(rbx); 1345 1346 // If the mdp is valid, it will point to a DataLayout header which is 1347 // consistent with the bcp. The converse is highly probable also. 1348 load_unsigned_short(arg2_reg, 1349 Address(arg3_reg, in_bytes(DataLayout::bci_offset()))); 1350 addptr(arg2_reg, Address(rbx, Method::const_offset())); 1351 lea(arg2_reg, Address(arg2_reg, ConstMethod::codes_offset())); 1352 cmpptr(arg2_reg, _bcp_register); 1353 jcc(Assembler::equal, verify_continue); 1354 // rbx: method 1355 // _bcp_register: bcp 1356 // c_rarg3: mdp 1357 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), 1358 rbx, _bcp_register, arg3_reg); 1359 bind(verify_continue); 1360 pop(arg2_reg); 1361 pop(arg3_reg); 1362 pop(rbx); 1363 pop(rax); 1364 #endif // ASSERT 1365 } 1366 1367 1368 void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, 1369 int constant, 1370 Register value) { 1371 assert(ProfileInterpreter, "must be profiling interpreter"); 1372 Address data(mdp_in, constant); 1373 movptr(data, value); 1374 } 1375 1376 1377 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in, 1378 int constant, 1379 bool decrement) { 1380 // Counter address 1381 Address data(mdp_in, constant); 1382 1383 increment_mdp_data_at(data, decrement); 1384 } 1385 1386 void InterpreterMacroAssembler::increment_mdp_data_at(Address data, 1387 bool decrement) { 1388 assert(ProfileInterpreter, "must be profiling interpreter"); 1389 // %%% this does 64bit counters at best it is wasting space 1390 // at worst it is a rare bug when counters overflow 1391 1392 if (decrement) { 1393 // Decrement the register. Set condition codes. 1394 addptr(data, (int32_t) -DataLayout::counter_increment); 1395 // If the decrement causes the counter to overflow, stay negative 1396 Label L; 1397 jcc(Assembler::negative, L); 1398 addptr(data, (int32_t) DataLayout::counter_increment); 1399 bind(L); 1400 } else { 1401 assert(DataLayout::counter_increment == 1, 1402 "flow-free idiom only works with 1"); 1403 // Increment the register. Set carry flag. 1404 addptr(data, DataLayout::counter_increment); 1405 // If the increment causes the counter to overflow, pull back by 1. 
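    // The addptr/sbbptr pair makes the counter saturate instead of wrapping:
    // the increment sets the carry flag on overflow and the sbb subtracts it
    // back (8-bit analogy: 0xFF + 1 = 0x00 with CF=1, then 0x00 - 0 - CF = 0xFF).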
1406 sbbptr(data, (int32_t)0); 1407 } 1408 } 1409 1410 1411 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in, 1412 Register reg, 1413 int constant, 1414 bool decrement) { 1415 Address data(mdp_in, reg, Address::times_1, constant); 1416 1417 increment_mdp_data_at(data, decrement); 1418 } 1419 1420 void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in, 1421 int flag_byte_constant) { 1422 assert(ProfileInterpreter, "must be profiling interpreter"); 1423 int header_offset = in_bytes(DataLayout::header_offset()); 1424 int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant); 1425 // Set the flag 1426 orl(Address(mdp_in, header_offset), header_bits); 1427 } 1428 1429 1430 1431 void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in, 1432 int offset, 1433 Register value, 1434 Register test_value_out, 1435 Label& not_equal_continue) { 1436 assert(ProfileInterpreter, "must be profiling interpreter"); 1437 if (test_value_out == noreg) { 1438 cmpptr(value, Address(mdp_in, offset)); 1439 } else { 1440 // Put the test value into a register, so caller can use it: 1441 movptr(test_value_out, Address(mdp_in, offset)); 1442 cmpptr(test_value_out, value); 1443 } 1444 jcc(Assembler::notEqual, not_equal_continue); 1445 } 1446 1447 1448 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, 1449 int offset_of_disp) { 1450 assert(ProfileInterpreter, "must be profiling interpreter"); 1451 Address disp_address(mdp_in, offset_of_disp); 1452 addptr(mdp_in, disp_address); 1453 movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in); 1454 } 1455 1456 1457 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, 1458 Register reg, 1459 int offset_of_disp) { 1460 assert(ProfileInterpreter, "must be profiling interpreter"); 1461 Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp); 1462 addptr(mdp_in, disp_address); 1463 movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in); 1464 } 1465 1466 1467 void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, 1468 int constant) { 1469 assert(ProfileInterpreter, "must be profiling interpreter"); 1470 addptr(mdp_in, constant); 1471 movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in); 1472 } 1473 1474 1475 void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) { 1476 assert(ProfileInterpreter, "must be profiling interpreter"); 1477 push(return_bci); // save/restore across call_VM 1478 call_VM(noreg, 1479 CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), 1480 return_bci); 1481 pop(return_bci); 1482 } 1483 1484 1485 void InterpreterMacroAssembler::profile_taken_branch(Register mdp, 1486 Register bumped_count) { 1487 if (ProfileInterpreter) { 1488 Label profile_continue; 1489 1490 // If no method data exists, go to profile_continue. 1491 // Otherwise, assign to mdp 1492 test_method_data_pointer(mdp, profile_continue); 1493 1494 // We are taking a branch. Increment the taken count. 
1495 // We inline increment_mdp_data_at to return bumped_count in a register 1496 //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset())); 1497 Address data(mdp, in_bytes(JumpData::taken_offset())); 1498 movptr(bumped_count, data); 1499 assert(DataLayout::counter_increment == 1, 1500 "flow-free idiom only works with 1"); 1501 addptr(bumped_count, DataLayout::counter_increment); 1502 sbbptr(bumped_count, 0); 1503 movptr(data, bumped_count); // Store back out 1504 1505 // The method data pointer needs to be updated to reflect the new target. 1506 update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset())); 1507 bind(profile_continue); 1508 } 1509 } 1510 1511 1512 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) { 1513 if (ProfileInterpreter) { 1514 Label profile_continue; 1515 1516 // If no method data exists, go to profile_continue. 1517 test_method_data_pointer(mdp, profile_continue); 1518 1519 // We are taking a branch. Increment the not taken count. 1520 increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset())); 1521 1522 // The method data pointer needs to be updated to correspond to 1523 // the next bytecode 1524 update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size())); 1525 bind(profile_continue); 1526 } 1527 } 1528 1529 void InterpreterMacroAssembler::profile_call(Register mdp) { 1530 if (ProfileInterpreter) { 1531 Label profile_continue; 1532 1533 // If no method data exists, go to profile_continue. 1534 test_method_data_pointer(mdp, profile_continue); 1535 1536 // We are making a call. Increment the count. 1537 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); 1538 1539 // The method data pointer needs to be updated to reflect the new target. 1540 update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size())); 1541 bind(profile_continue); 1542 } 1543 } 1544 1545 1546 void InterpreterMacroAssembler::profile_final_call(Register mdp) { 1547 if (ProfileInterpreter) { 1548 Label profile_continue; 1549 1550 // If no method data exists, go to profile_continue. 1551 test_method_data_pointer(mdp, profile_continue); 1552 1553 // We are making a call. Increment the count. 1554 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); 1555 1556 // The method data pointer needs to be updated to reflect the new target. 1557 update_mdp_by_constant(mdp, 1558 in_bytes(VirtualCallData:: 1559 virtual_call_data_size())); 1560 bind(profile_continue); 1561 } 1562 } 1563 1564 1565 void InterpreterMacroAssembler::profile_virtual_call(Register receiver, 1566 Register mdp, 1567 Register reg2, 1568 bool receiver_can_be_null) { 1569 if (ProfileInterpreter) { 1570 Label profile_continue; 1571 1572 // If no method data exists, go to profile_continue. 1573 test_method_data_pointer(mdp, profile_continue); 1574 1575 Label skip_receiver_profile; 1576 if (receiver_can_be_null) { 1577 Label not_null; 1578 testptr(receiver, receiver); 1579 jccb(Assembler::notZero, not_null); 1580 // We are making a call. Increment the count for null receiver. 1581 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); 1582 jmp(skip_receiver_profile); 1583 bind(not_null); 1584 } 1585 1586 // Record the receiver type. 1587 record_klass_in_profile(receiver, mdp, reg2, true); 1588 bind(skip_receiver_profile); 1589 1590 // The method data pointer needs to be updated to reflect the new target. 
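    // With JVMCI included, the mdp advance is done here only when
    // MethodProfileWidth == 0; otherwise profile_called_method() below
    // performs it after recording the callee.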
#if INCLUDE_JVMCI
    if (MethodProfileWidth == 0) {
      update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    }
#else // INCLUDE_JVMCI
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
#endif // INCLUDE_JVMCI
    bind(profile_continue);
  }
}

#if INCLUDE_JVMCI
void InterpreterMacroAssembler::profile_called_method(Register method, Register mdp, Register reg2) {
  assert_different_registers(method, mdp, reg2);
  if (ProfileInterpreter && MethodProfileWidth > 0) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label done;
    record_item_in_profile_helper(method, mdp, reg2, 0, done, MethodProfileWidth,
      &VirtualCallData::method_offset, &VirtualCallData::method_count_offset, in_bytes(VirtualCallData::nonprofiled_receiver_count_offset()));
    bind(done);

    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}
#endif // INCLUDE_JVMCI

// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows. At the same time, it remembers
// the location of the first empty row. (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree. Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register mdp,
                                        Register reg2, int start_row,
                                        Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      increment_mdp_data_at(mdp, in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset()));
    }
#endif // INCLUDE_JVMCI
  } else {
    int non_profiled_offset = -1;
    if (is_virtual_call) {
      non_profiled_offset = in_bytes(CounterData::count_offset());
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      non_profiled_offset = in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset());
    }
#endif // INCLUDE_JVMCI

    record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
      &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset, non_profiled_offset);
  }
}

void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
                                        Register reg2, int start_row, Label& done, int total_rows,
                                        OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
                                        int non_profiled_offset) {
  int last_row = total_rows - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the item and for null.
  // Take any of three different outcomes:
  //   1. found item => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the item is item[n].
    int item_offset = in_bytes(item_offset_fn(row));
    test_mdp_data_at(mdp, item_offset, item,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
    // (Reg2 now contains the item from the CallData.)

    // The item is item[n]. Increment count[n].
    int count_offset = in_bytes(item_count_offset_fn(row));
    increment_mdp_data_at(mdp, count_offset);
    jmp(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on item[n]... Test for null.
      testptr(reg2, reg2);
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (non_profiled_offset >= 0) {
          jccb(Assembler::zero, found_null);
          // Item did not match any saved item and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(mdp, non_profiled_offset);
          jmp(done);
          bind(found_null);
        } else {
          jcc(Assembler::notZero, done);
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      jcc(Assembler::zero, found_null);

      // Put all the "Case 3" tests here.
      record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
        item_offset_fn, item_count_offset_fn, non_profiled_offset);

      // Found a null. Keep searching for a matching item,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching item, but we
  // observed that item[start_row] is NULL.

  // Fill in the item field and increment the count.
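  // In terms of the example pseudo-code further below, this is the
  // "row[n].init(rec)" step. An illustrative sketch only:
  //
  //   item[start_row]  = item;                           // set_mdp_data_at
  //   count[start_row] = DataLayout::counter_increment;  // set_mdp_data_at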
  int item_offset = in_bytes(item_offset_fn(start_row));
  set_mdp_data_at(mdp, item_offset, item);
  int count_offset = in_bytes(item_count_offset_fn(start_row));
  movl(reg2, DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg2);
  if (start_row > 0) {
    jmp(done);
  }
}

// Example state machine code for three profile rows:
//   // main copy of decision tree, rooted at row[0]
//   if (row[0].rec == rec) { row[0].incr(); goto done; }
//   if (row[0].rec != NULL) {
//     // inner copy of decision tree, rooted at row[1]
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[1].rec != NULL) {
//       // degenerate decision tree, rooted at row[2]
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
//       row[2].init(rec); goto done;
//     } else {
//       // remember row[1] is empty
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       row[1].init(rec); goto done;
//     }
//   } else {
//     // remember row[0] is empty
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[2].rec == rec) { row[2].incr(); goto done; }
//     row[0].init(rec); goto done;
//   }
// done:

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp, Register reg2,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);

  bind (done);
}

void InterpreterMacroAssembler::profile_ret(Register return_bci,
                                            Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp,
                       in_bytes(RetData::bci_offset(row)),
                       return_bci, noreg,
                       next_test);

      // return_bci is equal to bci[n]. Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp,
                           in_bytes(RetData::bci_displacement_offset(row)));
      jmp(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
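    // Descriptive note: the delta skips whatever was allocated for this
    // bytecode -- with TypeProfileCasts the cast site carries receiver rows
    // and uses the larger VirtualCallData-sized cell, otherwise only a
    // BitData. profile_typecheck() below computes the same delta, so the
    // two must stay in sync.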
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter. We expect to see zero or small negatives.
    increment_mdp_data_at(mdp, count_offset, true);

    bind (profile_continue);
  }
}


void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, reg2, false);
      NOT_LP64(assert(reg2 == rdi, "we know how to fix this blown reg");)
      NOT_LP64(restore_locals();)  // Restore EDI
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count
    increment_mdp_data_at(mdp,
                          in_bytes(MultiBranchData::default_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         in_bytes(MultiBranchData::
                                  default_displacement_offset()));

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register mdp,
                                                    Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base (index * per_case_size_in_bytes()) +
    // case_array_offset_in_bytes()
    movl(reg2, in_bytes(MultiBranchData::per_case_size()));
    imulptr(index, reg2); // XXX l ?
    addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?

    // Update the case count
    increment_mdp_data_at(mdp,
                          index,
                          in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
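    // Roughly, as an illustrative sketch only (index already holds the byte
    // offset of this case's record):
    //
    //   mdp += *(intptr_t*)(mdp + index + relative_displacement_offset);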
    update_mdp_by_offset(mdp,
                         index,
                         in_bytes(MultiBranchData::
                                  relative_displacement_offset()));

    bind(profile_continue);
  }
}



void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
  if (state == atos) {
    MacroAssembler::verify_oop(reg);
  }
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
#ifndef _LP64
  if ((state == ftos && UseSSE < 1) ||
      (state == dtos && UseSSE < 2)) {
    MacroAssembler::verify_FPU(stack_depth);
  }
#endif
}

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask,
                                                        Register scratch, bool preloaded,
                                                        Condition cond, Label* where) {
  if (!preloaded) {
    movl(scratch, counter_addr);
  }
  incrementl(scratch, increment);
  movl(counter_addr, scratch);
  andl(scratch, mask);
  jcc(cond, *where);
}

void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent
  // to track stack depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    NOT_LP64(get_thread(rthread);)
    movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 rthread, rarg);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rthread, rarg);
  }
}


void InterpreterMacroAssembler::notify_method_exit(
    TosState state, NotifyMethodExitMode mode) {
  // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent
  // to track stack depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    // template interpreter will leave the result on the top of the stack.
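    // push(state)/pop(state) below save and restore the TOS value around the
    // VM call; what gets pushed depends on the TosState, so the method
    // result is preserved whatever its type.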
    push(state);
    NOT_LP64(get_thread(rthread);)
    movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    bind(L);
    pop(state);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    push(state);
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                 rthread, rarg);
    pop(state);
  }
}