/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interp_masm_x86.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/valueKlass.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"

// Implementation of InterpreterMacroAssembler

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  jump(RuntimeAddress(entry));
}

void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  Label update, next, none;

  verify_oop(obj);

  testptr(obj, obj);
  jccb(Assembler::notZero, update);
  orptr(mdo_addr, TypeEntries::null_seen);
  jmpb(next);

  bind(update);
  load_klass(obj, obj);

  xorptr(obj, mdo_addr);
  testptr(obj, TypeEntries::type_klass_mask);
  jccb(Assembler::zero, next); // klass seen before, nothing to
                               // do. The unknown bit may have been
                               // set already but no need to check.

  testptr(obj, TypeEntries::type_unknown);
  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

  cmpptr(mdo_addr, 0);
  jccb(Assembler::equal, none);
  cmpptr(mdo_addr, TypeEntries::null_seen);
  jccb(Assembler::equal, none);
  // There is a chance that the checks above (re-reading profiling
  // data from memory) fail if another thread has just set the
  // profiling to this obj's klass
  xorptr(obj, mdo_addr);
  testptr(obj, TypeEntries::type_klass_mask);
  jccb(Assembler::zero, next);

  // different than before. Cannot keep accurate profile.
  orptr(mdo_addr, TypeEntries::type_unknown);
  jmpb(next);

  bind(none);
  // first time here. Set profile type.
  movptr(mdo_addr, obj);

  bind(next);
}

void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
    jcc(Assembler::notEqual, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
      addptr(mdp, off_to_args);

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If return value type is profiled we may have no argument to profile
          movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset()) - off_to_args));
          subl(tmp, i * TypeStackSlotEntries::per_arg_count());
          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
          jcc(Assembler::less, done);
        }
        movptr(tmp, Address(callee, Method::const_offset()));
        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
        // stack offset o (zero based) from the start of the argument
        // list, for n arguments translates into offset n - o - 1 from
        // the end of the argument list
        subptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i)) - off_to_args));
        subl(tmp, 1);
        Address arg_addr = argument_address(tmp);
        movptr(tmp, arg_addr);

        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - off_to_args);
        profile_obj_type(tmp, mdo_arg_addr);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        addptr(mdp, to_add);
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset()) - off_to_args));
        subl(tmp, TypeProfileArgsLimit * TypeStackSlotEntries::per_arg_count());
      }

      bind(done);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non null
        // if there's a return to profile.
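        // Convert that remaining cell count into a byte offset and advance mdp
        // to the end of the CallTypeData/VirtualCallTypeData.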
        assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        shll(tmp, exact_log2(DataLayout::cell_size));
        addptr(mdp, tmp);
      }
      movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  assert_different_registers(mdp, ret, tmp, _bcp_register);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length
      Label do_profile;
      cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
      jcc(Assembler::equal, do_profile);
      cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
      jcc(Assembler::equal, do_profile);
      get_method(tmp);
      cmpw(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
      jcc(Assembler::notEqual, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
    mov(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters
    movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
    testl(tmp1, tmp1);
    jcc(Assembler::negative, profile_continue);

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameter. Collect profiling from the last parameter down.
    // mdo start + parameters offset + array length - 1
    addptr(mdp, tmp1);
    movptr(tmp1, Address(mdp, ArrayData::array_len_offset()));
    decrement(tmp1, TypeStackSlotEntries::per_arg_count());

    Label loop;
    bind(loop);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    Address::ScaleFactor per_arg_scale = Address::times(DataLayout::cell_size);
    Address arg_off(mdp, tmp1, per_arg_scale, off_base);
    Address arg_type(mdp, tmp1, per_arg_scale, type_base);

    // load offset on the stack from the slot for this parameter
    movptr(tmp2, arg_off);
    negptr(tmp2);
    // read the parameter from the local area
    movptr(tmp2, Address(_locals_register, tmp2, Interpreter::stackElementScale()));

    // profile the parameter
    profile_obj_type(tmp2, arg_type);

    // go to next parameter
    decrement(tmp1, TypeStackSlotEntries::per_arg_count());
    jcc(Assembler::positive, loop);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
                                                  int number_of_arguments) {
  // interpreter specific
  //
  // Note: No need to save/restore bcp & locals registers
  //       since these are callee saved registers and no blocking/
  //       GC can happen in leaf calls.
  // Further Note: DO NOT save/restore bcp/locals. If a caller has
  // already saved them so that it can use rsi/rdi as temporaries
  // then a save/restore here will DESTROY the copy the caller
  // saved! There used to be a save_bcp() that only happened in
  // the ASSERT path (with no restore_bcp), which caused bizarre
  // failures when the JVM was built with ASSERTs.
#ifdef ASSERT
  {
    Label L;
    cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    stop("InterpreterMacroAssembler::call_VM_leaf_base:"
         " last_sp != NULL");
    bind(L);
  }
#endif
  // super call
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
  // interpreter specific
  // LP64: Used to ASSERT that r13/r14 were equal to frame's bcp/locals,
  // but since they may not have been saved (and we don't want to
  // save them here, see note above) the assert is invalid.
}

void InterpreterMacroAssembler::call_VM_base(Register oop_result,
                                             Register java_thread,
                                             Register last_java_sp,
                                             address entry_point,
                                             int number_of_arguments,
                                             bool check_exceptions) {
  // interpreter specific
  //
  // Note: Could avoid restoring locals ptr (callee saved) - however doesn't
  //       really make a difference for these runtime calls, since they are
  //       slow anyway. Btw., bcp must be saved/restored since it may change
  //       due to GC.
  NOT_LP64(assert(java_thread == noreg, "not expecting a precomputed java thread");)
  save_bcp();
#ifdef ASSERT
  {
    Label L;
    cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    stop("InterpreterMacroAssembler::call_VM_base:"
         " last_sp != NULL");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
                               entry_point, number_of_arguments,
                               check_exceptions);
  // interpreter specific
  restore_bcp();
  restore_locals();
}

void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;
    // Initiate popframe handling only if it is not already being
    // processed. If the flag has the popframe_processing bit set, it
    // means that this code is called *during* popframe handling - we
    // don't want to reenter.
    // This method is only called just after the call into the vm in
    // call_VM_base, so the arg registers are available.
    Register pop_cond = NOT_LP64(java_thread) // Not clear if any other register is available on 32 bit
                        LP64_ONLY(c_rarg0);
    movl(pop_cond, Address(java_thread, JavaThread::popframe_condition_offset()));
    testl(pop_cond, JavaThread::popframe_pending_bit);
    jcc(Assembler::zero, L);
    testl(pop_cond, JavaThread::popframe_processing_bit);
    jcc(Assembler::notZero, L);
    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
    jmp(rax);
    bind(L);
    NOT_LP64(get_thread(java_thread);)
  }
}

void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  NOT_LP64(get_thread(thread);)
  movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset());
#ifdef _LP64
  switch (state) {
    case atos: movptr(rax, oop_addr);
               movptr(oop_addr, (int32_t)NULL_WORD);
               verify_oop(rax, state);
               break;
    case ltos: movptr(rax, val_addr); break;
    case btos: // fall through
    case ztos: // fall through
    case ctos: // fall through
    case stos: // fall through
    case itos: movl(rax, val_addr); break;
    case ftos: load_float(val_addr); break;
    case dtos: load_double(val_addr); break;
    case vtos: /* nothing to do */ break;
    default  : ShouldNotReachHere();
  }
  // Clean up tos value in the thread object
  movl(tos_addr, (int) ilgl);
  movl(val_addr, (int32_t) NULL_WORD);
#else
  const Address val_addr1(rcx, JvmtiThreadState::earlyret_value_offset()
                               + in_ByteSize(wordSize));
  switch (state) {
    case atos: movptr(rax, oop_addr);
               movptr(oop_addr, NULL_WORD);
               verify_oop(rax, state);
               break;
    case ltos:
               movl(rdx, val_addr1); // fall through
    case btos: // fall through
    case ztos: // fall through
    case ctos: // fall through
    case stos: // fall through
    case itos: movl(rax, val_addr); break;
    case ftos: load_float(val_addr); break;
    case dtos: load_double(val_addr); break;
    case vtos: /* nothing to do */ break;
    default  : ShouldNotReachHere();
  }
#endif // _LP64
  // Clean up tos value in the thread object
  movl(tos_addr, (int32_t) ilgl);
  movptr(val_addr, NULL_WORD);
  NOT_LP64(movptr(val_addr1, NULL_WORD);)
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register tmp = LP64_ONLY(c_rarg0) NOT_LP64(java_thread);
    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(java_thread);

    movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
    testptr(tmp, tmp);
    jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    movl(tmp, Address(tmp, JvmtiThreadState::earlyret_state_offset()));
    cmpl(tmp, JvmtiThreadState::earlyret_pending);
    jcc(Assembler::notEqual, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.
    NOT_LP64(get_thread(java_thread);)
    movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
#ifdef _LP64
    movl(tmp, Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), tmp);
#else
    pushl(Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), 1);
#endif // _LP64
    jmp(rax);
    bind(L);
    NOT_LP64(get_thread(java_thread);)
  }
}

void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  load_unsigned_short(reg, Address(_bcp_register, bcp_offset));
  bswapl(reg);
  shrl(reg, 16);
}

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
                                                       int bcp_offset,
                                                       size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    load_unsigned_short(index, Address(_bcp_register, bcp_offset));
  } else if (index_size == sizeof(u4)) {
    movl(index, Address(_bcp_register, bcp_offset));
    // Check if the secondary index definition is still ~x, otherwise
    // we have to change the following assembler code to calculate the
    // plain index.
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    notl(index); // convert to plain index
  } else if (index_size == sizeof(u1)) {
    load_unsigned_byte(index, Address(_bcp_register, bcp_offset));
  } else {
    ShouldNotReachHere();
  }
}

void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                           Register index,
                                                           int bcp_offset,
                                                           size_t index_size) {
  assert_different_registers(cache, index);
  get_cache_index_at_bcp(index, bcp_offset, index_size);
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  assert(exact_log2(in_words(ConstantPoolCacheEntry::size())) == 2, "else change next line");
  shll(index, 2);
}

void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register index,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
  // We use a 32-bit load here since the layout of 64-bit words on
  // little-endian machines allows us to do that.
  movl(bytecode, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
  const int shift_count = (1 + byte_no) * BitsPerByte;
  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
         "correct shift count");
  shrl(bytecode, shift_count);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  andl(bytecode, ConstantPoolCacheEntry::bytecode_1_mask);
}

void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                               Register tmp,
                                                               int bcp_offset,
                                                               size_t index_size) {
  assert_different_registers(cache, tmp);

  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  // and from word offset to byte offset
  assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
  shll(tmp, 2 + LogBytesPerWord);
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  // skip past the header
  addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
  addptr(cache, tmp); // construct pointer to cache entry
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result,
                                                                 Register index,
                                                                 Register tmp) {
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  movptr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
  movptr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
  resolve_oop_handle(result, tmp);
  load_heap_oop(result, Address(result, index,
                                UseCompressedOops ?
                                Address::times_4 : Address::times_ptr,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)), tmp);
}

// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_index(Register klass,
                                                             Register cpool,
                                                             Register index) {
  assert_different_registers(cpool, index);

  movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
  Register resolved_klasses = cpool;
  movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes()));
  movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
}

void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
                                                              Register method,
                                                              Register cache,
                                                              Register index) {
  assert_different_registers(cache, index);

  const int method_offset = in_bytes(
    ConstantPoolCache::base_offset() +
      ((byte_no == TemplateTable::f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()));

  movptr(method, Address(cache, index, Address::times_ptr, method_offset)); // get f1 Method*
}

// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
// Args:
//      rax: superklass
//      Rsub_klass: subklass
//
// Kills:
//      rcx, rdi
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype,
                                                  bool profile) {
  assert(Rsub_klass != rax, "rax holds superklass");
  LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
  LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
  assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
  assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  if (profile) {
    profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi
  }

  // Do the check.
  check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx

  // Profile the failure of the check.
  if (profile) {
    profile_typecheck_failed(rcx); // blows rcx
  }
}


#ifndef _LP64
void InterpreterMacroAssembler::f2ieee() {
  if (IEEEPrecision) {
    fstp_s(Address(rsp, 0));
    fld_s(Address(rsp, 0));
  }
}


void InterpreterMacroAssembler::d2ieee() {
  if (IEEEPrecision) {
    fstp_d(Address(rsp, 0));
    fld_d(Address(rsp, 0));
  }
}
#endif // _LP64

// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  pop(r);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_i(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}

void InterpreterMacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void InterpreterMacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}

void InterpreterMacroAssembler::pop_d(XMMRegister r) {
  movdbl(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

#ifdef _LP64
void InterpreterMacroAssembler::pop_i(Register r) {
  // XXX can't use pop currently, upper half non clean
  movl(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void InterpreterMacroAssembler::pop_l(Register r) {
  movq(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_l(Register r) {
  subptr(rsp, 2 * wordSize);
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(0)), r);
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(1)), NULL_WORD);
}

void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr(); break;
    case btos:
    case ztos:
    case ctos:
    case stos:
    case itos: pop_i(); break;
    case ltos: pop_l(); break;
    case ftos: pop_f(xmm0); break;
    case dtos: pop_d(xmm0); break;
    case vtos: /* nothing to do */ break;
    default:   ShouldNotReachHere();
  }
  verify_oop(rax, state);
}

void InterpreterMacroAssembler::push(TosState state) {
  verify_oop(rax, state);
  switch (state) {
    case atos: push_ptr(); break;
    case btos:
    case ztos:
    case ctos:
    case stos:
    case itos: push_i(); break;
    case ltos: push_l(); break;
    case ftos: push_f(xmm0); break;
    case dtos: push_d(xmm0); break;
    case vtos: /* nothing to do */ break;
    default  : ShouldNotReachHere();
  }
}
#else
void InterpreterMacroAssembler::pop_i(Register r) {
  pop(r);
}

void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
  pop(lo);
  pop(hi);
}

void InterpreterMacroAssembler::pop_f() {
  fld_s(Address(rsp, 0));
  addptr(rsp, 1 * wordSize);
}

void InterpreterMacroAssembler::pop_d() {
  fld_d(Address(rsp, 0));
  addptr(rsp, 2 * wordSize);
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr(rax); break;
    case btos: // fall through
    case ztos: // fall through
    case ctos: // fall through
    case stos: // fall through
    case itos: pop_i(rax); break;
    case ltos: pop_l(rax, rdx); break;
    case ftos:
      if (UseSSE >= 1) {
        pop_f(xmm0);
      } else {
        pop_f();
      }
      break;
    case dtos:
      if (UseSSE >= 2) {
        pop_d(xmm0);
      } else {
        pop_d();
      }
      break;
    case vtos: /* nothing to do */ break;
    default  : ShouldNotReachHere();
  }
  verify_oop(rax, state);
}


void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
  push(hi);
  push(lo);
}

void InterpreterMacroAssembler::push_f() {
  // Do not schedule for no AGI! Never write beyond rsp!
  subptr(rsp, 1 * wordSize);
  fstp_s(Address(rsp, 0));
}

void InterpreterMacroAssembler::push_d() {
  // Do not schedule for no AGI! Never write beyond rsp!
  subptr(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}


void InterpreterMacroAssembler::push(TosState state) {
  verify_oop(rax, state);
  switch (state) {
    case atos: push_ptr(rax); break;
    case btos: // fall through
    case ztos: // fall through
    case ctos: // fall through
    case stos: // fall through
    case itos: push_i(rax); break;
    case ltos: push_l(rax, rdx); break;
    case ftos:
      if (UseSSE >= 1) {
        push_f(xmm0);
      } else {
        push_f();
      }
      break;
    case dtos:
      if (UseSSE >= 2) {
        push_d(xmm0);
      } else {
        push_d();
      }
      break;
    case vtos: /* nothing to do */ break;
    default  : ShouldNotReachHere();
  }
}
#endif // _LP64


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
}


void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  lea(_bcp_register, Address(rsp, wordSize));
  // record last_sp
  movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), _bcp_register);
}


// Jump to from_interpreted entry of a call unless single stepping is possible
// in this thread, in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
  prepare_to_jump_from_interpreted();

  if (JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    // interp_only is an int, on little endian it is sufficient to test the byte only
    // Is a cmpl faster?
    LP64_ONLY(temp = r15_thread;)
    NOT_LP64(get_thread(temp);)
    cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
    jccb(Assembler::zero, run_compiled_code);
    jmp(Address(method, Method::interpreter_entry_offset()));
    bind(run_compiled_code);
  }

  jmp(Address(method, Method::from_interpreted_offset()));
}

// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts.  x86 does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
  // Nothing x86 specific to be done here
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}

void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              address* table,
                                              bool verifyoop,
                                              bool generate_poll) {
  verify_FPU(1, state);
  if (VerifyActivationFrameSize) {
    Label L;
    mov(rcx, rbp);
    subptr(rcx, rsp);
    int32_t min_frame_size =
      (frame::link_offset - frame::interpreter_frame_initial_sp_offset) *
      wordSize;
    cmpptr(rcx, (int32_t)min_frame_size);
    jcc(Assembler::greaterEqual, L);
    stop("broken stack frame");
    bind(L);
  }
  if (verifyoop) {
    verify_oop(rax, state);
  }

  address* const safepoint_table = Interpreter::safept_table(state);
#ifdef _LP64
  Label no_safepoint, dispatch;
  if (SafepointMechanism::uses_thread_local_poll() && table != safepoint_table && generate_poll) {
    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
    testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());

    jccb(Assembler::zero, no_safepoint);
    lea(rscratch1, ExternalAddress((address)safepoint_table));
    jmpb(dispatch);
  }

  bind(no_safepoint);
  lea(rscratch1, ExternalAddress((address)table));
  bind(dispatch);
  jmp(Address(rscratch1, rbx, Address::times_8));

#else
  Address index(noreg, rbx, Address::times_ptr);
  if (SafepointMechanism::uses_thread_local_poll() && table != safepoint_table && generate_poll) {
    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
    Label no_safepoint;
    const Register thread = rcx;
    get_thread(thread);
    testb(Address(thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());

    jccb(Assembler::zero, no_safepoint);
    ArrayAddress dispatch_addr(ExternalAddress((address)safepoint_table), index);
    jump(dispatch_addr);
    bind(no_safepoint);
  }

  {
    ArrayAddress dispatch_addr(ExternalAddress((address)table), index);
    jump(dispatch_addr);
  }
#endif // _LP64
}

void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state), false);
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
  // load next bytecode (load before advancing _bcp_register to prevent AGI)
  load_unsigned_byte(rbx, Address(_bcp_register, step));
  // advance _bcp_register
  increment(_bcp_register, step);
  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  load_unsigned_byte(rbx, Address(_bcp_register, 0));
  dispatch_base(state, table);
}

void InterpreterMacroAssembler::narrow(Register result) {

  // Get method->_constMethod->_result_type
  movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
  movptr(rcx, Address(rcx, Method::const_offset()));
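  // rcx now holds the ConstMethod*; fetch the declared result type (a BasicType byte).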
  load_unsigned_byte(rcx, Address(rcx, ConstMethod::result_type_offset()));

  Label done, notBool, notByte, notChar;

  // common case first
  cmpl(rcx, T_INT);
  jcc(Assembler::equal, done);

  // mask integer result to narrower return type.
  cmpl(rcx, T_BOOLEAN);
  jcc(Assembler::notEqual, notBool);
  andl(result, 0x1);
  jmp(done);

  bind(notBool);
  cmpl(rcx, T_BYTE);
  jcc(Assembler::notEqual, notByte);
  LP64_ONLY(movsbl(result, result);)
  NOT_LP64(shll(result, 24);) // truncate upper 24 bits
  NOT_LP64(sarl(result, 24);) // and sign-extend byte
  jmp(done);

  bind(notByte);
  cmpl(rcx, T_CHAR);
  jcc(Assembler::notEqual, notChar);
  LP64_ONLY(movzwl(result, result);)
  NOT_LP64(andl(result, 0xFFFF);) // truncate upper 16 bits
  jmp(done);

  bind(notChar);
  // cmpl(rcx, T_SHORT);  // all that's left
  // jcc(Assembler::notEqual, done);
  LP64_ONLY(movswl(result, result);)
  NOT_LP64(shll(result, 16);) // truncate upper 16 bits
  NOT_LP64(sarl(result, 16);) // and sign-extend short

  // Nothing to do for T_INT
  bind(done);
}

// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(
        TosState state,
        Register ret_addr,
        bool throw_monitor_exception,
        bool install_monitor_exception,
        bool notify_jvmdi) {
  // Note: Registers rdx and xmm0 may be in use for the
  // result check if this is a synchronized method
  Label unlocked, unlock, no_unlock;

  const Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  const Register robj    = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
  const Register rmon    = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
  // monitor pointers need different register
  // because rdx may have the result in it
  NOT_LP64(get_thread(rcx);)

  // get the value of _do_not_unlock_if_synchronized into rbx
  const Address do_not_unlock_if_synchronized(rthread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  movbool(rbx, do_not_unlock_if_synchronized);
  movbool(do_not_unlock_if_synchronized, false); // reset the flag

  // get method access flags
  movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
  movl(rcx, Address(rcx, Method::access_flags_offset()));
  testl(rcx, JVM_ACC_SYNCHRONIZED);
  jcc(Assembler::zero, unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  testbool(rbx);
  jcc(Assembler::notZero, no_unlock);

  // unlock monitor
  push(state); // save result

  // BasicObjectLock will be first in list, since this is a
  // synchronized method. However, need to check that the object has
  // not been unlocked by an explicit monitorexit bytecode.
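  // That first monitor lies immediately below the frame's initial expression
  // stack bottom, hence the -sizeof(BasicObjectLock) adjustment below.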
  const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
                        wordSize - (int) sizeof(BasicObjectLock));
  // We use c_rarg1/rdx so that if we go slow path it will be the correct
  // register for unlock_object to pass to VM directly
  lea(robj, monitor); // address of first monitor

  movptr(rax, Address(robj, BasicObjectLock::obj_offset_in_bytes()));
  testptr(rax, rax);
  jcc(Assembler::notZero, unlock);

  pop(state);
  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw exception
    NOT_LP64(empty_FPU_stack();) // remove possible return value from FPU-stack, otherwise stack could overflow
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll. If requested,
    // install an illegal_monitor_state_exception. Continue with
    // stack unrolling.
    if (install_monitor_exception) {
      NOT_LP64(empty_FPU_stack();)
      call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    jmp(unlocked);
  }

  bind(unlock);
  unlock_object(robj);
  pop(state);

  // Check for block-structured locking (i.e., that all locked
  // objects have been unlocked)
  bind(unlocked);

  // rax, rdx: Might contain return value

  // Check that all monitors are unlocked
  {
    Label loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);

    bind(restart);
    // We use c_rarg1 so that if we go slow path it will be the correct
    // register for unlock_object to pass to VM directly
    movptr(rmon, monitor_block_top); // points to current entry, starting
                                     // with top-most entry
    lea(rbx, monitor_block_bot);     // points to word before bottom of
                                     // monitor block
    jmp(entry);

    // Entry already locked, need to throw exception
    bind(exception);

    if (throw_monitor_exception) {
      // Throw exception
      NOT_LP64(empty_FPU_stack();)
      MacroAssembler::call_VM(noreg,
                              CAST_FROM_FN_PTR(address, InterpreterRuntime::
                                               throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      // We don't have to preserve c_rarg1 since we are going to throw an exception.
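      // Preserve the possible return value (TOS) across the unlock call; it is
      // restored right after.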
      push(state);
      mov(robj, rmon); // nop if robj and rmon are the same
      unlock_object(robj);
      pop(state);

      if (install_monitor_exception) {
        NOT_LP64(empty_FPU_stack();)
        call_VM(noreg, CAST_FROM_FN_PTR(address,
                                        InterpreterRuntime::
                                        new_illegal_monitor_state_exception));
      }

      jmp(restart);
    }

    bind(loop);
    // check if current entry is used
    cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
    jcc(Assembler::notEqual, exception);

    addptr(rmon, entry_size); // otherwise advance to next entry
    bind(entry);
    cmpptr(rmon, rbx); // check if bottom reached
    jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  if (StackReservedPages > 0) {
    movptr(rbx,
           Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
    // testing if reserved zone needs to be re-enabled
    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
    Label no_reserved_zone_enabling;

    NOT_LP64(get_thread(rthread);)

    cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_enabled);
    jcc(Assembler::equal, no_reserved_zone_enabling);

    cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
    jcc(Assembler::lessEqual, no_reserved_zone_enabling);

    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_delayed_StackOverflowError));
    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }

  // remove activation
  // get sender sp
  movptr(rbx,
         Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));

  if (state == atos && ValueTypeReturnedAsFields) {
    Label skip;
    // Test if the return type is a value type
    movptr(rdi, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
    movptr(rdi, Address(rdi, Method::const_offset()));
    load_unsigned_byte(rdi, Address(rdi, ConstMethod::result_type_offset()));
    cmpl(rdi, T_VALUETYPE);
    jcc(Assembler::notEqual, skip);

    // We are returning a value type, load its fields into registers
#ifndef _LP64
    super_call_VM_leaf(StubRoutines::load_value_type_fields_in_regs());
#else
    // Load fields from a buffered value with a value class specific handler
    load_klass(rdi, rax);
    movptr(rdi, Address(rdi, InstanceKlass::adr_valueklass_fixed_block_offset()));
    movptr(rdi, Address(rdi, ValueKlass::unpack_handler_offset()));

    testptr(rdi, rdi);
    jcc(Assembler::equal, skip);

    call(rdi);
#endif
    // call above kills the value in rbx. Reload it.
    movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
    bind(skip);
  }
  leave();       // remove frame anchor
  pop(ret_addr); // get return address
  mov(rsp, rbx); // set sp to sender sp
}

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::notZero, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}

void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
                                                  Register t1, Register t2,
                                                  bool clear_fields, Label& alloc_failed) {
  MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
  {
    SkipIfEqual skip_if(this, &DTraceAllocProbes, 0);
    // Trigger dtrace event for fastpath
    push(atos);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), new_obj);
    pop(atos);
  }
}


void InterpreterMacroAssembler::read_flattened_field(Register holder_klass,
                                                     Register field_index, Register field_offset,
                                                     Register obj) {
  Label alloc_failed, empty_value, done;
  const Register src = field_offset;
  const Register alloc_temp = LP64_ONLY(rscratch1) NOT_LP64(rsi);
  const Register dst_temp = LP64_ONLY(rscratch2) NOT_LP64(rdi);
  assert_different_registers(obj, holder_klass, field_index, field_offset, dst_temp);

  // Grab the inline field klass
  push(holder_klass);
  const Register field_klass = holder_klass;
  get_value_field_klass(holder_klass, field_index, field_klass);

  // check for empty value klass
  test_klass_is_empty_value(field_klass, dst_temp, empty_value);

  // allocate buffer
  push(obj); // save holder
  allocate_instance(field_klass, obj, alloc_temp, dst_temp, false, alloc_failed);

  // Have an oop instance buffer, copy into it
  data_for_oop(obj, dst_temp, field_klass);
  pop(alloc_temp); // restore holder
  lea(src, Address(alloc_temp, field_offset));
  // call_VM_leaf clobbers a few regs; save/restore the new obj
  push(obj);
  access_value_copy(IS_DEST_UNINITIALIZED, src, dst_temp, field_klass);
  pop(obj);
  pop(holder_klass);
  jmp(done);

  bind(empty_value);
  get_empty_value_oop(field_klass, dst_temp, obj);
  pop(holder_klass);
  jmp(done);

  bind(alloc_failed);
  pop(obj);
  pop(holder_klass);
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field),
          obj, field_index, holder_klass);

  bind(done);
}

// Lock object
//
// Args:
//      rdx, c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
//      rax, rbx
void InterpreterMacroAssembler::lock_object(Register lock_reg) {
  assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
         "The argument is only for looks. It must be c_rarg1");

  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
  } else {
    Label done;

    const Register swap_reg = rax; // Must use rax for cmpxchg instruction
    const Register tmp_reg = rbx;  // Will be passed to biased_locking_enter to avoid a
                                   // problematic case where tmp_reg = no_reg.
    const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop

    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
    const int lock_offset = BasicObjectLock::lock_offset_in_bytes();
    const int mark_offset = lock_offset +
                            BasicLock::displaced_header_offset_in_bytes();

    Label slow_case;

    // Load object pointer into obj_reg
    movptr(obj_reg, Address(lock_reg, obj_offset));

    if (UseBiasedLocking) {
      biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, false, done, &slow_case);
    }

    // Load immediate 1 into swap_reg %rax
    movl(swap_reg, (int32_t)1);

    // Load (object->mark() | 1) into swap_reg %rax
    orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    if (EnableValhalla && !UseBiasedLocking) {
      // The slow path for is_always_locked uses the biased pattern, which never
      // occurs naturally when !UseBiasedLocking.
      andptr(swap_reg, ~((int) markWord::biased_lock_bit_in_place));
    }

    // Save (object->mark() | 1) into BasicLock's displaced header
    movptr(Address(lock_reg, mark_offset), swap_reg);

    assert(lock_offset == 0,
           "displaced header must be first word in BasicObjectLock");

    lock();
    cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    if (PrintBiasedLockingStatistics) {
      cond_inc32(Assembler::zero,
                 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
    }
    jcc(Assembler::zero, done);

    const int zero_bits = LP64_ONLY(7) NOT_LP64(3);

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & zero_bits) == 0, and
    //  2) rsp <= mark < mark + os::pagesize()
    //
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
    // assuming both stack pointer and pagesize have their
    // least significant bits clear.
    // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
    subptr(swap_reg, rsp);
    andptr(swap_reg, zero_bits - os::vm_page_size());

    // Save the test result, for recursive case, the result is zero
    movptr(Address(lock_reg, mark_offset), swap_reg);

    if (PrintBiasedLockingStatistics) {
      cond_inc32(Assembler::zero,
                 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
    }
    jcc(Assembler::zero, done);

    bind(slow_case);

    // Call the runtime routine for slow case
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);

    bind(done);
  }
}


// Unlocks an object. Used in monitorexit bytecode and
// remove_activation.  Throws an IllegalMonitorException if object is
// not locked by current thread.
//
// Args:
//      rdx, c_rarg1: BasicObjectLock for lock
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
//      rscratch1 (scratch reg)
//      rax, rbx, rcx, rdx
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
         "The argument is only for looks. It must be c_rarg1");

  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
            lock_reg);
  } else {
    Label done;

    const Register swap_reg   = rax; // Must use rax for cmpxchg instruction
    const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx); // Will contain the old oopMark
    const Register obj_reg    = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop

    save_bcp(); // Save in case of exception

    // Convert from BasicObjectLock structure to object and BasicLock
    // structure. Store the BasicLock address into %rax
    lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));

    // Load oop into obj_reg(%c_rarg3)
    movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));

    // Free entry
    movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);

    if (UseBiasedLocking) {
      biased_locking_exit(obj_reg, header_reg, done);
    }

    // Load the old header from BasicLock structure
    movptr(header_reg, Address(swap_reg,
                               BasicLock::displaced_header_offset_in_bytes()));

    // Test for recursion
    testptr(header_reg, header_reg);

    // zero for recursive case
    jcc(Assembler::zero, done);

    // Atomic swap back the old header
    lock();
    cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

    // zero for simple unlock of a stack-lock case
    jcc(Assembler::zero, done);

    // Call the runtime routine for slow case.
    movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()),
           obj_reg); // restore obj
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
            lock_reg);

    bind(done);

    restore_bcp();
  }
}

void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                         Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
  testptr(mdp, mdp);
  jcc(Assembler::zero, zero_continue);
}


// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label set_mdp;
  push(rax);
  push(rbx);

  get_method(rbx);
  // Test MDO to avoid the call if it is NULL.
  movptr(rax, Address(rbx, in_bytes(Method::method_data_offset())));
  testptr(rax, rax);
  jcc(Assembler::zero, set_mdp);
  // rbx: method
  // _bcp_register: bcp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, _bcp_register);
  // rax: mdi
  // mdo is guaranteed to be non-zero here, we checked for it before the call.
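  // Compute the mdp as MDO + data-section offset + mdi (the mdi is in rax).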
  movptr(rbx, Address(rbx, in_bytes(Method::method_data_offset())));
  addptr(rbx, in_bytes(MethodData::data_offset()));
  addptr(rax, rbx);
  bind(set_mdp);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), rax);
  pop(rbx);
  pop(rax);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  push(rax);
  push(rbx);
  Register arg3_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
  Register arg2_reg = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
  push(arg3_reg);
  push(arg2_reg);
  test_method_data_pointer(arg3_reg, verify_continue); // If mdp is zero, continue
  get_method(rbx);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
  load_unsigned_short(arg2_reg,
                      Address(arg3_reg, in_bytes(DataLayout::bci_offset())));
  addptr(arg2_reg, Address(rbx, Method::const_offset()));
  lea(arg2_reg, Address(arg2_reg, ConstMethod::codes_offset()));
  cmpptr(arg2_reg, _bcp_register);
  jcc(Assembler::equal, verify_continue);
  // rbx: method
  // _bcp_register: bcp
  // c_rarg3: mdp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
               rbx, _bcp_register, arg3_reg);
  bind(verify_continue);
  pop(arg2_reg);
  pop(arg3_reg);
  pop(rbx);
  pop(rax);
#endif // ASSERT
}


void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
                                                int constant,
                                                Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, constant);
  movptr(data, value);
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int constant,
                                                      bool decrement) {
  // Counter address
  Address data(mdp_in, constant);

  increment_mdp_data_at(data, decrement);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // %%% this does 64-bit counters; at best it is wasting space,
  // at worst it is a rare bug when counters overflow

  if (decrement) {
    // Decrement the register.  Set condition codes.
    addptr(data, (int32_t) -DataLayout::counter_increment);
    // If the decrement causes the counter to overflow, stay negative
    Label L;
    jcc(Assembler::negative, L);
    addptr(data, (int32_t) DataLayout::counter_increment);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Increment the register.  Set carry flag.
    addptr(data, DataLayout::counter_increment);
    // If the increment causes the counter to overflow, pull back by 1.
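    // The wrap-around sets the carry flag, so the sbb subtracts 1 exactly when
    // the counter overflowed, leaving it saturated at its maximum value.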
    sbbptr(data, (int32_t)0);
  }
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      Register reg,
                                                      int constant,
                                                      bool decrement) {
  Address data(mdp_in, reg, Address::times_1, constant);

  increment_mdp_data_at(data, decrement);
}

void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                int flag_byte_constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  int header_offset = in_bytes(DataLayout::flags_offset());
  int header_bits = flag_byte_constant;
  // Set the flag
  orb(Address(mdp_in, header_offset), header_bits);
}


void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    cmpptr(value, Address(mdp_in, offset));
  } else {
    // Put the test value into a register, so caller can use it:
    movptr(test_value_out, Address(mdp_in, offset));
    cmpptr(test_value_out, value);
  }
  jcc(Assembler::notEqual, not_equal_continue);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     Register reg,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
                                                       int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  addptr(mdp_in, constant);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(return_bci); // save/restore across call_VM
  call_VM(noreg,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
          return_bci);
  pop(return_bci);
}


void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
                                                     Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch.  Increment the taken count.
    // We inline increment_mdp_data_at to return bumped_count in a register
    //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
    Address data(mdp, in_bytes(JumpData::taken_offset()));
    movptr(bumped_count, data);
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    addptr(bumped_count, DataLayout::counter_increment);
    sbbptr(bumped_count, 0);
    movptr(data, bumped_count); // Store back out

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are not taking a branch. Increment the not-taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register mdp,
                                                     Register reg2,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      testptr(receiver, receiver);
      jccb(Assembler::notZero, not_null);
      // We are making a call. Increment the count for the null receiver.
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
      jmp(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, mdp, reg2, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
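    // Descriptive note: when JVMCI is included and MethodProfileWidth > 0, the
    // mdp is not advanced here; profile_called_method() below performs the same
    // update after the callee method has been recorded.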
#if INCLUDE_JVMCI
    if (MethodProfileWidth == 0) {
      update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    }
#else // INCLUDE_JVMCI
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
#endif // INCLUDE_JVMCI
    bind(profile_continue);
  }
}

#if INCLUDE_JVMCI
void InterpreterMacroAssembler::profile_called_method(Register method, Register mdp, Register reg2) {
  assert_different_registers(method, mdp, reg2);
  if (ProfileInterpreter && MethodProfileWidth > 0) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label done;
    record_item_in_profile_helper(method, mdp, reg2, 0, done, MethodProfileWidth,
                                  &VirtualCallData::method_offset,
                                  &VirtualCallData::method_count_offset,
                                  in_bytes(VirtualCallData::nonprofiled_receiver_count_offset()));
    bind(done);

    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}
#endif // INCLUDE_JVMCI

// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows. At the same time, it remembers
// the location of the first empty row. (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree. Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register mdp,
                                        Register reg2, int start_row,
                                        Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      increment_mdp_data_at(mdp, in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset()));
    }
#endif // INCLUDE_JVMCI
  } else {
    int non_profiled_offset = -1;
    if (is_virtual_call) {
      non_profiled_offset = in_bytes(CounterData::count_offset());
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      non_profiled_offset = in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset());
    }
#endif // INCLUDE_JVMCI

    record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
                                  &VirtualCallData::receiver_offset,
                                  &VirtualCallData::receiver_count_offset,
                                  non_profiled_offset);
  }
}

void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
                                        Register reg2, int start_row, Label& done, int total_rows,
                                        OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
                                        int non_profiled_offset) {
  int last_row = total_rows - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the item and for null.
  // Take any of three different outcomes:
  //   1. found item => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the item is item[n].
    int item_offset = in_bytes(item_offset_fn(row));
    test_mdp_data_at(mdp, item_offset, item,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
    // (Reg2 now contains the item from the CallData.)

    // The item is item[n]. Increment count[n].
    int count_offset = in_bytes(item_count_offset_fn(row));
    increment_mdp_data_at(mdp, count_offset);
    jmp(done);
    bind(next_test);

    if (test_for_null_also) {
      // Failed the equality check on item[n]... Test for null.
      testptr(reg2, reg2);
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (non_profiled_offset >= 0) {
          Label found_null;
          jccb(Assembler::zero, found_null);
          // Item did not match any saved item and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(mdp, non_profiled_offset);
          jmp(done);
          bind(found_null);
        } else {
          jcc(Assembler::notZero, done);
        }
        break;
      }
      Label found_null;
      // Since null is rare, make it be the branch-taken case.
      jcc(Assembler::zero, found_null);

      // Put all the "Case 3" tests here.
      record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
                                    item_offset_fn, item_count_offset_fn, non_profiled_offset);

      // Found a null. Keep searching for a matching item,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching item, but we
  // observed that item[start_row] is NULL.

  // Fill in the item field and increment the count.
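  // Pseudocode sketch of the initialization below (illustrative only):
  //   row[start_row].item  = item;                           // claim the empty row
  //   row[start_row].count = DataLayout::counter_increment;  // i.e. count = 1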
  int item_offset = in_bytes(item_offset_fn(start_row));
  set_mdp_data_at(mdp, item_offset, item);
  int count_offset = in_bytes(item_count_offset_fn(start_row));
  movl(reg2, DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg2);
  if (start_row > 0) {
    jmp(done);
  }
}

// Example state machine code for three profile rows:
//   // main copy of decision tree, rooted at row[0]
//   if (row[0].rec == rec) { row[0].incr(); goto done; }
//   if (row[0].rec != NULL) {
//     // inner copy of decision tree, rooted at row[1]
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[1].rec != NULL) {
//       // degenerate decision tree, rooted at row[2]
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
//       row[2].init(rec); goto done;
//     } else {
//       // remember row[1] is empty
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       row[1].init(rec); goto done;
//     }
//   } else {
//     // remember row[0] is empty
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[2].rec == rec) { row[2].incr(); goto done; }
//     row[0].init(rec); goto done;
//   }
// done:

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp, Register reg2,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);

  bind(done);
}

void InterpreterMacroAssembler::profile_ret(Register return_bci,
                                            Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp,
                       in_bytes(RetData::bci_offset(row)),
                       return_bci, noreg,
                       next_test);

      // return_bci is equal to bci[n]. Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp,
                           in_bytes(RetData::bci_displacement_offset(row)));
      jmp(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
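    // Descriptive note: the amount to step over depends on what was allocated
    // for this bytecode, either a plain BitData or the larger record (sized
    // like a VirtualCallData) used when TypeProfileCasts is enabled.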
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter. We expect to see zero or small negatives.
    increment_mdp_data_at(mdp, count_offset, true);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, reg2, false);
      NOT_LP64(assert(reg2 == rdi, "we know how to fix this blown reg");)
      NOT_LP64(restore_locals();)  // Restore EDI
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count
    increment_mdp_data_at(mdp,
                          in_bytes(MultiBranchData::default_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         in_bytes(MultiBranchData::
                                  default_displacement_offset()));

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register mdp,
                                                    Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base (index * per_case_size_in_bytes()) +
    // case_array_offset_in_bytes()
    movl(reg2, in_bytes(MultiBranchData::per_case_size()));
    imulptr(index, reg2); // XXX l ?
    addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?

    // Update the case count
    increment_mdp_data_at(mdp,
                          index,
                          in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         index,
                         in_bytes(MultiBranchData::
                                  relative_displacement_offset()));

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_array(Register mdp,
                                              Register array,
                                              Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;
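
    // Descriptive note: ArrayLoadStoreData records the observed array type and,
    // via the flags set further down, whether the array was flat and whether it
    // was null-free.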

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    mov(tmp, array);
    profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::array_offset())));

    Label not_flat;
    test_non_flattened_array_oop(array, tmp, not_flat);

    set_mdp_flag_at(mdp, ArrayLoadStoreData::flat_array_byte_constant());

    bind(not_flat);

    Label not_null_free;
    test_non_null_free_array_oop(array, tmp, not_null_free);

    set_mdp_flag_at(mdp, ArrayLoadStoreData::null_free_array_byte_constant());

    bind(not_null_free);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_element(Register mdp,
                                                Register element,
                                                Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    mov(tmp, element);
    profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::element_offset())));

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp, in_bytes(ArrayLoadStoreData::array_load_store_data_size()));

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
  if (state == atos) {
    MacroAssembler::verify_oop(reg);
  }
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
#ifndef _LP64
  if ((state == ftos && UseSSE < 1) ||
      (state == dtos && UseSSE < 2)) {
    MacroAssembler::verify_FPU(stack_depth);
  }
#endif
}

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask,
                                                        Register scratch, bool preloaded,
                                                        Condition cond, Label* where) {
  if (!preloaded) {
    movl(scratch, counter_addr);
  }
  incrementl(scratch, increment);
  movl(counter_addr, scratch);
  andl(scratch, mask);
  if (where != NULL) {
    jcc(cond, *where);
  }
}

void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
  // track stack depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
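  // Illustrative summary of the code emitted below (not itself emitted):
  //   if (thread->interp_only_mode() != 0)  call InterpreterRuntime::post_method_entry();
  //   if (DTraceMethodProbes)               call SharedRuntime::dtrace_method_entry(thread, method);
  //   plus a RedefineClasses tracing call when obsolete-method logging is enabled.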
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    NOT_LP64(get_thread(rthread);)
    movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 rthread, rarg);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rthread, rarg);
  }
}


void InterpreterMacroAssembler::notify_method_exit(
    TosState state, NotifyMethodExitMode mode) {
  // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
  // track stack depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    // The template interpreter will leave the result on the top of the stack.
    push(state);
    NOT_LP64(get_thread(rthread);)
    movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    bind(L);
    pop(state);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    push(state);
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                 rthread, rarg);
    pop(state);
  }
}