1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "interp_masm_x86.hpp"
  27 #include "interpreter/interpreter.hpp"
  28 #include "interpreter/interpreterRuntime.hpp"
  29 #include "logging/log.hpp"
  30 #include "memory/vtBuffer.hpp"
  31 #include "oops/arrayOop.hpp"
  32 #include "oops/markOop.hpp"
  33 #include "oops/methodData.hpp"
  34 #include "oops/method.hpp"
  35 #include "oops/valueKlass.hpp"
  36 #include "prims/jvmtiExport.hpp"
  37 #include "prims/jvmtiThreadState.hpp"
  38 #include "runtime/basicLock.hpp"
  39 #include "runtime/biasedLocking.hpp"
  40 #include "runtime/safepointMechanism.hpp"
  41 #include "runtime/sharedRuntime.hpp"
  42 #include "runtime/thread.inline.hpp"
  43 
  44 // Implementation of InterpreterMacroAssembler
  45 
  46 void InterpreterMacroAssembler::jump_to_entry(address entry) {
  47   assert(entry, "Entry must have been generated by now");
  48   jump(RuntimeAddress(entry));
  49 }
  50 
  51 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  52   Label update, next, none;
  53 
  54   verify_oop(obj);
  55 
  56   testptr(obj, obj);
  57   jccb(Assembler::notZero, update);
  58   orptr(mdo_addr, TypeEntries::null_seen);
  59   jmpb(next);
  60 
  61   bind(update);
  62   load_klass(obj, obj);
  63 
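       // Compare the loaded klass with the recorded type by XOR-ing it into the
       // profile word: if the klass matches, only the flag bits (outside
       // type_klass_mask) can remain set.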
  64   xorptr(obj, mdo_addr);
  65   testptr(obj, TypeEntries::type_klass_mask);
  66   jccb(Assembler::zero, next); // klass seen before, nothing to
  67                                // do. The unknown bit may have been
  68                                // set already but no need to check.
  69 
  70   testptr(obj, TypeEntries::type_unknown);
  71   jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
  72 
  73   cmpptr(mdo_addr, 0);
  74   jccb(Assembler::equal, none);
  75   cmpptr(mdo_addr, TypeEntries::null_seen);
  76   jccb(Assembler::equal, none);
  77   // There is a chance that the checks above (re-reading profiling
  78   // data from memory) fail if another thread has just set the
  79   // profiled type to this obj's klass.
  80   xorptr(obj, mdo_addr);
  81   testptr(obj, TypeEntries::type_klass_mask);
  82   jccb(Assembler::zero, next);
  83 
  84   // different than before. Cannot keep accurate profile.
  85   orptr(mdo_addr, TypeEntries::type_unknown);
  86   jmpb(next);
  87 
  88   bind(none);
  89   // first time here. Set profile type.
  90   movptr(mdo_addr, obj);
  91 
  92   bind(next);
  93 }
  94 
  95 void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
  96   if (!ProfileInterpreter) {
  97     return;
  98   }
  99 
 100   if (MethodData::profile_arguments() || MethodData::profile_return()) {
 101     Label profile_continue;
 102 
 103     test_method_data_pointer(mdp, profile_continue);
 104 
 105     int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
 106 
 107     cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
 108     jcc(Assembler::notEqual, profile_continue);
 109 
 110     if (MethodData::profile_arguments()) {
 111       Label done;
 112       int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
 113       addptr(mdp, off_to_args);
 114 
 115       for (int i = 0; i < TypeProfileArgsLimit; i++) {
 116         if (i > 0 || MethodData::profile_return()) {
 117           // If return value type is profiled we may have no argument to profile
 118           movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
 119           subl(tmp, i*TypeStackSlotEntries::per_arg_count());
 120           cmpl(tmp, TypeStackSlotEntries::per_arg_count());
 121           jcc(Assembler::less, done);
 122         }
 123         movptr(tmp, Address(callee, Method::const_offset()));
 124         load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
 125         // stack offset o (zero based) from the start of the argument
 126         // list, for n arguments translates into offset n - o - 1 from
 127         // the end of the argument list
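             // (e.g. with n == 3 parameters, slot o == 0, the first argument, maps
             // to element 3 - 0 - 1 == 2 counting from the end of the list)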
 128         subptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
 129         subl(tmp, 1);
 130         Address arg_addr = argument_address(tmp);
 131         movptr(tmp, arg_addr);
 132 
 133         Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
 134         profile_obj_type(tmp, mdo_arg_addr);
 135 
 136         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
 137         addptr(mdp, to_add);
 138         off_to_args += to_add;
 139       }
 140 
 141       if (MethodData::profile_return()) {
 142         movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
 143         subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
 144       }
 145 
 146       bind(done);
 147 
 148       if (MethodData::profile_return()) {
 149         // We're right after the type profile for the last
 150         // argument. tmp is the number of cells left in the
 151         // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
 152         // if there's a return value to profile.
 153         assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
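             // convert that remaining cell count into a byte offset and step mdp
             // over those cells, i.e. past the return type entry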
 154         shll(tmp, exact_log2(DataLayout::cell_size));
 155         addptr(mdp, tmp);
 156       }
 157       movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
 158     } else {
 159       assert(MethodData::profile_return(), "either profile call args or call ret");
 160       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
 161     }
 162 
 163     // mdp points right after the end of the
 164     // CallTypeData/VirtualCallTypeData, right after the cells for the
 165     // return value type if there's one
 166 
 167     bind(profile_continue);
 168   }
 169 }
 170 
 171 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
 172   assert_different_registers(mdp, ret, tmp, _bcp_register);
 173   if (ProfileInterpreter && MethodData::profile_return()) {
 174     Label profile_continue, done;
 175 
 176     test_method_data_pointer(mdp, profile_continue);
 177 
 178     if (MethodData::profile_return_jsr292_only()) {
 179       assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
 180 
 181       // If we don't profile all invoke bytecodes we must make sure
 182       // it's a bytecode we indeed profile. We can't go back to the
 183       // beginning of the ProfileData we intend to update to check its
 184       // type because we're right after it and we don't know its
 185       // length.
 186       Label do_profile;
 187       cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
 188       jcc(Assembler::equal, do_profile);
 189       cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
 190       jcc(Assembler::equal, do_profile);
 191       get_method(tmp);
 192       cmpw(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
 193       jcc(Assembler::notEqual, profile_continue);
 194 
 195       bind(do_profile);
 196     }
 197 
 198     Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
 199     mov(tmp, ret);
 200     profile_obj_type(tmp, mdo_ret_addr);
 201 
 202     bind(profile_continue);
 203   }
 204 }
 205 
 206 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
 207   if (ProfileInterpreter && MethodData::profile_parameters()) {
 208     Label profile_continue, done;
 209 
 210     test_method_data_pointer(mdp, profile_continue);
 211 
 212     // Load the offset of the area within the MDO used for
 213     // parameters. If it's negative we're not profiling any parameters
 214     movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
 215     testl(tmp1, tmp1);
 216     jcc(Assembler::negative, profile_continue);
 217 
 218     // Compute a pointer to the area for parameters from the offset
 219     // and move the pointer to the slot for the last
 220     // parameters. Collect profiling from last parameter down.
 221     // mdo start + parameters offset + array length - 1
 222     addptr(mdp, tmp1);
 223     movptr(tmp1, Address(mdp, ArrayData::array_len_offset()));
 224     decrement(tmp1, TypeStackSlotEntries::per_arg_count());
 225 
 226     Label loop;
 227     bind(loop);
 228 
 229     int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
 230     int type_base = in_bytes(ParametersTypeData::type_offset(0));
 231     Address::ScaleFactor per_arg_scale = Address::times(DataLayout::cell_size);
 232     Address arg_off(mdp, tmp1, per_arg_scale, off_base);
 233     Address arg_type(mdp, tmp1, per_arg_scale, type_base);
 234 
 235     // load offset on the stack from the slot for this parameter
 236     movptr(tmp2, arg_off);
 237     negptr(tmp2);
 238     // read the parameter from the local area
 239     movptr(tmp2, Address(_locals_register, tmp2, Interpreter::stackElementScale()));
 240 
 241     // profile the parameter
 242     profile_obj_type(tmp2, arg_type);
 243 
 244     // go to next parameter
 245     decrement(tmp1, TypeStackSlotEntries::per_arg_count());
 246     jcc(Assembler::positive, loop);
 247 
 248     bind(profile_continue);
 249   }
 250 }
 251 
 252 void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
 253                                                   int number_of_arguments) {
 254   // interpreter specific
 255   //
 256   // Note: No need to save/restore bcp & locals registers
 257   //       since these are callee saved registers and no blocking/
 258   //       GC can happen in leaf calls.
 259   // Further Note: DO NOT save/restore bcp/locals. If a caller has
 260   // already saved them so that it can use rsi/rdi as temporaries
 261   // then a save/restore here will DESTROY the copy the caller
 262   // saved! There used to be a save_bcp() that only happened in
 263   // the ASSERT path (no restore_bcp), which caused bizarre failures
 264   // when the JVM was built with ASSERTs.
 265 #ifdef ASSERT
 266   {
 267     Label L;
 268     cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
 269     jcc(Assembler::equal, L);
 270     stop("InterpreterMacroAssembler::call_VM_leaf_base:"
 271          " last_sp != NULL");
 272     bind(L);
 273   }
 274 #endif
 275   // super call
 276   MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
 277   // interpreter specific
 278   // LP64: We used to ASSERT that r13/r14 were equal to the frame's
 279   // bcp/locals, but since they may not have been saved (and we don't
 280   // want to save them here; see note above) the assert is invalid.
 281 }
 282 
 283 void InterpreterMacroAssembler::call_VM_base(Register oop_result,
 284                                              Register java_thread,
 285                                              Register last_java_sp,
 286                                              address  entry_point,
 287                                              int      number_of_arguments,
 288                                              bool     check_exceptions) {
 289   // interpreter specific
 290   //
 291   // Note: Could avoid restoring the locals ptr (callee saved) - however it doesn't
 292   //       really make a difference for these runtime calls, since they are
 293   //       slow anyway. Btw., bcp must be saved/restored since it may change
 294   //       due to GC.
 295   NOT_LP64(assert(java_thread == noreg , "not expecting a precomputed java thread");)
 296   save_bcp();
 297 #ifdef ASSERT
 298   {
 299     Label L;
 300     cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
 301     jcc(Assembler::equal, L);
 302     stop("InterpreterMacroAssembler::call_VM_base:"
 303          " last_sp != NULL");
 304     bind(L);
 305   }
 306 #endif /* ASSERT */
 307   // super call
 308   MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
 309                                entry_point, number_of_arguments,
 310                                check_exceptions);
 311   // interpreter specific
 312   restore_bcp();
 313   restore_locals();
 314 }
 315 
 316 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
 317   if (JvmtiExport::can_pop_frame()) {
 318     Label L;
 319     // Initiate popframe handling only if it is not already being
 320     // processed.  If the flag has the popframe_processing bit set, it
 321     // means that this code is called *during* popframe handling - we
 322     // don't want to reenter.
 323     // This method is only called just after the call into the vm in
 324     // call_VM_base, so the arg registers are available.
 325     Register pop_cond = NOT_LP64(java_thread) // Not clear if any other register is available on 32 bit
 326                         LP64_ONLY(c_rarg0);
 327     movl(pop_cond, Address(java_thread, JavaThread::popframe_condition_offset()));
 328     testl(pop_cond, JavaThread::popframe_pending_bit);
 329     jcc(Assembler::zero, L);
 330     testl(pop_cond, JavaThread::popframe_processing_bit);
 331     jcc(Assembler::notZero, L);
 332     // Call Interpreter::remove_activation_preserving_args_entry() to get the
 333     // address of the same-named entrypoint in the generated interpreter code.
 334     call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
 335     jmp(rax);
 336     bind(L);
 337     NOT_LP64(get_thread(java_thread);)
 338   }
 339 }
 340 
 341 void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
 342   Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
 343   NOT_LP64(get_thread(thread);)
 344   movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
 345   const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset());
 346   const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset());
 347   const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset());
 348 #ifdef _LP64
 349   switch (state) {
 350     case atos: movptr(rax, oop_addr);
 351                movptr(oop_addr, (int32_t)NULL_WORD);
 352                verify_oop(rax, state);              break;
 353     case ltos: movptr(rax, val_addr);                 break;
 354     case btos:                                   // fall through
 355     case ztos:                                   // fall through
 356     case ctos:                                   // fall through
 357     case stos:                                   // fall through
 358     case itos: movl(rax, val_addr);                 break;
 359     case ftos: load_float(val_addr);                break;
 360     case dtos: load_double(val_addr);               break;
 361     case vtos: /* nothing to do */                  break;
 362     default  : ShouldNotReachHere();
 363   }
 364   // Clean up tos value in the thread object
 365   movl(tos_addr,  (int) ilgl);
 366   movl(val_addr,  (int32_t) NULL_WORD);
 367 #else
 368   const Address val_addr1(rcx, JvmtiThreadState::earlyret_value_offset()
 369                              + in_ByteSize(wordSize));
 370   switch (state) {
 371     case atos: movptr(rax, oop_addr);
 372                movptr(oop_addr, NULL_WORD);
 373                verify_oop(rax, state);                break;
 374     case ltos:
 375                movl(rdx, val_addr1);               // fall through
 376     case btos:                                     // fall through
 377     case ztos:                                     // fall through
 378     case ctos:                                     // fall through
 379     case stos:                                     // fall through
 380     case itos: movl(rax, val_addr);                   break;
 381     case ftos: load_float(val_addr);                  break;
 382     case dtos: load_double(val_addr);                 break;
 383     case vtos: /* nothing to do */                    break;
 384     default  : ShouldNotReachHere();
 385   }
 386 #endif // _LP64
 387   // Clean up tos value in the thread object
 388   movl(tos_addr,  (int32_t) ilgl);
 389   movptr(val_addr,  NULL_WORD);
 390   NOT_LP64(movptr(val_addr1, NULL_WORD);)
 391 }
 392 
 393 
 394 void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
 395   if (JvmtiExport::can_force_early_return()) {
 396     Label L;
 397     Register tmp = LP64_ONLY(c_rarg0) NOT_LP64(java_thread);
 398     Register rthread = LP64_ONLY(r15_thread) NOT_LP64(java_thread);
 399 
 400     movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
 401     testptr(tmp, tmp);
 402     jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit;
 403 
 404     // Initiate earlyret handling only if it is not already being processed.
 405     // If the flag has the earlyret_processing bit set, it means that this code
 406     // is called *during* earlyret handling - we don't want to reenter.
 407     movl(tmp, Address(tmp, JvmtiThreadState::earlyret_state_offset()));
 408     cmpl(tmp, JvmtiThreadState::earlyret_pending);
 409     jcc(Assembler::notEqual, L);
 410 
 411     // Call Interpreter::remove_activation_early_entry() to get the address of the
 412     // same-named entrypoint in the generated interpreter code.
 413     NOT_LP64(get_thread(java_thread);)
 414     movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
 415 #ifdef _LP64
 416     movl(tmp, Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
 417     call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), tmp);
 418 #else
 419     pushl(Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
 420     call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), 1);
 421 #endif // _LP64
 422     jmp(rax);
 423     bind(L);
 424     NOT_LP64(get_thread(java_thread);)
 425   }
 426 }
 427 
 428 void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
 429   assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
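       // Bytecode operands are stored big-endian: load the u2, byte-swap the
       // 32-bit register and shift right by 16 so the operand ends up as a
       // plain native integer.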
 430   load_unsigned_short(reg, Address(_bcp_register, bcp_offset));
 431   bswapl(reg);
 432   shrl(reg, 16);
 433 }
 434 
 435 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
 436                                                        int bcp_offset,
 437                                                        size_t index_size) {
 438   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
 439   if (index_size == sizeof(u2)) {
 440     load_unsigned_short(index, Address(_bcp_register, bcp_offset));
 441   } else if (index_size == sizeof(u4)) {
 442     movl(index, Address(_bcp_register, bcp_offset));
 443     // Check if the secondary index definition is still ~x, otherwise
 444     // we have to change the following assembler code to calculate the
 445     // plain index.
 446     assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
 447     notl(index);  // convert to plain index
 448   } else if (index_size == sizeof(u1)) {
 449     load_unsigned_byte(index, Address(_bcp_register, bcp_offset));
 450   } else {
 451     ShouldNotReachHere();
 452   }
 453 }
 454 
 455 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
 456                                                            Register index,
 457                                                            int bcp_offset,
 458                                                            size_t index_size) {
 459   assert_different_registers(cache, index);
 460   get_cache_index_at_bcp(index, bcp_offset, index_size);
 461   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
 462   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
 463   // convert from field index to ConstantPoolCacheEntry index
 464   assert(exact_log2(in_words(ConstantPoolCacheEntry::size())) == 2, "else change next line");
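       // i.e. index *= 4: each ConstantPoolCacheEntry spans 4 words, so this turns
       // the field index into a word index into the cache's entry array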
 465   shll(index, 2);
 466 }
 467 
 468 void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
 469                                                                         Register index,
 470                                                                         Register bytecode,
 471                                                                         int byte_no,
 472                                                                         int bcp_offset,
 473                                                                         size_t index_size) {
 474   get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
 475   // We use a 32-bit load here since the layout of 64-bit words on
 476   // little-endian machines allows us to do so.
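       // _indices packs the constant pool index in the low 16 bits and the two
       // resolved bytecodes in the next two bytes (see the shift asserts below).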
 477   movl(bytecode, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
 478   const int shift_count = (1 + byte_no) * BitsPerByte;
 479   assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
 480          (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
 481          "correct shift count");
 482   shrl(bytecode, shift_count);
 483   assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
 484   andl(bytecode, ConstantPoolCacheEntry::bytecode_1_mask);
 485 }
 486 
 487 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
 488                                                                Register tmp,
 489                                                                int bcp_offset,
 490                                                                size_t index_size) {
 491   assert(cache != tmp, "must use different register");
 492   get_cache_index_at_bcp(tmp, bcp_offset, index_size);
 493   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
 494   // convert from field index to ConstantPoolCacheEntry index
 495   // and from word offset to byte offset
 496   assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
 497   shll(tmp, 2 + LogBytesPerWord);
 498   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
 499   // skip past the header
 500   addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
 501   addptr(cache, tmp);  // construct pointer to cache entry
 502 }
 503 
 504 // Load object from cpool->resolved_references(index)
 505 void InterpreterMacroAssembler::load_resolved_reference_at_index(
 506                                            Register result, Register index) {
 507   assert_different_registers(result, index);
 508   // convert from field index to resolved_references() index and from
 509   // word index to byte offset. Since this is a Java object, it can be compressed.
 510   Register tmp = index;  // reuse
 511   shll(tmp, LogBytesPerHeapOop);
 512 
 513   get_constant_pool(result);
 514   // load pointer for resolved_references[] objArray
 515   movptr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
 516   movptr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
 517   resolve_oop_handle(result);
 518   // Add in the index
 519   addptr(result, tmp);
 520   load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
 521   // The resulting oop is null if the reference is not yet resolved.
 522   // It is Universe::the_null_sentinel() if the reference resolved to NULL via condy.
 523 }
 524 
 525 // load cpool->resolved_klass_at(index)
 526 void InterpreterMacroAssembler::load_resolved_klass_at_index(Register cpool,
 527                                            Register index, Register klass) {
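       // A resolved Klass entry's constant pool slot holds a 16-bit index into the
       // resolved_klasses array: load that index first, then fetch the Klass* from
       // the array.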
 528   movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
 529   Register resolved_klasses = cpool;
 530   movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes()));
 531   movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
 532 }
 533 
 534 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 535 // subtype of super_klass.
 536 //
 537 // Args:
 538 //      rax: superklass
 539 //      Rsub_klass: subklass
 540 //
 541 // Kills:
 542 //      rcx, rdi
 543 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 544                                                   Label& ok_is_subtype) {
 545   assert(Rsub_klass != rax, "rax holds superklass");
 546   LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
 547   LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
 548   assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
 549   assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");
 550 
 551   // Profile the not-null value's klass.
 552   profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi
 553 
 554   // Do the check.
 555   check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
 556 
 557   // Profile the failure of the check.
 558   profile_typecheck_failed(rcx); // blows rcx
 559 }
 560 
 561 
 562 #ifndef _LP64
 563 void InterpreterMacroAssembler::f2ieee() {
 564   if (IEEEPrecision) {
 565     fstp_s(Address(rsp, 0));
 566     fld_s(Address(rsp, 0));
 567   }
 568 }
 569 
 570 
 571 void InterpreterMacroAssembler::d2ieee() {
 572   if (IEEEPrecision) {
 573     fstp_d(Address(rsp, 0));
 574     fld_d(Address(rsp, 0));
 575   }
 576 }
 577 #endif // _LP64
 578 
 579 // Java Expression Stack
 580 
 581 void InterpreterMacroAssembler::pop_ptr(Register r) {
 582   pop(r);
 583 }
 584 
 585 void InterpreterMacroAssembler::push_ptr(Register r) {
 586   push(r);
 587 }
 588 
 589 void InterpreterMacroAssembler::push_i(Register r) {
 590   push(r);
 591 }
 592 
 593 void InterpreterMacroAssembler::push_f(XMMRegister r) {
 594   subptr(rsp, wordSize);
 595   movflt(Address(rsp, 0), r);
 596 }
 597 
 598 void InterpreterMacroAssembler::pop_f(XMMRegister r) {
 599   movflt(r, Address(rsp, 0));
 600   addptr(rsp, wordSize);
 601 }
 602 
 603 void InterpreterMacroAssembler::push_d(XMMRegister r) {
 604   subptr(rsp, 2 * wordSize);
 605   movdbl(Address(rsp, 0), r);
 606 }
 607 
 608 void InterpreterMacroAssembler::pop_d(XMMRegister r) {
 609   movdbl(r, Address(rsp, 0));
 610   addptr(rsp, 2 * Interpreter::stackElementSize);
 611 }
 612 
 613 #ifdef _LP64
 614 void InterpreterMacroAssembler::pop_i(Register r) {
 615   // XXX can't use pop currently, upper half non clean
 616   movl(r, Address(rsp, 0));
 617   addptr(rsp, wordSize);
 618 }
 619 
 620 void InterpreterMacroAssembler::pop_l(Register r) {
 621   movq(r, Address(rsp, 0));
 622   addptr(rsp, 2 * Interpreter::stackElementSize);
 623 }
 624 
 625 void InterpreterMacroAssembler::push_l(Register r) {
 626   subptr(rsp, 2 * wordSize);
 627   movptr(Address(rsp, Interpreter::expr_offset_in_bytes(0)), r         );
 628   movptr(Address(rsp, Interpreter::expr_offset_in_bytes(1)), NULL_WORD );
 629 }
 630 
 631 void InterpreterMacroAssembler::pop(TosState state) {
 632   switch (state) {
 633   case atos: pop_ptr();                 break;
 634   case btos:
 635   case ztos:
 636   case ctos:
 637   case stos:
 638   case itos: pop_i();                   break;
 639   case ltos: pop_l();                   break;
 640   case ftos: pop_f(xmm0);               break;
 641   case dtos: pop_d(xmm0);               break;
 642   case vtos: /* nothing to do */        break;
 643   default:   ShouldNotReachHere();
 644   }
 645   verify_oop(rax, state);
 646 }
 647 
 648 void InterpreterMacroAssembler::push(TosState state) {
 649   verify_oop(rax, state);
 650   switch (state) {
 651   case atos: push_ptr();                break;
 652   case btos:
 653   case ztos:
 654   case ctos:
 655   case stos:
 656   case itos: push_i();                  break;
 657   case ltos: push_l();                  break;
 658   case ftos: push_f(xmm0);              break;
 659   case dtos: push_d(xmm0);              break;
 660   case vtos: /* nothing to do */        break;
 661   default  : ShouldNotReachHere();
 662   }
 663 }
 664 #else
 665 void InterpreterMacroAssembler::pop_i(Register r) {
 666   pop(r);
 667 }
 668 
 669 void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
 670   pop(lo);
 671   pop(hi);
 672 }
 673 
 674 void InterpreterMacroAssembler::pop_f() {
 675   fld_s(Address(rsp, 0));
 676   addptr(rsp, 1 * wordSize);
 677 }
 678 
 679 void InterpreterMacroAssembler::pop_d() {
 680   fld_d(Address(rsp, 0));
 681   addptr(rsp, 2 * wordSize);
 682 }
 683 
 684 
 685 void InterpreterMacroAssembler::pop(TosState state) {
 686   switch (state) {
 687     case atos: pop_ptr(rax);                                 break;
 688     case btos:                                               // fall through
 689     case ztos:                                               // fall through
 690     case ctos:                                               // fall through
 691     case stos:                                               // fall through
 692     case itos: pop_i(rax);                                   break;
 693     case ltos: pop_l(rax, rdx);                              break;
 694     case ftos:
 695       if (UseSSE >= 1) {
 696         pop_f(xmm0);
 697       } else {
 698         pop_f();
 699       }
 700       break;
 701     case dtos:
 702       if (UseSSE >= 2) {
 703         pop_d(xmm0);
 704       } else {
 705         pop_d();
 706       }
 707       break;
 708     case vtos: /* nothing to do */                           break;
 709     default  : ShouldNotReachHere();
 710   }
 711   verify_oop(rax, state);
 712 }
 713 
 714 
 715 void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
 716   push(hi);
 717   push(lo);
 718 }
 719 
 720 void InterpreterMacroAssembler::push_f() {
 721   // Do not schedule for no AGI! Never write beyond rsp!
 722   subptr(rsp, 1 * wordSize);
 723   fstp_s(Address(rsp, 0));
 724 }
 725 
 726 void InterpreterMacroAssembler::push_d() {
 727   // Do not schedule for no AGI! Never write beyond rsp!
 728   subptr(rsp, 2 * wordSize);
 729   fstp_d(Address(rsp, 0));
 730 }
 731 
 732 
 733 void InterpreterMacroAssembler::push(TosState state) {
 734   verify_oop(rax, state);
 735   switch (state) {
 736     case atos: push_ptr(rax); break;
 737     case btos:                                               // fall through
 738     case ztos:                                               // fall through
 739     case ctos:                                               // fall through
 740     case stos:                                               // fall through
 741     case itos: push_i(rax);                                    break;
 742     case ltos: push_l(rax, rdx);                               break;
 743     case ftos:
 744       if (UseSSE >= 1) {
 745         push_f(xmm0);
 746       } else {
 747         push_f();
 748       }
 749       break;
 750     case dtos:
 751       if (UseSSE >= 2) {
 752         push_d(xmm0);
 753       } else {
 754         push_d();
 755       }
 756       break;
 757     case vtos: /* nothing to do */                             break;
 758     default  : ShouldNotReachHere();
 759   }
 760 }
 761 #endif // _LP64
 762 
 763 
 764 // Helpers for swap and dup
 765 void InterpreterMacroAssembler::load_ptr(int n, Register val) {
 766   movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
 767 }
 768 
 769 void InterpreterMacroAssembler::store_ptr(int n, Register val) {
 770   movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
 771 }
 772 
 773 
 774 void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
 775   // set sender sp
 776   lea(_bcp_register, Address(rsp, wordSize));
 777   // record last_sp
 778   movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), _bcp_register);
 779 }
 780 
 781 
 782 // Jump to from_interpreted entry of a call unless single stepping is possible
 783 // in this thread, in which case we must call the i2i entry.
 784 void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
 785   prepare_to_jump_from_interpreted();
 786 
 787   if (JvmtiExport::can_post_interpreter_events()) {
 788     Label run_compiled_code;
 789     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
 790     // compiled code in threads for which the event is enabled.  Check here for
 791     // interp_only_mode if these events CAN be enabled.
 792     // interp_only is an int, on little endian it is sufficient to test the byte only
 793     // Is a cmpl faster?
 794     LP64_ONLY(temp = r15_thread;)
 795     NOT_LP64(get_thread(temp);)
 796     cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
 797     jccb(Assembler::zero, run_compiled_code);
 798     jmp(Address(method, Method::interpreter_entry_offset()));
 799     bind(run_compiled_code);
 800   }
 801 
 802   jmp(Address(method, Method::from_interpreted_offset()));
 803 }
 804 
 805 // The following two routines provide a hook so that an implementation
 806 // can schedule the dispatch in two parts.  x86 does not do this.
 807 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
 808   // Nothing x86 specific to be done here
 809 }
 810 
 811 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
 812   dispatch_next(state, step);
 813 }
 814 
 815 void InterpreterMacroAssembler::dispatch_base(TosState state,
 816                                               address* table,
 817                                               bool verifyoop,
 818                                               bool generate_poll) {
 819   verify_FPU(1, state);
 820   if (VerifyActivationFrameSize) {
 821     Label L;
 822     mov(rcx, rbp);
 823     subptr(rcx, rsp);
 824     int32_t min_frame_size =
 825       (frame::link_offset - frame::interpreter_frame_initial_sp_offset) *
 826       wordSize;
 827     cmpptr(rcx, (int32_t)min_frame_size);
 828     jcc(Assembler::greaterEqual, L);
 829     stop("broken stack frame");
 830     bind(L);
 831   }
 832   if (verifyoop) {
 833     verify_oop(rax, state);
 834   }
 835 
 836   address* const safepoint_table = Interpreter::safept_table(state);
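       // With thread-local polling, a pending safepoint is signalled by a bit in the
       // thread's polling word; if it is set, dispatch through the safepoint table so
       // the safepoint is honoured before the next bytecode executes.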
 837 #ifdef _LP64
 838   Label no_safepoint, dispatch;
 839   if (SafepointMechanism::uses_thread_local_poll() && table != safepoint_table && generate_poll) {
 840     NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
 841     testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
 842 
 843     jccb(Assembler::zero, no_safepoint);
 844     lea(rscratch1, ExternalAddress((address)safepoint_table));
 845     jmpb(dispatch);
 846   }
 847 
 848   bind(no_safepoint);
 849   lea(rscratch1, ExternalAddress((address)table));
 850   bind(dispatch);
 851   jmp(Address(rscratch1, rbx, Address::times_8));
 852 
 853 #else
 854   Address index(noreg, rbx, Address::times_ptr);
 855   if (SafepointMechanism::uses_thread_local_poll() && table != safepoint_table && generate_poll) {
 856     NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
 857     Label no_safepoint;
 858     const Register thread = rcx;
 859     get_thread(thread);
 860     testb(Address(thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
 861 
 862     jccb(Assembler::zero, no_safepoint);
 863     ArrayAddress dispatch_addr(ExternalAddress((address)safepoint_table), index);
 864     jump(dispatch_addr);
 865     bind(no_safepoint);
 866   }
 867 
 868   {
 869     ArrayAddress dispatch_addr(ExternalAddress((address)table), index);
 870     jump(dispatch_addr);
 871   }
 872 #endif // _LP64
 873 }
 874 
 875 void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
 876   dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
 877 }
 878 
 879 void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
 880   dispatch_base(state, Interpreter::normal_table(state));
 881 }
 882 
 883 void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
 884   dispatch_base(state, Interpreter::normal_table(state), false);
 885 }
 886 
 887 
 888 void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
 889   // load next bytecode (load before advancing _bcp_register to prevent AGI)
 890   load_unsigned_byte(rbx, Address(_bcp_register, step));
 891   // advance _bcp_register
 892   increment(_bcp_register, step);
 893   dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
 894 }
 895 
 896 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
 897   // load current bytecode
 898   load_unsigned_byte(rbx, Address(_bcp_register, 0));
 899   dispatch_base(state, table);
 900 }
 901 
 902 void InterpreterMacroAssembler::narrow(Register result) {
 903 
 904   // Get method->_constMethod->_result_type
 905   movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
 906   movptr(rcx, Address(rcx, Method::const_offset()));
 907   load_unsigned_byte(rcx, Address(rcx, ConstMethod::result_type_offset()));
 908 
 909   Label done, notBool, notByte, notChar;
 910 
 911   // common case first
 912   cmpl(rcx, T_INT);
 913   jcc(Assembler::equal, done);
 914 
 915   // mask integer result to narrower return type.
 916   cmpl(rcx, T_BOOLEAN);
 917   jcc(Assembler::notEqual, notBool);
 918   andl(result, 0x1);
 919   jmp(done);
 920 
 921   bind(notBool);
 922   cmpl(rcx, T_BYTE);
 923   jcc(Assembler::notEqual, notByte);
 924   LP64_ONLY(movsbl(result, result);)
 925   NOT_LP64(shll(result, 24);)      // truncate upper 24 bits
 926   NOT_LP64(sarl(result, 24);)      // and sign-extend byte
 927   jmp(done);
 928 
 929   bind(notByte);
 930   cmpl(rcx, T_CHAR);
 931   jcc(Assembler::notEqual, notChar);
 932   LP64_ONLY(movzwl(result, result);)
 933   NOT_LP64(andl(result, 0xFFFF);)  // truncate upper 16 bits
 934   jmp(done);
 935 
 936   bind(notChar);
 937   // cmpl(rcx, T_SHORT);  // all that's left
 938   // jcc(Assembler::notEqual, done);
 939   LP64_ONLY(movswl(result, result);)
 940   NOT_LP64(shll(result, 16);)      // truncate upper 16 bits
 941   NOT_LP64(sarl(result, 16);)      // and sign-extend short
 942 
 943   // Nothing to do for T_INT
 944   bind(done);
 945 }
 946 
 947 // remove activation
 948 //
 949 // Unlock the receiver if this is a synchronized method.
 950 // Unlock any Java monitors from synchronized blocks.
 951 // Remove the activation from the stack.
 952 //
 953 // If there are locked Java monitors
 954 //    If throw_monitor_exception
 955 //       throws IllegalMonitorStateException
 956 //    Else if install_monitor_exception
 957 //       installs IllegalMonitorStateException
 958 //    Else
 959 //       no error processing
 960 void InterpreterMacroAssembler::remove_activation(
 961         TosState state,
 962         Register ret_addr,
 963         bool throw_monitor_exception,
 964         bool install_monitor_exception,
 965         bool notify_jvmdi,
 966         bool load_values) {
 967   // Note: Registers rdx and xmm0 may be in use for the
 968   // result check if this is a synchronized method
 969   Label unlocked, unlock, no_unlock;
 970 
 971   const Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
 972   const Register robj    = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
 973   const Register rmon    = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
 974                               // monitor pointers need different register
 975                               // because rdx may have the result in it
 976   NOT_LP64(get_thread(rcx);)
 977 
 978   // get the value of _do_not_unlock_if_synchronized into rdx
 979   const Address do_not_unlock_if_synchronized(rthread,
 980     in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
 981   movbool(rbx, do_not_unlock_if_synchronized);
 982   movbool(do_not_unlock_if_synchronized, false); // reset the flag
 983 
 984  // get method access flags
 985   movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
 986   movl(rcx, Address(rcx, Method::access_flags_offset()));
 987   testl(rcx, JVM_ACC_SYNCHRONIZED);
 988   jcc(Assembler::zero, unlocked);
 989 
 990   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
 991   // is set.
 992   testbool(rbx);
 993   jcc(Assembler::notZero, no_unlock);
 994 
 995   // unlock monitor
 996   push(state); // save result
 997 
 998   // The BasicObjectLock will be first in the list, since this is a
 999   // synchronized method. However, we need to check that the object has
1000   // not been unlocked by an explicit monitorexit bytecode.
1001   const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
1002                         wordSize - (int) sizeof(BasicObjectLock));
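       // (the first monitor slot sits one BasicObjectLock below the fixed part of
       // the frame, i.e. just below interpreter_frame_initial_sp)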
1003   // We use c_rarg1/rdx so that if we go slow path it will be the correct
1004   // register for unlock_object to pass to VM directly
1005   lea(robj, monitor); // address of first monitor
1006 
1007   movptr(rax, Address(robj, BasicObjectLock::obj_offset_in_bytes()));
1008   testptr(rax, rax);
1009   jcc(Assembler::notZero, unlock);
1010 
1011   pop(state);
1012   if (throw_monitor_exception) {
1013     // Entry already unlocked, need to throw exception
1014     NOT_LP64(empty_FPU_stack();)  // remove possible return value from FPU-stack, otherwise stack could overflow
1015     call_VM(noreg, CAST_FROM_FN_PTR(address,
1016                    InterpreterRuntime::throw_illegal_monitor_state_exception));
1017     should_not_reach_here();
1018   } else {
1019     // Monitor already unlocked during a stack unroll. If requested,
1020     // install an illegal_monitor_state_exception.  Continue with
1021     // stack unrolling.
1022     if (install_monitor_exception) {
1023       NOT_LP64(empty_FPU_stack();)
1024       call_VM(noreg, CAST_FROM_FN_PTR(address,
1025                      InterpreterRuntime::new_illegal_monitor_state_exception));
1026     }
1027     jmp(unlocked);
1028   }
1029 
1030   bind(unlock);
1031   unlock_object(robj);
1032   pop(state);
1033 
1034   // Check for block-structured locking (i.e., that all locked
1035   // objects have been unlocked)
1036   bind(unlocked);
1037 
1038   // rax, rdx: Might contain return value
1039 
1040   // Check that all monitors are unlocked
1041   {
1042     Label loop, exception, entry, restart;
1043     const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
1044     const Address monitor_block_top(
1045         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
1046     const Address monitor_block_bot(
1047         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
1048 
1049     bind(restart);
1050     // We use c_rarg1 so that if we go slow path it will be the correct
1051     // register for unlock_object to pass to VM directly
1052     movptr(rmon, monitor_block_top); // points to current entry, starting
1053                                   // with top-most entry
1054     lea(rbx, monitor_block_bot);  // points to word before bottom of
1055                                   // monitor block
1056     jmp(entry);
1057 
1058     // Entry already locked, need to throw exception
1059     bind(exception);
1060 
1061     if (throw_monitor_exception) {
1062       // Throw exception
1063       NOT_LP64(empty_FPU_stack();)
1064       MacroAssembler::call_VM(noreg,
1065                               CAST_FROM_FN_PTR(address, InterpreterRuntime::
1066                                    throw_illegal_monitor_state_exception));
1067       should_not_reach_here();
1068     } else {
1069       // Stack unrolling. Unlock object and install illegal_monitor_exception.
1070       // Unlock does not block, so don't have to worry about the frame.
1071       // We don't have to preserve c_rarg1 since we are going to throw an exception.
1072 
1073       push(state);
1074       mov(robj, rmon);   // nop if robj and rmon are the same
1075       unlock_object(robj);
1076       pop(state);
1077 
1078       if (install_monitor_exception) {
1079         NOT_LP64(empty_FPU_stack();)
1080         call_VM(noreg, CAST_FROM_FN_PTR(address,
1081                                         InterpreterRuntime::
1082                                         new_illegal_monitor_state_exception));
1083       }
1084 
1085       jmp(restart);
1086     }
1087 
1088     bind(loop);
1089     // check if current entry is used
1090     cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
1091     jcc(Assembler::notEqual, exception);
1092 
1093     addptr(rmon, entry_size); // otherwise advance to next entry
1094     bind(entry);
1095     cmpptr(rmon, rbx); // check if bottom reached
1096     jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
1097   }
1098 
1099   bind(no_unlock);
1100 
1101   // jvmti support
1102   if (notify_jvmdi) {
1103     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
1104   } else {
1105     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
1106   }
1107 
1108   if (StackReservedPages > 0) {
1109     movptr(rbx,
1110                Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1111     // testing if reserved zone needs to be re-enabled
1112     Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
1113     Label no_reserved_zone_enabling;
1114 
1115     NOT_LP64(get_thread(rthread);)
1116 
1117     cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_enabled);
1118     jcc(Assembler::equal, no_reserved_zone_enabling);
1119 
1120     cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
1121     jcc(Assembler::lessEqual, no_reserved_zone_enabling);
1122 
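         // The sender sp is above reserved_stack_activation, so this activation made
         // use of the reserved stack zone: re-enable the zone and throw the delayed
         // StackOverflowError.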
1123     call_VM_leaf(
1124       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
1125     call_VM(noreg, CAST_FROM_FN_PTR(address,
1126                    InterpreterRuntime::throw_delayed_StackOverflowError));
1127     should_not_reach_here();
1128 
1129     bind(no_reserved_zone_enabling);
1130   }
1131 
1132   if (ValueTypesBufferMaxMemory > 0) {
1133     // The code below takes care of recycling TLVB memory; no safepoint should
1134     // occur between this point and the end of the remove_activation() method.
1135     Label vtbuffer_slow, vtbuffer_done, no_buffered_value_returned;
1136     const Register thread1 = NOT_LP64(rcx) LP64_ONLY(r15_thread);
1137     const uintptr_t chunk_mask = VTBufferChunk::chunk_mask();
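         // Fast paths: if a buffered value is being returned, call return_value_step2
         // and reload the result from the thread; otherwise, if the frame's saved
         // vt_alloc_ptr equals the thread's current one there is nothing to reclaim,
         // and if both fall within the same chunk (equal after masking with chunk_mask)
         // the thread's pointer is simply rolled back; only otherwise take the slow
         // path that recycles chunks via recycle_vtbuffer.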
1138     NOT_LP64(get_thread(thread1));
1139     cmpptr(Address(thread1, JavaThread::return_buffered_value_offset()), (intptr_t)NULL_WORD);
1140     jcc(Assembler::equal, no_buffered_value_returned);
1141     movptr(rbx, Address(rbp, frame::interpreter_frame_vt_alloc_ptr_offset * wordSize));
1142     call_VM_leaf(CAST_FROM_FN_PTR(address,
1143                                   InterpreterRuntime::return_value_step2), rax, rbx);
1144     NOT_LP64(get_thread(thread1));
1145     get_vm_result(rax, thread1);
1146     jmp(vtbuffer_done);
1147     bind(no_buffered_value_returned);
1148     movptr(rbx, Address(rbp, frame::interpreter_frame_vt_alloc_ptr_offset * wordSize));
1149     NOT_LP64(get_thread(thread1));
1150     movptr(rcx, Address(thread1, JavaThread::vt_alloc_ptr_offset()));
1151     cmpptr(rbx, rcx);
1152     jcc(Assembler::equal, vtbuffer_done);
1153     andptr(rbx, chunk_mask);
1154     andptr(rcx, chunk_mask);
1155     cmpptr(rbx, rcx);
1156     jcc(Assembler::notEqual, vtbuffer_slow);
1157     movptr(rbx, Address(rbp, frame::interpreter_frame_vt_alloc_ptr_offset * wordSize));
1158     movptr(Address(thread1, JavaThread::vt_alloc_ptr_offset()), rbx);
1159     jmp(vtbuffer_done);
1160     bind(vtbuffer_slow);
1161     push(state);
1162     movptr(rbx, Address(rbp, frame::interpreter_frame_vt_alloc_ptr_offset * wordSize));
1163     call_VM_leaf(CAST_FROM_FN_PTR(address,
1164                                   InterpreterRuntime::recycle_vtbuffer), rbx);
1165     pop(state);
1166     bind(vtbuffer_done);
1167   }
1168 
1169   // remove activation
1170   // get sender sp
1171   movptr(rbx,
1172          Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1173 
1174   if (load_values) {
1175     // We are returning a value type; load its fields into registers.
1176 #ifndef _LP64
1177     super_call_VM_leaf(StubRoutines::load_value_type_fields_in_regs());
1178 #else
1179     load_klass(rdi, rax);
1180     movptr(rdi, Address(rdi, ValueKlass::unpack_handler_offset()));
1181 
1182     Label skip;
1183     testptr(rdi, rdi);
1184     jcc(Assembler::equal, skip);
1185 
1186     // Load fields from a buffered value with a value class specific
1187     // handler
1188     call(rdi);
1189 
1190     bind(skip);
1191 #endif
1192     // The call above kills the value in rbx. Reload it.
1193     movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1194   }
1195   leave();                           // remove frame anchor
1196   pop(ret_addr);                     // get return address
1197   mov(rsp, rbx);                     // set sp to sender sp
1198 }
1199 
1200 void InterpreterMacroAssembler::get_method_counters(Register method,
1201                                                     Register mcs, Label& skip) {
1202   Label has_counters;
1203   movptr(mcs, Address(method, Method::method_counters_offset()));
1204   testptr(mcs, mcs);
1205   jcc(Assembler::notZero, has_counters);
1206   call_VM(noreg, CAST_FROM_FN_PTR(address,
1207           InterpreterRuntime::build_method_counters), method);
1208   movptr(mcs, Address(method,Method::method_counters_offset()));
1209   testptr(mcs, mcs);
1210   jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1211   bind(has_counters);
1212 }
1213 
1214 
1215 // Lock object
1216 //
1217 // Args:
1218 //      rdx, c_rarg1: BasicObjectLock to be used for locking
1219 //
1220 // Kills:
1221 //      rax, rbx
1222 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1223   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1224          "The argument is only for looks. It must be c_rarg1");
1225 
1226   if (UseHeavyMonitors) {
1227     call_VM(noreg,
1228             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1229             lock_reg);
1230   } else {
1231     Label done;
1232 
1233     const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1234     const Register tmp_reg = rbx; // Will be passed to biased_locking_enter to avoid a
1235                                   // problematic case where tmp_reg = no_reg.
1236     const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1237 
1238     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
1239     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
1240     const int mark_offset = lock_offset +
1241                             BasicLock::displaced_header_offset_in_bytes();
1242 
1243     Label slow_case;
1244 
1245     // Load object pointer into obj_reg
1246     movptr(obj_reg, Address(lock_reg, obj_offset));
1247 
1248     if (UseBiasedLocking) {
1249       biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, false, done, &slow_case);
1250     }
1251 
1252     // Load immediate 1 into swap_reg %rax
1253     movl(swap_reg, (int32_t)1);
1254 
1255     // Load (object->mark() | 1) into swap_reg %rax
1256     orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1257     if (EnableValhalla && !UseBiasedLocking) {
1258       // For the is_always_locked slow path the biased-lock pattern is used, which never occurs naturally when !UseBiasedLocking
1259       andptr(swap_reg, ~markOopDesc::biased_lock_bit_in_place);
1260     }
1261 
1262     // Save (object->mark() | 1) into BasicLock's displaced header
1263     movptr(Address(lock_reg, mark_offset), swap_reg);
1264 
1265     assert(lock_offset == 0,
1266            "displaced header must be first word in BasicObjectLock");
1267 
1268     if (os::is_MP()) lock();
1269     cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1270     if (PrintBiasedLockingStatistics) {
1271       cond_inc32(Assembler::zero,
1272                  ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
1273     }
1274     jcc(Assembler::zero, done);
1275 
1276     const int zero_bits = LP64_ONLY(7) NOT_LP64(3);
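         // zero_bits covers the low address bits that are always clear in a stack
         // address: 8-byte alignment on LP64, 4-byte alignment on 32-bit.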
1277 
1278     // Test if the oopMark is an obvious stack pointer, i.e.,
1279     //  1) (mark & zero_bits) == 0, and
1280     //  2) rsp <= mark < mark + os::pagesize()
1281     //
1282     // These 3 tests can be done by evaluating the following
1283     // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
1284     // assuming both stack pointer and pagesize have their
1285     // least significant bits clear.
1286     // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
1287     subptr(swap_reg, rsp);
1288     andptr(swap_reg, zero_bits - os::vm_page_size());
1289 
1290     // Save the test result, for recursive case, the result is zero
1291     movptr(Address(lock_reg, mark_offset), swap_reg);
1292 
1293     if (PrintBiasedLockingStatistics) {
1294       cond_inc32(Assembler::zero,
1295                  ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
1296     }
1297     jcc(Assembler::zero, done);
1298 
1299     bind(slow_case);
1300 
1301     // Call the runtime routine for slow case
1302     call_VM(noreg,
1303             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1304             lock_reg);
1305 
1306     bind(done);
1307   }
1308 }
1309 
1310 
1311 // Unlocks an object. Used in monitorexit bytecode and
1312 // remove_activation.  Throws an IllegalMonitorException if object is
1313 // not locked by current thread.
1314 //
1315 // Args:
1316 //      rdx, c_rarg1: BasicObjectLock for lock
1317 //
1318 // Kills:
1319 //      rax
1320 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
1321 //      rscratch1 (scratch reg)
1322 // rax, rbx, rcx, rdx
1323 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
1324   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1325          "The argument is only for looks. It must be c_rarg1");
1326 
1327   if (UseHeavyMonitors) {
1328     call_VM(noreg,
1329             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
1330             lock_reg);
1331   } else {
1332     Label done;
1333 
1334     const Register swap_reg   = rax;  // Must use rax for cmpxchg instruction
1335     const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx);  // Will contain the old oopMark
1336     const Register obj_reg    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);  // Will contain the oop
1337 
1338     save_bcp(); // Save in case of exception
1339 
1340     // Convert from BasicObjectLock structure to object and BasicLock
1341     // structure. Store the BasicLock address into %rax.
1342     lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
1343 
1344     // Load oop into obj_reg(%c_rarg3)
1345     movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
1346 
1347     // Free entry
1348     movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
1349 
1350     if (UseBiasedLocking) {
1351       biased_locking_exit(obj_reg, header_reg, done);
1352     }
1353 
1354     // Load the old header from BasicLock structure
1355     movptr(header_reg, Address(swap_reg,
1356                                BasicLock::displaced_header_offset_in_bytes()));
1357 
1358     // Test for recursion
1359     testptr(header_reg, header_reg);
1360 
1361     // zero for recursive case
1362     jcc(Assembler::zero, done);
1363 
1364     // Atomic swap back the old header
1365     if (os::is_MP()) lock();
1366     cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1367 
1368     // zero for simple unlock of a stack-lock case
1369     jcc(Assembler::zero, done);
1370 
1371     // Call the runtime routine for slow case.
1372     movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()),
1373          obj_reg); // restore obj
1374     call_VM(noreg,
1375             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
1376             lock_reg);
1377 
1378     bind(done);
1379 
1380     restore_bcp();
1381   }
1382 }
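
// Roughly, the unlock fast path emitted above corresponds to the following sketch
// (pseudocode only; the names are illustrative, not actual VM calls):
//
//   lock = &monitor->lock;                          // BasicLock inside the BasicObjectLock
//   obj  = monitor->obj;
//   monitor->obj = NULL;                            // free the monitor entry
//   if (UseBiasedLocking && biased_exit(obj))       goto done;
//   header = lock->displaced_header;
//   if (header == NULL)                             goto done;  // recursive case
//   if (CAS(&obj->mark, /*expected*/ lock, header)) goto done;  // simple stack-unlock
//   monitor->obj = obj;                             // restore obj, let the runtime handle it
//   InterpreterRuntime::monitorexit(monitor);
//   done: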
1383 
1384 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
1385                                                          Label& zero_continue) {
1386   assert(ProfileInterpreter, "must be profiling interpreter");
1387   movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
1388   testptr(mdp, mdp);
1389   jcc(Assembler::zero, zero_continue);
1390 }
1391 
1392 
1393 // Set the method data pointer for the current bcp.
1394 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1395   assert(ProfileInterpreter, "must be profiling interpreter");
1396   Label set_mdp;
1397   push(rax);
1398   push(rbx);
1399 
1400   get_method(rbx);
1401   // Test MDO to avoid the call if it is NULL.
1402   movptr(rax, Address(rbx, in_bytes(Method::method_data_offset())));
1403   testptr(rax, rax);
1404   jcc(Assembler::zero, set_mdp);
1405   // rbx: method
1406   // _bcp_register: bcp
1407   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, _bcp_register);
1408   // rax: mdi
1409   // The MDO is guaranteed to be non-zero here; we checked for it before the call.
1410   movptr(rbx, Address(rbx, in_bytes(Method::method_data_offset())));
1411   addptr(rbx, in_bytes(MethodData::data_offset()));
1412   addptr(rax, rbx);
1413   bind(set_mdp);
1414   movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), rax);
1415   pop(rbx);
1416   pop(rax);
1417 }
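
// In effect, set_method_data_pointer_for_bcp computes (a sketch, not literal VM code):
//
//   MethodData* mdo = method->method_data();
//   frame.mdp = (mdo == NULL)
//                 ? NULL
//                 : (address)mdo + in_bytes(MethodData::data_offset()) + bcp_to_di(method, bcp);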
1418 
1419 void InterpreterMacroAssembler::verify_method_data_pointer() {
1420   assert(ProfileInterpreter, "must be profiling interpreter");
1421 #ifdef ASSERT
1422   Label verify_continue;
1423   push(rax);
1424   push(rbx);
1425   Register arg3_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
1426   Register arg2_reg = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
1427   push(arg3_reg);
1428   push(arg2_reg);
1429   test_method_data_pointer(arg3_reg, verify_continue); // If mdp is zero, continue
1430   get_method(rbx);
1431 
1432   // If the mdp is valid, it will point to a DataLayout header which is
1433   // consistent with the bcp.  The converse is highly probable also.
1434   load_unsigned_short(arg2_reg,
1435                       Address(arg3_reg, in_bytes(DataLayout::bci_offset())));
1436   addptr(arg2_reg, Address(rbx, Method::const_offset()));
1437   lea(arg2_reg, Address(arg2_reg, ConstMethod::codes_offset()));
1438   cmpptr(arg2_reg, _bcp_register);
1439   jcc(Assembler::equal, verify_continue);
1440   // rbx: method
1441   // _bcp_register: bcp
1442   // c_rarg3: mdp
1443   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
1444                rbx, _bcp_register, arg3_reg);
1445   bind(verify_continue);
1446   pop(arg2_reg);
1447   pop(arg3_reg);
1448   pop(rbx);
1449   pop(rax);
1450 #endif // ASSERT
1451 }
1452 
1453 
1454 void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
1455                                                 int constant,
1456                                                 Register value) {
1457   assert(ProfileInterpreter, "must be profiling interpreter");
1458   Address data(mdp_in, constant);
1459   movptr(data, value);
1460 }
1461 
1462 
1463 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
1464                                                       int constant,
1465                                                       bool decrement) {
1466   // Counter address
1467   Address data(mdp_in, constant);
1468 
1469   increment_mdp_data_at(data, decrement);
1470 }
1471 
1472 void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
1473                                                       bool decrement) {
1474   assert(ProfileInterpreter, "must be profiling interpreter");
1475   // %%% This does 64-bit counters; at best it is wasting space,
1476   // at worst it is a rare bug when counters overflow.
1477 
1478   if (decrement) {
1479     // Decrement the counter in memory.  Set condition codes.
1480     addptr(data, (int32_t) -DataLayout::counter_increment);
1481     // If the decrement causes the counter to overflow, stay negative
1482     Label L;
1483     jcc(Assembler::negative, L);
1484     addptr(data, (int32_t) DataLayout::counter_increment);
1485     bind(L);
1486   } else {
1487     assert(DataLayout::counter_increment == 1,
1488            "flow-free idiom only works with 1");
1489     // Increment the counter in memory.  Set carry flag.
1490     addptr(data, DataLayout::counter_increment);
1491     // If the increment causes the counter to overflow, pull back by 1.
1492     sbbptr(data, (int32_t)0);
1493   }
1494 }
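
// A sketch of what the generated code above computes (C-like pseudocode; the increment
// path is the branch-free "add 1; sbb 0" idiom, which saturates instead of wrapping):
//
//   if (decrement) { data -= 1; if (data >= 0) data += 1; }  // matches the overflow guard above
//   else           { data += 1; if (data == 0) data -= 1; }  // carry from the wrap is subtracted back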
1495 
1496 
1497 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
1498                                                       Register reg,
1499                                                       int constant,
1500                                                       bool decrement) {
1501   Address data(mdp_in, reg, Address::times_1, constant);
1502 
1503   increment_mdp_data_at(data, decrement);
1504 }
1505 
1506 void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
1507                                                 int flag_byte_constant) {
1508   assert(ProfileInterpreter, "must be profiling interpreter");
1509   int header_offset = in_bytes(DataLayout::header_offset());
1510   int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant);
1511   // Set the flag
1512   orl(Address(mdp_in, header_offset), header_bits);
1513 }
1514 
1515 
1516 
1517 void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
1518                                                  int offset,
1519                                                  Register value,
1520                                                  Register test_value_out,
1521                                                  Label& not_equal_continue) {
1522   assert(ProfileInterpreter, "must be profiling interpreter");
1523   if (test_value_out == noreg) {
1524     cmpptr(value, Address(mdp_in, offset));
1525   } else {
1526     // Put the test value into a register, so caller can use it:
1527     movptr(test_value_out, Address(mdp_in, offset));
1528     cmpptr(test_value_out, value);
1529   }
1530   jcc(Assembler::notEqual, not_equal_continue);
1531 }
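
// Both flavors above amount to (sketch):
//
//   profiled = *(intptr_t*)(mdp + offset);
//   if (test_value_out != noreg)  test_value_out = profiled;  // expose the profiled value to the caller
//   if (profiled != value)        goto not_equal_continue;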
1532 
1533 
1534 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
1535                                                      int offset_of_disp) {
1536   assert(ProfileInterpreter, "must be profiling interpreter");
1537   Address disp_address(mdp_in, offset_of_disp);
1538   addptr(mdp_in, disp_address);
1539   movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
1540 }
1541 
1542 
1543 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
1544                                                      Register reg,
1545                                                      int offset_of_disp) {
1546   assert(ProfileInterpreter, "must be profiling interpreter");
1547   Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
1548   addptr(mdp_in, disp_address);
1549   movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
1550 }
1551 
1552 
1553 void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
1554                                                        int constant) {
1555   assert(ProfileInterpreter, "must be profiling interpreter");
1556   addptr(mdp_in, constant);
1557   movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
1558 }
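
// The update_mdp_* variants above all advance the method data pointer and store it
// back into the interpreter frame; as a sketch:
//
//   update_mdp_by_offset(mdp, disp_off)      : mdp += *(intptr_t*)(mdp + disp_off)
//   update_mdp_by_offset(mdp, reg, disp_off) : mdp += *(intptr_t*)(mdp + reg + disp_off)
//   update_mdp_by_constant(mdp, constant)    : mdp += constant
//   // in every case: frame[frame::interpreter_frame_mdp_offset] = mdp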
1559 
1560 
1561 void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
1562   assert(ProfileInterpreter, "must be profiling interpreter");
1563   push(return_bci); // save/restore across call_VM
1564   call_VM(noreg,
1565           CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
1566           return_bci);
1567   pop(return_bci);
1568 }
1569 
1570 
1571 void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
1572                                                      Register bumped_count) {
1573   if (ProfileInterpreter) {
1574     Label profile_continue;
1575 
1576     // If no method data exists, go to profile_continue.
1577     // Otherwise, assign to mdp
1578     test_method_data_pointer(mdp, profile_continue);
1579 
1580     // We are taking a branch.  Increment the taken count.
1581     // We inline increment_mdp_data_at to return bumped_count in a register
1582     //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1583     Address data(mdp, in_bytes(JumpData::taken_offset()));
1584     movptr(bumped_count, data);
1585     assert(DataLayout::counter_increment == 1,
1586             "flow-free idiom only works with 1");
1587     addptr(bumped_count, DataLayout::counter_increment);
1588     sbbptr(bumped_count, 0);
1589     movptr(data, bumped_count); // Store back out
1590 
1591     // The method data pointer needs to be updated to reflect the new target.
1592     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1593     bind(profile_continue);
1594   }
1595 }
1596 
1597 
1598 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
1599   if (ProfileInterpreter) {
1600     Label profile_continue;
1601 
1602     // If no method data exists, go to profile_continue.
1603     test_method_data_pointer(mdp, profile_continue);
1604 
1605     // We are taking a branch.  Increment the not taken count.
1606     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1607 
1608     // The method data pointer needs to be updated to correspond to
1609     // the next bytecode
1610     update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
1611     bind(profile_continue);
1612   }
1613 }
1614 
1615 void InterpreterMacroAssembler::profile_call(Register mdp) {
1616   if (ProfileInterpreter) {
1617     Label profile_continue;
1618 
1619     // If no method data exists, go to profile_continue.
1620     test_method_data_pointer(mdp, profile_continue);
1621 
1622     // We are making a call.  Increment the count.
1623     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1624 
1625     // The method data pointer needs to be updated to reflect the new target.
1626     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1627     bind(profile_continue);
1628   }
1629 }
1630 
1631 
1632 void InterpreterMacroAssembler::profile_final_call(Register mdp) {
1633   if (ProfileInterpreter) {
1634     Label profile_continue;
1635 
1636     // If no method data exists, go to profile_continue.
1637     test_method_data_pointer(mdp, profile_continue);
1638 
1639     // We are making a call.  Increment the count.
1640     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1641 
1642     // The method data pointer needs to be updated to reflect the new target.
1643     update_mdp_by_constant(mdp,
1644                            in_bytes(VirtualCallData::
1645                                     virtual_call_data_size()));
1646     bind(profile_continue);
1647   }
1648 }
1649 
1650 
1651 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
1652                                                      Register mdp,
1653                                                      Register reg2,
1654                                                      bool receiver_can_be_null) {
1655   if (ProfileInterpreter) {
1656     Label profile_continue;
1657 
1658     // If no method data exists, go to profile_continue.
1659     test_method_data_pointer(mdp, profile_continue);
1660 
1661     Label skip_receiver_profile;
1662     if (receiver_can_be_null) {
1663       Label not_null;
1664       testptr(receiver, receiver);
1665       jccb(Assembler::notZero, not_null);
1666       // We are making a call.  Increment the count for null receiver.
1667       increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1668       jmp(skip_receiver_profile);
1669       bind(not_null);
1670     }
1671 
1672     // Record the receiver type.
1673     record_klass_in_profile(receiver, mdp, reg2, true);
1674     bind(skip_receiver_profile);
1675 
1676     // The method data pointer needs to be updated to reflect the new target.
1677 #if INCLUDE_JVMCI
1678     if (MethodProfileWidth == 0) {
1679       update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
1680     }
1681 #else // INCLUDE_JVMCI
1682     update_mdp_by_constant(mdp,
1683                            in_bytes(VirtualCallData::
1684                                     virtual_call_data_size()));
1685 #endif // INCLUDE_JVMCI
1686     bind(profile_continue);
1687   }
1688 }
1689 
1690 #if INCLUDE_JVMCI
1691 void InterpreterMacroAssembler::profile_called_method(Register method, Register mdp, Register reg2) {
1692   assert_different_registers(method, mdp, reg2);
1693   if (ProfileInterpreter && MethodProfileWidth > 0) {
1694     Label profile_continue;
1695 
1696     // If no method data exists, go to profile_continue.
1697     test_method_data_pointer(mdp, profile_continue);
1698 
1699     Label done;
1700     record_item_in_profile_helper(method, mdp, reg2, 0, done, MethodProfileWidth,
1701       &VirtualCallData::method_offset, &VirtualCallData::method_count_offset, in_bytes(VirtualCallData::nonprofiled_receiver_count_offset()));
1702     bind(done);
1703 
1704     update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
1705     bind(profile_continue);
1706   }
1707 }
1708 #endif // INCLUDE_JVMCI
1709 
1710 // This routine creates a state machine for updating the multi-row
1711 // type profile at a virtual call site (or other type-sensitive bytecode).
1712 // The machine visits each row (of receiver/count) until the receiver type
1713 // is found, or until it runs out of rows.  At the same time, it remembers
1714 // the location of the first empty row.  (An empty row records null for its
1715 // receiver, and can be allocated for a newly-observed receiver type.)
1716 // Because there are two degrees of freedom in the state, a simple linear
1717 // search will not work; it must be a decision tree.  Hence this helper
1718 // function is recursive, to generate the required tree structured code.
1719 // It's the interpreter, so we are trading off code space for speed.
1720 // See below for example code.
1721 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1722                                         Register receiver, Register mdp,
1723                                         Register reg2, int start_row,
1724                                         Label& done, bool is_virtual_call) {
1725   if (TypeProfileWidth == 0) {
1726     if (is_virtual_call) {
1727       increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1728     }
1729 #if INCLUDE_JVMCI
1730     else if (EnableJVMCI) {
1731       increment_mdp_data_at(mdp, in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset()));
1732     }
1733 #endif // INCLUDE_JVMCI
1734   } else {
1735     int non_profiled_offset = -1;
1736     if (is_virtual_call) {
1737       non_profiled_offset = in_bytes(CounterData::count_offset());
1738     }
1739 #if INCLUDE_JVMCI
1740     else if (EnableJVMCI) {
1741       non_profiled_offset = in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset());
1742     }
1743 #endif // INCLUDE_JVMCI
1744 
1745     record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
1746         &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset, non_profiled_offset);
1747   }
1748 }
1749 
1750 void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
1751                                         Register reg2, int start_row, Label& done, int total_rows,
1752                                         OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
1753                                         int non_profiled_offset) {
1754   int last_row = total_rows - 1;
1755   assert(start_row <= last_row, "must be work left to do");
1756   // Test this row for both the item and for null.
1757   // Take any of three different outcomes:
1758   //   1. found item => increment count and goto done
1759   //   2. found null => keep looking for case 1, maybe allocate this cell
1760   //   3. found something else => keep looking for cases 1 and 2
1761   // Case 3 is handled by a recursive call.
1762   for (int row = start_row; row <= last_row; row++) {
1763     Label next_test;
1764     bool test_for_null_also = (row == start_row);
1765 
1766     // See if the item is item[n].
1767     int item_offset = in_bytes(item_offset_fn(row));
1768     test_mdp_data_at(mdp, item_offset, item,
1769                      (test_for_null_also ? reg2 : noreg),
1770                      next_test);
1771     // (Reg2 now contains the item from the CallData.)
1772 
1773     // The item is item[n].  Increment count[n].
1774     int count_offset = in_bytes(item_count_offset_fn(row));
1775     increment_mdp_data_at(mdp, count_offset);
1776     jmp(done);
1777     bind(next_test);
1778 
1779     if (test_for_null_also) {
1780       Label found_null;
1781       // Failed the equality check on item[n]...  Test for null.
1782       testptr(reg2, reg2);
1783       if (start_row == last_row) {
1784         // The only thing left to do is handle the null case.
1785         if (non_profiled_offset >= 0) {
1786           jccb(Assembler::zero, found_null);
1787           // Item did not match any saved item and there is no empty row for it.
1788           // Increment total counter to indicate polymorphic case.
1789           increment_mdp_data_at(mdp, non_profiled_offset);
1790           jmp(done);
1791           bind(found_null);
1792         } else {
1793           jcc(Assembler::notZero, done);
1794         }
1795         break;
1796       }
1797       // Since null is rare, make it be the branch-taken case.
1798       jcc(Assembler::zero, found_null);
1799 
1800       // Put all the "Case 3" tests here.
1801       record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
1802         item_offset_fn, item_count_offset_fn, non_profiled_offset);
1803 
1804       // Found a null.  Keep searching for a matching item,
1805       // but remember that this is an empty (unused) slot.
1806       bind(found_null);
1807     }
1808   }
1809 
1810   // In the fall-through case, we found no matching item, but we
1811   // observed that item[start_row] is NULL.
1812 
1813   // Fill in the item field and increment the count.
1814   int item_offset = in_bytes(item_offset_fn(start_row));
1815   set_mdp_data_at(mdp, item_offset, item);
1816   int count_offset = in_bytes(item_count_offset_fn(start_row));
1817   movl(reg2, DataLayout::counter_increment);
1818   set_mdp_data_at(mdp, count_offset, reg2);
1819   if (start_row > 0) {
1820     jmp(done);
1821   }
1822 }
1823 
1824 // Example state machine code for three profile rows:
1825 //   // main copy of decision tree, rooted at row[0]
1826 //   if (row[0].rec == rec) { row[0].incr(); goto done; }
1827 //   if (row[0].rec != NULL) {
1828 //     // inner copy of decision tree, rooted at row[1]
1829 //     if (row[1].rec == rec) { row[1].incr(); goto done; }
1830 //     if (row[1].rec != NULL) {
1831 //       // degenerate decision tree, rooted at row[2]
1832 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
1833 //       if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
1834 //       row[2].init(rec); goto done;
1835 //     } else {
1836 //       // remember row[1] is empty
1837 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
1838 //       row[1].init(rec); goto done;
1839 //     }
1840 //   } else {
1841 //     // remember row[0] is empty
1842 //     if (row[1].rec == rec) { row[1].incr(); goto done; }
1843 //     if (row[2].rec == rec) { row[2].incr(); goto done; }
1844 //     row[0].init(rec); goto done;
1845 //   }
1846 //   done:
1847 
1848 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1849                                                         Register mdp, Register reg2,
1850                                                         bool is_virtual_call) {
1851   assert(ProfileInterpreter, "must be profiling");
1852   Label done;
1853 
1854   record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
1855 
1856   bind (done);
1857 }
1858 
1859 void InterpreterMacroAssembler::profile_ret(Register return_bci,
1860                                             Register mdp) {
1861   if (ProfileInterpreter) {
1862     Label profile_continue;
1863     uint row;
1864 
1865     // If no method data exists, go to profile_continue.
1866     test_method_data_pointer(mdp, profile_continue);
1867 
1868     // Update the total ret count.
1869     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1870 
1871     for (row = 0; row < RetData::row_limit(); row++) {
1872       Label next_test;
1873 
1874       // See if return_bci is equal to bci[n]:
1875       test_mdp_data_at(mdp,
1876                        in_bytes(RetData::bci_offset(row)),
1877                        return_bci, noreg,
1878                        next_test);
1879 
1880       // return_bci is equal to bci[n].  Increment the count.
1881       increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));
1882 
1883       // The method data pointer needs to be updated to reflect the new target.
1884       update_mdp_by_offset(mdp,
1885                            in_bytes(RetData::bci_displacement_offset(row)));
1886       jmp(profile_continue);
1887       bind(next_test);
1888     }
1889 
1890     update_mdp_for_ret(return_bci);
1891 
1892     bind(profile_continue);
1893   }
1894 }
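
// A sketch of the ret profiling above (pseudocode; RetData keeps row_limit() rows):
//
//   count++;                                            // total ret count
//   for (row = 0; row < RetData::row_limit(); row++) {
//     if (bci[row] == return_bci) {
//       bci_count[row]++; mdp += bci_displacement[row]; goto profile_continue;
//     }
//   }
//   InterpreterRuntime::update_mdp_for_ret(return_bci); // no matching row: ask the runtime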
1895 
1896 
1897 void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
1898   if (ProfileInterpreter) {
1899     Label profile_continue;
1900 
1901     // If no method data exists, go to profile_continue.
1902     test_method_data_pointer(mdp, profile_continue);
1903 
1904     set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
1905 
1906     // The method data pointer needs to be updated.
1907     int mdp_delta = in_bytes(BitData::bit_data_size());
1908     if (TypeProfileCasts) {
1909       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1910     }
1911     update_mdp_by_constant(mdp, mdp_delta);
1912 
1913     bind(profile_continue);
1914   }
1915 }
1916 
1917 
1918 void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
1919   if (ProfileInterpreter && TypeProfileCasts) {
1920     Label profile_continue;
1921 
1922     // If no method data exists, go to profile_continue.
1923     test_method_data_pointer(mdp, profile_continue);
1924 
1925     int count_offset = in_bytes(CounterData::count_offset());
1926     // Back up the address, since we have already bumped the mdp.
1927     count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());
1928 
1929     // *Decrement* the counter.  We expect to see zero or small negatives.
1930     increment_mdp_data_at(mdp, count_offset, true);
1931 
1932     bind (profile_continue);
1933   }
1934 }
1935 
1936 
1937 void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
1938   if (ProfileInterpreter) {
1939     Label profile_continue;
1940 
1941     // If no method data exists, go to profile_continue.
1942     test_method_data_pointer(mdp, profile_continue);
1943 
1944     // The method data pointer needs to be updated.
1945     int mdp_delta = in_bytes(BitData::bit_data_size());
1946     if (TypeProfileCasts) {
1947       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1948 
1949       // Record the object type.
1950       record_klass_in_profile(klass, mdp, reg2, false);
1951       NOT_LP64(assert(reg2 == rdi, "we know how to fix this blown reg");)
1952       NOT_LP64(restore_locals();)         // Restore EDI
1953     }
1954     update_mdp_by_constant(mdp, mdp_delta);
1955 
1956     bind(profile_continue);
1957   }
1958 }
1959 
1960 
1961 void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
1962   if (ProfileInterpreter) {
1963     Label profile_continue;
1964 
1965     // If no method data exists, go to profile_continue.
1966     test_method_data_pointer(mdp, profile_continue);
1967 
1968     // Update the default case count
1969     increment_mdp_data_at(mdp,
1970                           in_bytes(MultiBranchData::default_count_offset()));
1971 
1972     // The method data pointer needs to be updated.
1973     update_mdp_by_offset(mdp,
1974                          in_bytes(MultiBranchData::
1975                                   default_displacement_offset()));
1976 
1977     bind(profile_continue);
1978   }
1979 }
1980 
1981 
1982 void InterpreterMacroAssembler::profile_switch_case(Register index,
1983                                                     Register mdp,
1984                                                     Register reg2) {
1985   if (ProfileInterpreter) {
1986     Label profile_continue;
1987 
1988     // If no method data exists, go to profile_continue.
1989     test_method_data_pointer(mdp, profile_continue);
1990 
1991     // Build the base (index * per_case_size_in_bytes()) +
1992     // case_array_offset_in_bytes()
1993     movl(reg2, in_bytes(MultiBranchData::per_case_size()));
1994     imulptr(index, reg2); // XXX l ?
1995     addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
1996 
1997     // Update the case count
1998     increment_mdp_data_at(mdp,
1999                           index,
2000                           in_bytes(MultiBranchData::relative_count_offset()));
2001 
2002     // The method data pointer needs to be updated.
2003     update_mdp_by_offset(mdp,
2004                          index,
2005                          in_bytes(MultiBranchData::
2006                                   relative_displacement_offset()));
2007 
2008     bind(profile_continue);
2009   }
2010 }
2011 
2012 
2013 
2014 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
2015   if (state == atos) {
2016     MacroAssembler::verify_oop(reg);
2017   }
2018 }
2019 
2020 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2021 #ifndef _LP64
2022   if ((state == ftos && UseSSE < 1) ||
2023       (state == dtos && UseSSE < 2)) {
2024     MacroAssembler::verify_FPU(stack_depth);
2025   }
2026 #endif
2027 }
2028 
2029 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
2030 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
2031                                                         int increment, Address mask,
2032                                                         Register scratch, bool preloaded,
2033                                                         Condition cond, Label* where) {
2034   if (!preloaded) {
2035     movl(scratch, counter_addr);
2036   }
2037   incrementl(scratch, increment);
2038   movl(counter_addr, scratch);
2039   andl(scratch, mask);
2040   jcc(cond, *where);
2041 }
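
// A usage sketch (register, address and label names here are illustrative): to branch
// to an overflow stub whenever (counter & mask) becomes zero, a caller could emit
//
//   increment_mask_and_jump(counter_addr, InvocationCounter::count_increment, mask_addr,
//                           rcx, /*preloaded*/ false, Assembler::zero, &overflow);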
2042 
2043 void InterpreterMacroAssembler::notify_method_entry() {
2044   // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
2045   // track stack depth.  If it is possible to enter interp_only_mode, we add
2046   // the code to check if the event should be sent.
2047   Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
2048   Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
2049   if (JvmtiExport::can_post_interpreter_events()) {
2050     Label L;
2051     NOT_LP64(get_thread(rthread);)
2052     movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
2053     testl(rdx, rdx);
2054     jcc(Assembler::zero, L);
2055     call_VM(noreg, CAST_FROM_FN_PTR(address,
2056                                     InterpreterRuntime::post_method_entry));
2057     bind(L);
2058   }
2059 
2060   {
2061     SkipIfEqual skip(this, &DTraceMethodProbes, false);
2062     NOT_LP64(get_thread(rthread);)
2063     get_method(rarg);
2064     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2065                  rthread, rarg);
2066   }
2067 
2068   // RedefineClasses() tracing support for obsolete method entry
2069   if (log_is_enabled(Trace, redefine, class, obsolete)) {
2070     NOT_LP64(get_thread(rthread);)
2071     get_method(rarg);
2072     call_VM_leaf(
2073       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2074       rthread, rarg);
2075   }
2076 }
2077 
2078 
2079 void InterpreterMacroAssembler::notify_method_exit(
2080     TosState state, NotifyMethodExitMode mode) {
2081   // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
2082   // track stack depth.  If it is possible to enter interp_only_mode, we add
2083   // the code to check if the event should be sent.
2084   Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
2085   Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
2086   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
2087     Label L;
2088     // Note: frame::interpreter_frame_result has a dependency on how the
2089     // method result is saved across the call to post_method_exit. If this
2090     // is changed then the interpreter_frame_result implementation will
2091     // need to be updated too.
2092 
2093     // The template interpreter will leave the result on the top of the stack.
2094     push(state);
2095     NOT_LP64(get_thread(rthread);)
2096     movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
2097     testl(rdx, rdx);
2098     jcc(Assembler::zero, L);
2099     call_VM(noreg,
2100             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
2101     bind(L);
2102     pop(state);
2103   }
2104 
2105   {
2106     SkipIfEqual skip(this, &DTraceMethodProbes, false);
2107     push(state);
2108     NOT_LP64(get_thread(rthread);)
2109     get_method(rarg);
2110     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2111                  rthread, rarg);
2112     pop(state);
2113   }
2114 }