hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp

rev 611 : Merge
   1 #ifdef USE_PRAGMA_IDENT_SRC
   2 #pragma ident "@(#)interp_masm_x86_64.cpp       1.48 07/09/17 09:26:04 JVM"
   3 #endif
   4 /*
   5  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  
  26  */
  27 
  28 #include "incls/_precompiled.incl"
  29 #include "incls/_interp_masm_x86_64.cpp.incl"
  30 
  31 
  32 // Implementation of InterpreterMacroAssembler
  33 
  34 void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
  35                                                   int number_of_arguments) {
  36   // interpreter specific
  37   //
  38   // Note: No need to save/restore bcp & locals (r13 & r14) pointer
  39   //       since these are callee saved registers and no blocking/
  40   //       GC can happen in leaf calls.
  41 #ifdef ASSERT
  42   save_bcp();
  43   { 
  44     Label L;
  45     cmpq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD);
  46     jcc(Assembler::equal, L);
  47     stop("InterpreterMacroAssembler::call_VM_leaf_base:"
  48          " last_sp != NULL");
  49     bind(L);
  50   }
  51 #endif
  52   // super call
  53   MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
  54   // interpreter specific
  55 #ifdef ASSERT
  56   { 
  57     Label L;
  58     cmpq(r13, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize));
  59     jcc(Assembler::equal, L);
  60     stop("InterpreterMacroAssembler::call_VM_leaf_base:"
  61          " r13 not callee saved?");
  62     bind(L);
  63   }
  64   { 
  65     Label L;
  66     cmpq(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize));
  67     jcc(Assembler::equal, L);
  68     stop("InterpreterMacroAssembler::call_VM_leaf_base:"
  69          " r14 not callee saved?");
  70     bind(L);
  71   }
  72 #endif
  73 }
  74 
  75 void InterpreterMacroAssembler::call_VM_base(Register oop_result,
  76                                              Register java_thread,
  77                                              Register last_java_sp,
  78                                              address  entry_point,
  79                                              int      number_of_arguments,
  80                                              bool     check_exceptions) {
  81   // interpreter specific
  82   //
  83   // Note: Could avoid restoring locals ptr (callee saved) - however doesn't
  84   //       really make a difference for these runtime calls, since they are
  85   //       slow anyway. Btw., bcp must be saved/restored since it may change
  86   //       due to GC.
  87   // assert(java_thread == noreg , "not expecting a precomputed java thread");
  88   save_bcp();
  89 #ifdef ASSERT
  90   { 
  91     Label L;
  92     cmpq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD);
  93     jcc(Assembler::equal, L);
  94     stop("InterpreterMacroAssembler::call_VM_leaf_base:"
  95          " last_sp != NULL");
  96     bind(L);
  97   }
  98 #endif /* ASSERT */
  99   // super call
 100   MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
 101                                entry_point, number_of_arguments,
 102                                check_exceptions);
 103   // interpreter specific
 104   restore_bcp();
 105   restore_locals();
 106 }
 107 
 108 
 109 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
 110   if (JvmtiExport::can_pop_frame()) {
 111     Label L;
 112     // Initiate popframe handling only if it is not already being
 113     // processed.  If the flag has the popframe_processing bit set, it
 114     // means that this code is called *during* popframe handling - we
 115     // don't want to reenter.
 116     // This method is only called just after the call into the vm in
 117     // call_VM_base, so the arg registers are available.
 118     movl(c_rarg0, Address(r15_thread, JavaThread::popframe_condition_offset()));
 119     testl(c_rarg0, JavaThread::popframe_pending_bit);
 120     jcc(Assembler::zero, L);
 121     testl(c_rarg0, JavaThread::popframe_processing_bit);
 122     jcc(Assembler::notZero, L);   
 123     // Call Interpreter::remove_activation_preserving_args_entry() to get the
 124     // address of the same-named entrypoint in the generated interpreter code.
 125     call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
 126     jmp(rax);
 127     bind(L);
 128   }
 129 }
 130 
 131 
 132 void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
 133   movq(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
 134   const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset());
 135   const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset());
 136   const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset());
 137   switch (state) {
 138     case atos: movq(rax, oop_addr);
 139                movptr(oop_addr, NULL_WORD);
 140                verify_oop(rax, state);              break;
 141     case ltos: movq(rax, val_addr);                 break;
 142     case btos:                                   // fall through
 143     case ctos:                                   // fall through
 144     case stos:                                   // fall through
 145     case itos: movl(rax, val_addr);                 break;
 146     case ftos: movflt(xmm0, val_addr);              break;
 147     case dtos: movdbl(xmm0, val_addr);              break;
 148     case vtos: /* nothing to do */                  break;
 149     default  : ShouldNotReachHere();
 150   }
 151   // Clean up tos value in the thread object
 152   movl(tos_addr,  (int) ilgl);
 153   movl(val_addr,  (int) NULL_WORD);
 154 }
 155 
 156 
 157 void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
 158   if (JvmtiExport::can_force_early_return()) {
 159     Label L;
 160     movq(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
 161     testq(c_rarg0, c_rarg0);
 162     jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit;
 163 
 164     // Initiate earlyret handling only if it is not already being processed.
 165     // If the flag has the earlyret_processing bit set, it means that this code
 166     // is called *during* earlyret handling - we don't want to reenter.
 167     movl(c_rarg0, Address(c_rarg0, JvmtiThreadState::earlyret_state_offset()));
 168     cmpl(c_rarg0, JvmtiThreadState::earlyret_pending);
 169     jcc(Assembler::notEqual, L);
 170 
 171     // Call Interpreter::remove_activation_early_entry() to get the address of the
 172     // same-named entrypoint in the generated interpreter code.
 173     movq(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
 174     movl(c_rarg0, Address(c_rarg0, JvmtiThreadState::earlyret_tos_offset()));
 175     call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), c_rarg0);
 176     jmp(rax);
 177     bind(L);
 178   }
 179 }
 180 
 181 
 182 void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
 183   Register reg,
 184   int bcp_offset) {
 185   assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
 186   movl(reg, Address(r13, bcp_offset));
 187   bswapl(reg);
 188   shrl(reg, 16);
 189 }
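
A minimal C++ sketch of the computation above: the movl/bswapl/shrl sequence reads the two big-endian index bytes at bcp + offset with a single 32-bit load. The helper is hypothetical (not VM code) and assumes a GCC/Clang-style __builtin_bswap32:

#include <cstdint>
#include <cstring>

// Hypothetical illustration: load 4 bytes, byte-swap, keep the top 16 bits.
// Equivalent to (bcp[offset] << 8) | bcp[offset + 1].
static uint32_t u2_index_at(const uint8_t* bcp, int offset) {
  uint32_t v;
  std::memcpy(&v, bcp + offset, sizeof(v));   // movl reg, Address(r13, offset)
  v = __builtin_bswap32(v);                   // bswapl reg
  return v >> 16;                             // shrl reg, 16
}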
 190 
 191 
 192 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
 193                                                            Register index, 
 194                                                            int bcp_offset) {
 195   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
 196   assert(cache != index, "must use different registers");
 197   load_unsigned_word(index, Address(r13, bcp_offset));
 198   movq(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
 199   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
 200   // convert from field index to ConstantPoolCacheEntry index
 201   shll(index, 2);
 202 }
 203 
 204 
 205 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
 206                                                                Register tmp,
 207                                                                int bcp_offset) {
 208   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
 209   assert(cache != tmp, "must use different register");
 210   load_unsigned_word(tmp, Address(r13, bcp_offset));
 211   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
 212   // convert from field index to ConstantPoolCacheEntry index
 213   // and from word offset to byte offset
 214   shll(tmp, 2 + LogBytesPerWord);
 215   movq(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
 216   // skip past the header
 217   addq(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
 218   addq(cache, tmp);  // construct pointer to cache entry
 219 }
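
A minimal sketch of the address arithmetic above, with hypothetical names standing in for the real cache layout: a ConstantPoolCacheEntry occupies 4 words, so the shift by (2 + LogBytesPerWord) turns the field index directly into a byte offset, which is then added to the cache base plus the header size.

#include <cstddef>
#include <cstdint>

static const int    kLogBytesPerWord = 3;                 // 64-bit words
static const size_t kEntryBytes      = 4 * sizeof(void*); // 4 * wordSize

// Hypothetical helper mirroring the shll/addq/addq sequence above.
static inline char* cache_entry_at(char* cache, size_t header_bytes,
                                   uint32_t field_index) {
  size_t byte_offset = (size_t)field_index << (2 + kLogBytesPerWord);
  // byte_offset == field_index * kEntryBytes
  return cache + header_bytes + byte_offset;
}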
 220 
 221 
 222 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 223 // subtype of super_klass.
 224 //
 225 // Args:
 226 //      rax: superklass
 227 //      Rsub_klass: subklass
 228 //
 229 // Kills:
 230 //      rcx, rdi
 231 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 232                                                   Label& ok_is_subtype) {
 233   assert(Rsub_klass != rax, "rax holds superklass");
 234   assert(Rsub_klass != r14, "r14 holds locals");
 235   assert(Rsub_klass != r13, "r13 holds bcp");
 236   assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
 237   assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");
 238 
 239   Label not_subtype, loop;
 240 
 241   // Profile the not-null value's klass.
 242   profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, rdi
 243 
 244   // Load the super-klass's check offset into rcx
 245   movl(rcx, Address(rax, sizeof(oopDesc) + 
 246                     Klass::super_check_offset_offset_in_bytes()));
 247   // Load from the sub-klass's super-class display list, or a 1-word
 248   // cache of the secondary superclass list, or a failing value with a
 249   // sentinel offset if the super-klass is an interface or
 250   // exceptionally deep in the Java hierarchy and we have to scan the
 251   // secondary superclass list the hard way.  See if we get an
 252   // immediate positive hit
 253   cmpq(rax, Address(Rsub_klass, rcx, Address::times_1));
 254   jcc(Assembler::equal,ok_is_subtype);
 255 
 256   // Check for immediate negative hit
 257   cmpl(rcx, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
 258   jcc( Assembler::notEqual, not_subtype );
 259   // Check for self
 260   cmpq(Rsub_klass, rax);
 261   jcc(Assembler::equal, ok_is_subtype);
 262 
 263   // Now do a linear scan of the secondary super-klass chain.
 264   movq(rdi, Address(Rsub_klass, sizeof(oopDesc) + 
 265                     Klass::secondary_supers_offset_in_bytes()));
 266   // rdi holds the objArrayOop of secondary supers.
 267   // Load the array length
 268   movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); 
 269   // Skip to start of data; also clear Z flag in case rcx is zero
 270   addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
 271   // Scan rcx words at [rdi] for occurrence of rax
 272   // Set NZ/Z based on last compare
 273   repne_scan();
 274   // Not equal?
 275   jcc(Assembler::notEqual, not_subtype);
 276   // Must be equal but missed in cache.  Update cache.
 277   movq(Address(Rsub_klass, sizeof(oopDesc) + 
 278                Klass::secondary_super_cache_offset_in_bytes()), rax);
 279   jmp(ok_is_subtype);
 280 
 281   bind(not_subtype);
 282   profile_typecheck_failed(rcx); // blows rcx
 283 }
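
A minimal C++ sketch of the subtype check generated above. The structure below is a hypothetical stand-in for the real klass layout (the actual fields live behind sizeof(oopDesc) and the secondary list is length-prefixed rather than null-terminated); only the control flow mirrors the assembly: display/cache probe, negative-hit cutoff, self check, linear scan, cache update.

#include <cstddef>

struct SketchKlass {                        // hypothetical, not the VM's Klass
  SketchKlass*  primary_supers[8];          // display of superclasses by depth
  SketchKlass*  secondary_super_cache;      // 1-word cache of the last slow hit
  int           super_check_offset;         // byte offset probed in a subclass
  SketchKlass** secondary_supers;           // simplified: null-terminated list
};

static bool sketch_is_subtype(SketchKlass* sub, SketchKlass* super) {
  // Immediate positive hit: probe sub at super->super_check_offset.
  SketchKlass* probe =
      *(SketchKlass**)((char*)sub + super->super_check_offset);
  if (probe == super) return true;
  // Immediate negative hit: a display probe that missed is final.
  if (super->super_check_offset !=
      (int)offsetof(SketchKlass, secondary_super_cache)) return false;
  // Check for self.
  if (sub == super) return true;
  // Linear scan of the secondary supers (repne_scan above).
  for (SketchKlass** p = sub->secondary_supers; *p != nullptr; ++p) {
    if (*p == super) {
      sub->secondary_super_cache = super;   // hit: update the 1-word cache
      return true;
    }
  }
  return false;
}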
 284 
 285 
 286 // Java Expression Stack
 287 
 288 #ifdef ASSERT
 289 // Verifies that the stack tag matches.  Must be called before the stack
 290 // value is popped off the stack.
 291 void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
 292   if (TaggedStackInterpreter) {
 293     frame::Tag tag = t;
 294     if (t == frame::TagCategory2) {
 295       tag = frame::TagValue;
 296       Label hokay;
 297       cmpq(Address(rsp, 3*wordSize), (int)tag);
 298       jcc(Assembler::equal, hokay);
 299       stop("Java Expression stack tag high value is bad");
 300       bind(hokay);
 301     }
 302     Label okay;
 303     cmpq(Address(rsp, wordSize), (int)tag);
 304     jcc(Assembler::equal, okay);
 305     // Also check whether the stack value is zero; if so, the tag might
 306     // not have been set coming from deopt.
 307     cmpq(Address(rsp, 0), 0);
 308     jcc(Assembler::equal, okay);
 309     stop("Java Expression stack tag value is bad");
 310     bind(okay);
 311   }
 312 }
 313 #endif // ASSERT
 314 
 315 void InterpreterMacroAssembler::pop_ptr(Register r) {
 316   debug_only(verify_stack_tag(frame::TagReference));
 317   popq(r);
 318   if (TaggedStackInterpreter) addq(rsp, 1 * wordSize);
 319 }
 320 
 321 void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
 322   popq(r);
 323   if (TaggedStackInterpreter) popq(tag);
 324 }
 325 
 326 void InterpreterMacroAssembler::pop_i(Register r) {
 327   // XXX can't use popq currently, upper half non clean
 328   debug_only(verify_stack_tag(frame::TagValue));
 329   movl(r, Address(rsp, 0));
 330   addq(rsp, wordSize);
 331   if (TaggedStackInterpreter) addq(rsp, 1 * wordSize);
 332 }
 333 
 334 void InterpreterMacroAssembler::pop_l(Register r) {
 335   debug_only(verify_stack_tag(frame::TagCategory2));
 336   movq(r, Address(rsp, 0));
 337   addq(rsp, 2 * Interpreter::stackElementSize());
 338 }
 339 
 340 void InterpreterMacroAssembler::pop_f(XMMRegister r) {
 341   debug_only(verify_stack_tag(frame::TagValue));
 342   movflt(r, Address(rsp, 0));
 343   addq(rsp, wordSize);
 344   if (TaggedStackInterpreter) addq(rsp, 1 * wordSize);
 345 }
 346 
 347 void InterpreterMacroAssembler::pop_d(XMMRegister r) {
 348   debug_only(verify_stack_tag(frame::TagCategory2));
 349   movdbl(r, Address(rsp, 0));
 350   addq(rsp, 2 * Interpreter::stackElementSize());
 351 }
 352 
 353 void InterpreterMacroAssembler::push_ptr(Register r) {
 354   if (TaggedStackInterpreter) pushq(frame::TagReference);
 355   pushq(r);
 356 }
 357 
 358 void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
 359   if (TaggedStackInterpreter) pushq(tag);
 360   pushq(r);
 361 }
 362 
 363 void InterpreterMacroAssembler::push_i(Register r) {
 364   if (TaggedStackInterpreter) pushq(frame::TagValue);
 365   pushq(r);
 366 }
 367 
 368 void InterpreterMacroAssembler::push_l(Register r) {
 369   if (TaggedStackInterpreter) {
 370     pushq(frame::TagValue);
 371     subq(rsp, 1 * wordSize);
 372     pushq(frame::TagValue);
 373     subq(rsp, 1 * wordSize);
 374   } else {
 375     subq(rsp, 2 * wordSize);
 376   }
 377   movq(Address(rsp, 0), r);
 378 }
 379 
 380 void InterpreterMacroAssembler::push_f(XMMRegister r) {
 381   if (TaggedStackInterpreter) pushq(frame::TagValue);
 382   subq(rsp, wordSize);
 383   movflt(Address(rsp, 0), r);
 384 }
 385 
 386 void InterpreterMacroAssembler::push_d(XMMRegister r) {
 387   if (TaggedStackInterpreter) {
 388     pushq(frame::TagValue);
 389     subq(rsp, 1 * wordSize);
 390     pushq(frame::TagValue);
 391     subq(rsp, 1 * wordSize);
 392   } else {
 393     subq(rsp, 2 * wordSize);
 394   }
 395   movdbl(Address(rsp, 0), r);
 396 }
 397 
 398 void InterpreterMacroAssembler::pop(TosState state) {
 399   switch (state) {
 400   case atos: pop_ptr();                 break;
 401   case btos:                            
 402   case ctos: 
 403   case stos: 
 404   case itos: pop_i();                   break;
 405   case ltos: pop_l();                   break;
 406   case ftos: pop_f();                   break;
 407   case dtos: pop_d();                   break;
 408   case vtos: /* nothing to do */        break;
 409   default:   ShouldNotReachHere();
 410   }
 411   verify_oop(rax, state);
 412 }
 413 
 414 void InterpreterMacroAssembler::push(TosState state) {
 415   verify_oop(rax, state);
 416   switch (state) {
 417   case atos: push_ptr();                break;
 418   case btos: 
 419   case ctos: 
 420   case stos: 
 421   case itos: push_i();                  break;
 422   case ltos: push_l();                  break;
 423   case ftos: push_f();                  break;
 424   case dtos: push_d();                  break;
 425   case vtos: /* nothing to do */        break;
 426   default  : ShouldNotReachHere();
 427   }
 428 }
 429 
 430 
 431 // Tagged stack helpers for swap and dup
 432 void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
 433                                                  Register tag) {
 434   movq(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
 435   if (TaggedStackInterpreter) {
 436     movq(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
 437   }
 438 }
 439 
 440 void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
 441                                                   Register tag) {
 442   movq(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
 443   if (TaggedStackInterpreter) {
 444     movq(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
 445   }
 446 }
 447 
 448 
 449 // Tagged local support
 450 void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
 451   if (TaggedStackInterpreter) {
 452     if (tag == frame::TagCategory2) {
 453       mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), 
 454            (intptr_t)frame::TagValue);
 455       mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), 
 456            (intptr_t)frame::TagValue);
 457     } else {
 458       mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (intptr_t)tag);
 459     }
 460   }
 461 }
 462 
 463 void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
 464   if (TaggedStackInterpreter) {
 465     if (tag == frame::TagCategory2) {
 466       mov64(Address(r14, idx, Address::times_8,
 467                   Interpreter::local_tag_offset_in_bytes(1)), (intptr_t)frame::TagValue);
 468       mov64(Address(r14, idx, Address::times_8,
 469                   Interpreter::local_tag_offset_in_bytes(0)), (intptr_t)frame::TagValue);
 470     } else {
 471       mov64(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)),
 472            (intptr_t)tag);
 473     }
 474   }
 475 }
 476 
 477 void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
 478   if (TaggedStackInterpreter) {
 479     // can only be TagValue or TagReference
 480     movq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), tag);
 481   }
 482 }
 483 
 484 
 485 void InterpreterMacroAssembler::tag_local(Register tag, int n) {
 486   if (TaggedStackInterpreter) {
 487     // can only be TagValue or TagReference
 488     movq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), tag);
 489   }
 490 }
 491  
 492 #ifdef ASSERT
 493 void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
 494   if (TaggedStackInterpreter) {
 495     frame::Tag t = tag;
 496     if (tag == frame::TagCategory2) {
 497       Label nbl;
 498       t = frame::TagValue;  // change to what is stored in locals
 499       cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), (int)t);
 500       jcc(Assembler::equal, nbl);
 501       stop("Local tag is bad for long/double");
 502       bind(nbl);
 503     }
 504     Label notBad;
 505     cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int)t);
 506     jcc(Assembler::equal, notBad);
 507     // Also check whether the local value is zero; if so, the tag might
 508     // not have been set coming from deopt.
 509     cmpq(Address(r14, Interpreter::local_offset_in_bytes(n)), 0);
 510     jcc(Assembler::equal, notBad);
 511     stop("Local tag is bad");
 512     bind(notBad);
 513   }
 514 }
 515 
 516 void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
 517   if (TaggedStackInterpreter) {
 518     frame::Tag t = tag;
 519     if (tag == frame::TagCategory2) {
 520       Label nbl;
 521       t = frame::TagValue;  // change to what is stored in locals
 522       cmpq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(1)), (int)t);
 523       jcc(Assembler::equal, nbl);
 524       stop("Local tag is bad for long/double");
 525       bind(nbl);
 526     }
 527     Label notBad;
 528     cmpq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), (int)t);
 529     jcc(Assembler::equal, notBad);
 530     // Also check whether the local value is zero; if so, the tag might
 531     // not have been set coming from deopt.
 532     cmpq(Address(r14, idx, Address::times_8, Interpreter::local_offset_in_bytes(0)), 0);
 533     jcc(Assembler::equal, notBad);
 534     stop("Local tag is bad");
 535     bind(notBad);
 536   }
 537 }
 538 #endif // ASSERT
 539 
 540 
 541 void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
 542   MacroAssembler::call_VM_leaf_base(entry_point, 0);
 543 }
 544 
 545 
 546 void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
 547                                                    Register arg_1) {
 548   if (c_rarg0 != arg_1) {
 549     movq(c_rarg0, arg_1);
 550   }
 551   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 552 }
 553 
 554 
 555 void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
 556                                                    Register arg_1,
 557                                                    Register arg_2) {
 558   assert(c_rarg0 != arg_2, "smashed argument");
 559   assert(c_rarg1 != arg_1, "smashed argument");
 560   if (c_rarg0 != arg_1) {
 561     movq(c_rarg0, arg_1);
 562   }
 563   if (c_rarg1 != arg_2) {
 564     movq(c_rarg1, arg_2);
 565   }
 566   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 567 }
 568 
 569 void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
 570                                                    Register arg_1,
 571                                                    Register arg_2,
 572                                                    Register arg_3) {
 573   assert(c_rarg0 != arg_2, "smashed argument");
 574   assert(c_rarg0 != arg_3, "smashed argument");
 575   assert(c_rarg1 != arg_1, "smashed argument");
 576   assert(c_rarg1 != arg_3, "smashed argument");
 577   assert(c_rarg2 != arg_1, "smashed argument");
 578   assert(c_rarg2 != arg_2, "smashed argument");
 579   if (c_rarg0 != arg_1) {
 580     movq(c_rarg0, arg_1);
 581   }
 582   if (c_rarg1 != arg_2) {
 583     movq(c_rarg1, arg_2);
 584   }
 585   if (c_rarg2 != arg_3) {
 586     movq(c_rarg2, arg_3);
 587   }
 588   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 589 }
 590 
 591 // Jump to from_interpreted entry of a call unless single stepping is possible
 592 // in this thread, in which case we must call the i2i entry
 593 void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
 594   // set sender sp
 595   leaq(r13, Address(rsp, wordSize));
 596   // record last_sp
 597   movq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), r13);
 598 
 599   if (JvmtiExport::can_post_interpreter_events()) {
 600     Label run_compiled_code;
 601     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
 602     // compiled code in threads for which the event is enabled.  Check here for
 603     // interp_only_mode if these events CAN be enabled.
 604     get_thread(temp);
 605     // interp_only is an int; on little endian it is sufficient to test only the byte
 606     // Is a cmpl faster?
 607     cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
 608     jcc(Assembler::zero, run_compiled_code);
 609     jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
 610     bind(run_compiled_code);
 611   }
 612 
 613   jmp(Address(method, methodOopDesc::from_interpreted_offset()));
 614 
 615 }
 616 
 617 
 618 // The following two routines provide a hook so that an implementation
 619 // can schedule the dispatch in two parts.  amd64 does not do this.
 620 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
 621   // Nothing amd64 specific to be done here
 622 }
 623 
 624 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
 625   dispatch_next(state, step);
 626 }
 627 
 628 void InterpreterMacroAssembler::dispatch_base(TosState state, 
 629                                               address* table,
 630                                               bool verifyoop) {
 631   verify_FPU(1, state);
 632   if (VerifyActivationFrameSize) {
 633     Label L;
 634     movq(rcx, rbp);
 635     subq(rcx, rsp);
 636     int min_frame_size = 
 637       (frame::link_offset - frame::interpreter_frame_initial_sp_offset) *
 638       wordSize;
 639     cmpq(rcx, min_frame_size);
 640     jcc(Assembler::greaterEqual, L);
 641     stop("broken stack frame");
 642     bind(L);
 643   }
 644   if (verifyoop) {
 645     verify_oop(rax, state);
 646   }
 647   lea(rscratch1, ExternalAddress((address)table));
 648   jmp(Address(rscratch1, rbx, Address::times_8));
 649 }
 650 
 651 void InterpreterMacroAssembler::dispatch_only(TosState state) {
 652   dispatch_base(state, Interpreter::dispatch_table(state));
 653 }
 654 
 655 void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
 656   dispatch_base(state, Interpreter::normal_table(state));
 657 }
 658 
 659 void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
 660   dispatch_base(state, Interpreter::normal_table(state), false);
 661 }
 662 
 663 
 664 void InterpreterMacroAssembler::dispatch_next(TosState state, int step) {
 665   // load next bytecode (load before advancing r13 to prevent AGI)
 666   load_unsigned_byte(rbx, Address(r13, step));
 667   // advance r13
 668   incrementq(r13, step);
 669   dispatch_base(state, Interpreter::dispatch_table(state));
 670 }
 671 
 672 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
 673   // load current bytecode
 674   load_unsigned_byte(rbx, Address(r13, 0));
 675   dispatch_base(state, table);
 676 }
 677 
 678 // remove activation
 679 //
 680 // Unlock the receiver if this is a synchronized method.
 681 // Unlock any Java monitors from synchronized blocks.
 682 // Remove the activation from the stack.
 683 //
 684 // If there are locked Java monitors
 685 //    If throw_monitor_exception
 686 //       throws IllegalMonitorStateException
 687 //    Else if install_monitor_exception
 688 //       installs IllegalMonitorStateException
 689 //    Else
 690 //       no error processing
 691 void InterpreterMacroAssembler::remove_activation(
 692         TosState state,
 693         Register ret_addr,
 694         bool throw_monitor_exception,
 695         bool install_monitor_exception,
 696         bool notify_jvmdi) {
 697   // Note: Registers rdx and xmm0 may be in use for the
 698   // result check if this is a synchronized method
 699   Label unlocked, unlock, no_unlock;
 700 
 701   // get the value of _do_not_unlock_if_synchronized into rdx
 702   const Address do_not_unlock_if_synchronized(r15_thread,
 703     in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
 704   movbool(rdx, do_not_unlock_if_synchronized);
 705   movbool(do_not_unlock_if_synchronized, false); // reset the flag
 706 
 707   // get method access flags
 708   movq(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
 709   movl(rcx, Address(rbx, methodOopDesc::access_flags_offset()));
 710   testl(rcx, JVM_ACC_SYNCHRONIZED);
 711   jcc(Assembler::zero, unlocked);
 712  
 713   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
 714   // is set.
 715   testbool(rdx);
 716   jcc(Assembler::notZero, no_unlock);
 717  
 718   // unlock monitor
 719   push(state); // save result
 720     
 721   // BasicObjectLock will be first in list, since this is a
 722   // synchronized method. However, need to check that the object has
 723   // not been unlocked by an explicit monitorexit bytecode.
 724   const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * 
 725                         wordSize - (int) sizeof(BasicObjectLock));
 726   // We use c_rarg1 so that if we go slow path it will be the correct
 727   // register for unlock_object to pass to VM directly
 728   leaq(c_rarg1, monitor); // address of first monitor
 729   
 730   movq(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
 731   testq(rax, rax);
 732   jcc(Assembler::notZero, unlock);
 733                                   
 734   pop(state);
 735   if (throw_monitor_exception) {
 736     // Entry already unlocked, need to throw exception
 737     call_VM(noreg, CAST_FROM_FN_PTR(address, 
 738                    InterpreterRuntime::throw_illegal_monitor_state_exception));
 739     should_not_reach_here();
 740   } else {
 741     // Monitor already unlocked during a stack unroll. If requested,
 742     // install an illegal_monitor_state_exception.  Continue with
 743     // stack unrolling.
 744     if (install_monitor_exception) {
 745       call_VM(noreg, CAST_FROM_FN_PTR(address, 
 746                      InterpreterRuntime::new_illegal_monitor_state_exception));
 747     }
 748     jmp(unlocked);
 749   }
 750 
 751   bind(unlock);  
 752   unlock_object(c_rarg1);
 753   pop(state);
 754 
 755   // Check for block-structured locking (i.e., that all locked
 756   // objects have been unlocked)
 757   bind(unlocked);  
 758 
 759   // rax: Might contain return value
 760 
 761   // Check that all monitors are unlocked
 762   {
 763     Label loop, exception, entry, restart;
 764     const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
 765     const Address monitor_block_top(
 766         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
 767     const Address monitor_block_bot(
 768         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
 769     
 770     bind(restart);
 771     // We use c_rarg1 so that if we go slow path it will be the correct
 772     // register for unlock_object to pass to VM directly
 773     movq(c_rarg1, monitor_block_top); // points to current entry, starting
 774                                   // with top-most entry
 775     leaq(rbx, monitor_block_bot); // points to word before bottom of
 776                                   // monitor block
 777     jmp(entry);
 778           
 779     // Entry already locked, need to throw exception
 780     bind(exception); 
 781 
 782     if (throw_monitor_exception) {
 783       // Throw exception      
 784       MacroAssembler::call_VM(noreg, 
 785                               CAST_FROM_FN_PTR(address, InterpreterRuntime::
 786                                    throw_illegal_monitor_state_exception));
 787       should_not_reach_here();
 788     } else {
 789       // Stack unrolling. Unlock object and install illegal_monitor_exception.
 790       // Unlock does not block, so don't have to worry about the frame.
 791       // We don't have to preserve c_rarg1 since we are going to throw an exception.
 792 
 793       push(state);
 794       unlock_object(c_rarg1);
 795       pop(state);
 796       
 797       if (install_monitor_exception) {
 798         call_VM(noreg, CAST_FROM_FN_PTR(address, 
 799                                         InterpreterRuntime::
 800                                         new_illegal_monitor_state_exception));
 801       }
 802 
 803       jmp(restart);
 804     }
 805   
 806     bind(loop);
 807     // check if current entry is used
 808     cmpq(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), (int) NULL);
 809     jcc(Assembler::notEqual, exception);
 810           
 811     addq(c_rarg1, entry_size); // otherwise advance to next entry
 812     bind(entry);
 813     cmpq(c_rarg1, rbx); // check if bottom reached
 814     jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
 815   }        
 816 
 817   bind(no_unlock);
 818 
 819   // jvmti support
 820   if (notify_jvmdi) {
 821     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 822   } else {
 823     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 824   }
 825 
 826   // remove activation
 827   // get sender sp
 828   movq(rbx, 
 829        Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
 830   leave();                           // remove frame anchor
 831   popq(ret_addr);                    // get return address
 832   movq(rsp, rbx);                    // set sp to sender sp
 833 }
 834 
 835 // Lock object
 836 //
 837 // Args:
 838 //      c_rarg1: BasicObjectLock to be used for locking
 839 // 
 840 // Kills:
 841 //      rax
 842 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
 843 //      rscratch1, rscratch2 (scratch regs)
 844 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
 845   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
 846 
 847   if (UseHeavyMonitors) {
 848     call_VM(noreg, 
 849             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 850             lock_reg);
 851   } else {
 852     Label done;
 853 
 854     const Register swap_reg = rax; // Must use rax for cmpxchg instruction
 855     const Register obj_reg = c_rarg3; // Will contain the oop
 856 
 857     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
 858     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
 859     const int mark_offset = lock_offset + 
 860                             BasicLock::displaced_header_offset_in_bytes(); 
 861 
 862     Label slow_case;
 863     
 864     // Load object pointer into obj_reg %c_rarg3
 865     movq(obj_reg, Address(lock_reg, obj_offset));
 866 
 867     if (UseBiasedLocking) {
 868       biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, done, &slow_case);
 869     }
 870 
 871     // Load immediate 1 into swap_reg %rax
 872     movl(swap_reg, 1);
 873 
 874     // Load (object->mark() | 1) into swap_reg %rax
 875     orq(swap_reg, Address(obj_reg, 0));
 876 
 877     // Save (object->mark() | 1) into BasicLock's displaced header
 878     movq(Address(lock_reg, mark_offset), swap_reg);
 879 
 880     assert(lock_offset == 0, 
 881            "displaced header must be first word in BasicObjectLock");
 882 
 883     if (os::is_MP()) lock();
 884     cmpxchgq(lock_reg, Address(obj_reg, 0));  
 885     if (PrintBiasedLockingStatistics) {
 886       cond_inc32(Assembler::zero,
 887                  ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
 888     }
 889     jcc(Assembler::zero, done);
 890 
 891     // Test if the oopMark is an obvious stack pointer, i.e.,
 892     //  1) (mark & 7) == 0, and
 893     //  2) rsp <= mark < rsp + os::pagesize()
 894     //
 895     // These 3 tests can be done by evaluating the following 
 896     // expression: ((mark - rsp) & (7 - os::vm_page_size())),
 897     // assuming both stack pointer and pagesize have their
 898     // least significant 3 bits clear.
 899     // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
 900     subq(swap_reg, rsp);
 901     andq(swap_reg, 7 - os::vm_page_size());
 902 
 903     // Save the test result, for recursive case, the result is zero
 904     movq(Address(lock_reg, mark_offset), swap_reg);
 905 
 906     if (PrintBiasedLockingStatistics) {
 907       cond_inc32(Assembler::zero,
 908                  ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
 909     }
 910     jcc(Assembler::zero, done);
 911 
 912     bind(slow_case);
 913 
 914     // Call the runtime routine for slow case
 915     call_VM(noreg, 
 916             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 917             lock_reg);
 918 
 919     bind(done);
 920   }   
 921 }
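
A minimal C++ sketch of the recursion test above (hypothetical helper, not VM code). Assuming the page size is a power of two and both rsp and the mark have their low three bits clear, ((mark - rsp) & (7 - page_size)) is zero exactly when the mark is 8-byte aligned and lies in [rsp, rsp + page_size), i.e. it is a pointer into the current stack; the generated code stores that masked value as the displaced header, so zero also marks the recursive case.

#include <cstdint>

static inline bool mark_points_into_stack(uint64_t mark, uint64_t rsp,
                                          uint64_t page_size) {
  // 7 - page_size (as an unsigned value) keeps the low 3 bits and every bit
  // at or above the page size, so the masked difference is zero only for an
  // 8-byte-aligned mark in [rsp, rsp + page_size).
  return ((mark - rsp) & (7 - page_size)) == 0;
}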
 922 
 923 
 924 // Unlocks an object. Used in monitorexit bytecode and
 925 // remove_activation.  Throws an IllegalMonitorException if object is
 926 // not locked by current thread.
 927 //
 928 // Args:
 929 //      c_rarg1: BasicObjectLock for lock
 930 //
 931 // Kills:
 932 //      rax
 933 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
 934 //      rscratch1, rscratch2 (scratch regs)
 935 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
 936   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be rarg1");
 937 
 938   if (UseHeavyMonitors) {
 939     call_VM(noreg, 
 940             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
 941             lock_reg);
 942   } else {
 943     Label done;
 944 
 945     const Register swap_reg   = rax;  // Must use rax for cmpxchg instruction
 946     const Register header_reg = c_rarg2;  // Will contain the old oopMark
 947     const Register obj_reg    = c_rarg3;  // Will contain the oop
 948 
 949     save_bcp(); // Save in case of exception
 950 
 951     // Convert from BasicObjectLock structure to object and BasicLock
 952     // structure Store the BasicLock address into %rax
 953     leaq(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
 954 
 955     // Load oop into obj_reg(%c_rarg3)
 956     movq(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
 957 
 958     // Free entry
 959     movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);
 960 
 961     if (UseBiasedLocking) {
 962       biased_locking_exit(obj_reg, header_reg, done);
 963     }
 964 
 965     // Load the old header from BasicLock structure
 966     movq(header_reg, Address(swap_reg, 
 967                              BasicLock::displaced_header_offset_in_bytes()));
 968 
 969     // Test for recursion
 970     testq(header_reg, header_reg);
 971 
 972     // zero for recursive case
 973     jcc(Assembler::zero, done);
 974     
 975     // Atomic swap back the old header
 976     if (os::is_MP()) lock();
 977     cmpxchgq(header_reg, Address(obj_reg, 0));
 978 
 979     // zero for recursive case
 980     jcc(Assembler::zero, done);
 981 
 982     // Call the runtime routine for slow case.
 983     movq(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), 
 984          obj_reg); // restore obj
 985     call_VM(noreg, 
 986             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
 987             lock_reg);
 988 
 989     bind(done);
 990 
 991     restore_bcp();
 992   }
 993 }
 994 
 995 
 996 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, 
 997                                                          Label& zero_continue) {
 998   assert(ProfileInterpreter, "must be profiling interpreter");
 999   movq(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize));
1000   testq(mdp, mdp);
1001   jcc(Assembler::zero, zero_continue);
1002 }
1003 
1004 
1005 // Set the method data pointer for the current bcp.
1006 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1007   assert(ProfileInterpreter, "must be profiling interpreter");
1008   Label zero_continue;
1009   pushq(rax);
1010   pushq(rbx);
1011 
1012   get_method(rbx);
1013   // Test MDO to avoid the call if it is NULL.
1014   movq(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
1015   testq(rax, rax);
1016   jcc(Assembler::zero, zero_continue);
1017 
1018   // rbx: method
1019   // r13: bcp
1020   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13);
1021   // rax: mdi
1022 
1023   movq(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
1024   testq(rbx, rbx);
1025   jcc(Assembler::zero, zero_continue);
1026   addq(rbx, in_bytes(methodDataOopDesc::data_offset()));
1027   addq(rbx, rax);
1028   movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
1029 
1030   bind(zero_continue);
1031   popq(rbx);
1032   popq(rax);
1033 }
1034 
1035 void InterpreterMacroAssembler::verify_method_data_pointer() {
1036   assert(ProfileInterpreter, "must be profiling interpreter");
1037 #ifdef ASSERT
1038   Label verify_continue;
1039   pushq(rax);
1040   pushq(rbx);
1041   pushq(c_rarg3);
1042   pushq(c_rarg2);
1043   test_method_data_pointer(c_rarg3, verify_continue); // If mdp is zero, continue
1044   get_method(rbx);
1045 
1046   // If the mdp is valid, it will point to a DataLayout header which is
1047   // consistent with the bcp.  The converse is highly probable also.
1048   load_unsigned_word(c_rarg2, 
1049                      Address(c_rarg3, in_bytes(DataLayout::bci_offset())));
1050   addq(c_rarg2, Address(rbx, methodOopDesc::const_offset()));
1051   leaq(c_rarg2, Address(c_rarg2, constMethodOopDesc::codes_offset()));
1052   cmpq(c_rarg2, r13);
1053   jcc(Assembler::equal, verify_continue);
1054   // rbx: method
1055   // r13: bcp
1056   // c_rarg3: mdp
1057   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
1058                rbx, r13, c_rarg3);
1059   bind(verify_continue);
1060   popq(c_rarg2);
1061   popq(c_rarg3);
1062   popq(rbx);
1063   popq(rax);
1064 #endif // ASSERT
1065 }
1066 
1067 
1068 void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, 
1069                                                 int constant, 
1070                                                 Register value) {
1071   assert(ProfileInterpreter, "must be profiling interpreter");
1072   Address data(mdp_in, constant);
1073   movq(data, value);
1074 }
1075 
1076 
1077 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
1078                                                       int constant,
1079                                                       bool decrement) {
1080   // Counter address
1081   Address data(mdp_in, constant);
1082 
1083   increment_mdp_data_at(data, decrement);
1084 }
1085 
1086 void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
1087                                                       bool decrement) {
1088   assert(ProfileInterpreter, "must be profiling interpreter");
1089 
1090   if (decrement) {
1091     // Decrement the register.  Set condition codes.
1092     addq(data, -DataLayout::counter_increment);
1093     // If the decrement causes the counter to overflow, stay negative
1094     Label L;
1095     jcc(Assembler::negative, L);
1096     addq(data, DataLayout::counter_increment);
1097     bind(L);
1098   } else {
1099     assert(DataLayout::counter_increment == 1,
1100            "flow-free idiom only works with 1");
1101     // Increment the register.  Set carry flag.
1102     addq(data, DataLayout::counter_increment);
1103     // If the increment causes the counter to overflow, pull back by 1.
1104     sbbq(data, 0);
1105   }
1106 }
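
A minimal C++ sketch of the branch-free increment above (hypothetical helper, not VM code, assuming DataLayout::counter_increment == 1 as the assert requires): addq sets the carry flag when the counter wraps to zero and the following sbbq subtracts that carry, so the counter saturates at its maximum instead of wrapping.

#include <cstdint>

static inline void saturating_increment(uint64_t& counter) {
  uint64_t sum   = counter + 1;          // addq data, DataLayout::counter_increment
  uint64_t carry = (sum == 0) ? 1 : 0;   // carry out of the addition
  counter = sum - carry;                 // sbbq data, 0
}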
1107 
1108 
1109 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
1110                                                       Register reg,
1111                                                       int constant,
1112                                                       bool decrement) {
1113   Address data(mdp_in, reg, Address::times_1, constant);
1114 
1115   increment_mdp_data_at(data, decrement);
1116 }
1117 
1118 void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in, 
1119                                                 int flag_byte_constant) {
1120   assert(ProfileInterpreter, "must be profiling interpreter");
1121   int header_offset = in_bytes(DataLayout::header_offset());
1122   int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant);
1123   // Set the flag
1124   orl(Address(mdp_in, header_offset), header_bits);
1125 }
1126 
1127 
1128 
1129 void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
1130                                                  int offset,
1131                                                  Register value,
1132                                                  Register test_value_out,
1133                                                  Label& not_equal_continue) {
1134   assert(ProfileInterpreter, "must be profiling interpreter");
1135   if (test_value_out == noreg) {
1136     cmpq(value, Address(mdp_in, offset));
1137   } else {
1138     // Put the test value into a register, so caller can use it:
1139     movq(test_value_out, Address(mdp_in, offset));
1140     cmpq(test_value_out, value);
1141   }
1142   jcc(Assembler::notEqual, not_equal_continue);
1143 }
1144 
1145 
1146 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
1147                                                      int offset_of_disp) {
1148   assert(ProfileInterpreter, "must be profiling interpreter");
1149   Address disp_address(mdp_in, offset_of_disp);
1150   addq(mdp_in, disp_address);
1151   movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
1152 }
1153 
1154 
1155 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, 
1156                                                      Register reg,
1157                                                      int offset_of_disp) {
1158   assert(ProfileInterpreter, "must be profiling interpreter");
1159   Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
1160   addq(mdp_in, disp_address);
1161   movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
1162 }
1163 
1164 
1165 void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
1166                                                        int constant) {
1167   assert(ProfileInterpreter, "must be profiling interpreter");
1168   addq(mdp_in, constant);
1169   movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
1170 }
1171 
1172 
1173 void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
1174   assert(ProfileInterpreter, "must be profiling interpreter");
1175   pushq(return_bci); // save/restore across call_VM
1176   call_VM(noreg, 
1177           CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
1178           return_bci);
1179   popq(return_bci);
1180 }
1181 
1182 
1183 void InterpreterMacroAssembler::profile_taken_branch(Register mdp, 
1184                                                      Register bumped_count) {
1185   if (ProfileInterpreter) {
1186     Label profile_continue;
1187 
1188     // If no method data exists, go to profile_continue.
1189     // Otherwise, assign to mdp
1190     test_method_data_pointer(mdp, profile_continue);
1191 
1192     // We are taking a branch.  Increment the taken count.
1193     // We inline increment_mdp_data_at to return bumped_count in a register
1194     //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1195     Address data(mdp, in_bytes(JumpData::taken_offset()));
1196     movq(bumped_count, data);
1197     assert(DataLayout::counter_increment == 1,
1198             "flow-free idiom only works with 1");
1199     addq(bumped_count, DataLayout::counter_increment);
1200     sbbq(bumped_count, 0);
1201     movq(data, bumped_count); // Store back out
1202 
1203     // The method data pointer needs to be updated to reflect the new target.
1204     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1205     bind(profile_continue);
1206   }
1207 }
1208 
1209 
1210 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
1211   if (ProfileInterpreter) {
1212     Label profile_continue;
1213 
1214     // If no method data exists, go to profile_continue.
1215     test_method_data_pointer(mdp, profile_continue);
1216 
1217     // We are taking a branch.  Increment the not taken count.
1218     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1219 
1220     // The method data pointer needs to be updated to correspond to
1221     // the next bytecode


1309   // Case 3 is handled by a recursive call.
1310   for (int row = start_row; row <= last_row; row++) {
1311     Label next_test;
1312     bool test_for_null_also = (row == start_row);
1313 
1314     // See if the receiver is receiver[n].
1315     int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1316     test_mdp_data_at(mdp, recvr_offset, receiver,
1317                      (test_for_null_also ? reg2 : noreg),
1318                      next_test);
1319     // (Reg2 now contains the receiver from the CallData.)
1320 
1321     // The receiver is receiver[n].  Increment count[n].
1322     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1323     increment_mdp_data_at(mdp, count_offset);
1324     jmp(done);
1325     bind(next_test);
1326 
1327     if (test_for_null_also) {
1328       // Failed the equality check on receiver[n]...  Test for null.
1329       testq(reg2, reg2);
1330       if (start_row == last_row) {
1331         // The only thing left to do is handle the null case.
1332         jcc(Assembler::notZero, done);
1333         break;
1334       }
1335       // Since null is rare, make it be the branch-taken case.
1336       Label found_null;
1337       jcc(Assembler::zero, found_null);
1338 
1339       // Put all the "Case 3" tests here.
1340       record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done);
1341 
1342       // Found a null.  Keep searching for a matching receiver,
1343       // but remember that this is an empty (unused) slot.
1344       bind(found_null);
1345     }
1346   }
1347 
1348   // In the fall-through case, we found no matching receiver, but we
1349   // observed that receiver[start_row] is NULL.


1505                          in_bytes(MultiBranchData::
1506                                   default_displacement_offset()));
1507 
1508     bind(profile_continue);
1509   }
1510 }
1511 
1512 
1513 void InterpreterMacroAssembler::profile_switch_case(Register index,
1514                                                     Register mdp,
1515                                                     Register reg2) {
1516   if (ProfileInterpreter) {
1517     Label profile_continue;
1518 
1519     // If no method data exists, go to profile_continue.
1520     test_method_data_pointer(mdp, profile_continue);
1521 
1522     // Build the base (index * per_case_size_in_bytes()) +
1523     // case_array_offset_in_bytes()
1524     movl(reg2, in_bytes(MultiBranchData::per_case_size()));
1525     imulq(index, reg2); // XXX l ?
1526     addq(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
1527 
1528     // Update the case count
1529     increment_mdp_data_at(mdp, 
1530                           index, 
1531                           in_bytes(MultiBranchData::relative_count_offset())); 
1532 
1533     // The method data pointer needs to be updated.
1534     update_mdp_by_offset(mdp, 
1535                          index, 
1536                          in_bytes(MultiBranchData::
1537                                   relative_displacement_offset()));
1538 
1539     bind(profile_continue);
1540   }
1541 }
1542 
1543 
1544 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
1545   if (state == atos) {
1546     MacroAssembler::verify_oop(reg);
1547   }
1548 }
1549 
1550 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
1551 }
1552 
1553  
1554 void InterpreterMacroAssembler::notify_method_entry() {
1555   // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1556   // track stack depth.  If it is possible to enter interp_only_mode we add
1557   // the code to check if the event should be sent.
1558   if (JvmtiExport::can_post_interpreter_events()) {
1559     Label L;
1560     movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset()));
1561     testl(rdx, rdx);
1562     jcc(Assembler::zero, L);
1563     call_VM(noreg, CAST_FROM_FN_PTR(address, 
1564                                     InterpreterRuntime::post_method_entry));
1565     bind(L);
1566   }
1567 
1568   {
1569     SkipIfEqual skip(this, &DTraceMethodProbes, false);
1570     get_method(c_rarg1);
1571     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1572                  r15_thread, c_rarg1);
1573   }
1574 }
1575 
1576  
1577 void InterpreterMacroAssembler::notify_method_exit(
1578     TosState state, NotifyMethodExitMode mode) {
1579   // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1580   // track stack depth.  If it is possible to enter interp_only_mode we add
1581   // the code to check if the event should be sent.
1582   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
1583     Label L;
1584     // Note: frame::interpreter_frame_result has a dependency on how the 
1585     // method result is saved across the call to post_method_exit. If this
1586     // is changed then the interpreter_frame_result implementation will
1587     // need to be updated too.
1588     push(state);
1589     movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset()));
1590     testl(rdx, rdx);
1591     jcc(Assembler::zero, L);
1592     call_VM(noreg, 
1593             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
1594     bind(L);
1595     pop(state);     
1596   }
1597 
1598   {
1599     SkipIfEqual skip(this, &DTraceMethodProbes, false);
1600     push(state);
1601     get_method(c_rarg1);
1602     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1603                  r15_thread, c_rarg1);
1604     pop(state);
1605   }
1606 }



   1 /*
   2  * Copyright 2003-2008 Sun Microsystems, Inc.  All Rights Reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  20  * CA 95054 USA or visit www.sun.com if you need additional information or
  21  * have any questions.
  22  *
  23  */
  24 
  25 #include "incls/_precompiled.incl"
  26 #include "incls/_interp_masm_x86_64.cpp.incl"
  27 
  28 
  29 // Implementation of InterpreterMacroAssembler
  30 
  31 #ifdef CC_INTERP
  32 void InterpreterMacroAssembler::get_method(Register reg) {
  33   movptr(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize)));
  34   movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method)));
  35 }
  36 #endif // CC_INTERP
  37 
  38 #ifndef CC_INTERP
  39 
  40 void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
  41                                                   int number_of_arguments) {
  42   // interpreter specific
  43   //
  44   // Note: No need to save/restore bcp & locals (r13 & r14) pointer
  45   //       since these are callee saved registers and no blocking/
  46   //       GC can happen in leaf calls.
  47   // Further Note: DO NOT save/restore bcp/locals. If a caller has
  48   // already saved them so that it can use r13/r14 as temporaries,
  49   // then a save/restore here will DESTROY the copy the caller
  50   // saved! There used to be a save_bcp() that only happened in
  51   // the ASSERT path (with no restore_bcp), which caused bizarre
  52   // failures when the JVM was built with ASSERTs.
  53 #ifdef ASSERT

  54   {
  55     Label L;
  56     cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  57     jcc(Assembler::equal, L);
  58     stop("InterpreterMacroAssembler::call_VM_leaf_base:"
  59          " last_sp != NULL");
  60     bind(L);
  61   }
  62 #endif
  63   // super call
  64   MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
  65   // interpreter specific
  66   // Used to ASSERT that r13/r14 were equal to the frame's bcp/locals,
  67   // but since they may not have been saved (and we don't want to
  68   // save them here; see the note above) the assert is invalid.















  69 }
  70 
  71 void InterpreterMacroAssembler::call_VM_base(Register oop_result,
  72                                              Register java_thread,
  73                                              Register last_java_sp,
  74                                              address  entry_point,
  75                                              int      number_of_arguments,
  76                                              bool     check_exceptions) {
  77   // interpreter specific
  78   //
  79   // Note: Could avoid restoring locals ptr (callee saved) - however doesn't
  80   //       really make a difference for these runtime calls, since they are
  81   //       slow anyway. Btw., bcp must be saved/restored since it may change
  82   //       due to GC.
  83   // assert(java_thread == noreg , "not expecting a precomputed java thread");
  84   save_bcp();
  85 #ifdef ASSERT
  86   {
  87     Label L;
  88     cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  89     jcc(Assembler::equal, L);
  90     stop("InterpreterMacroAssembler::call_VM_leaf_base:"
  91          " last_sp != NULL");
  92     bind(L);
  93   }
  94 #endif /* ASSERT */
  95   // super call
  96   MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
  97                                entry_point, number_of_arguments,
  98                                check_exceptions);
  99   // interpreter specific
 100   restore_bcp();
 101   restore_locals();
 102 }
 103 
 104 
 105 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
 106   if (JvmtiExport::can_pop_frame()) {
 107     Label L;
 108     // Initiate popframe handling only if it is not already being
 109     // processed.  If the flag has the popframe_processing bit set, it
 110     // means that this code is called *during* popframe handling - we
 111     // don't want to reenter.
 112     // This method is only called just after the call into the vm in
 113     // call_VM_base, so the arg registers are available.
 114     movl(c_rarg0, Address(r15_thread, JavaThread::popframe_condition_offset()));
 115     testl(c_rarg0, JavaThread::popframe_pending_bit);
 116     jcc(Assembler::zero, L);
 117     testl(c_rarg0, JavaThread::popframe_processing_bit);
 118     jcc(Assembler::notZero, L);
 119     // Call Interpreter::remove_activation_preserving_args_entry() to get the
 120     // address of the same-named entrypoint in the generated interpreter code.
 121     call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
 122     jmp(rax);
 123     bind(L);
 124   }
 125 }
 126 
 127 
 128 void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
 129   movptr(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
 130   const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset());
 131   const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset());
 132   const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset());
 133   switch (state) {
 134     case atos: movptr(rax, oop_addr);
 135                movptr(oop_addr, (int32_t)NULL_WORD);
 136                verify_oop(rax, state);              break;
 137     case ltos: movptr(rax, val_addr);                 break;
 138     case btos:                                   // fall through
 139     case ctos:                                   // fall through
 140     case stos:                                   // fall through
 141     case itos: movl(rax, val_addr);                 break;
 142     case ftos: movflt(xmm0, val_addr);              break;
 143     case dtos: movdbl(xmm0, val_addr);              break;
 144     case vtos: /* nothing to do */                  break;
 145     default  : ShouldNotReachHere();
 146   }
 147   // Clean up tos value in the thread object
 148   movl(tos_addr,  (int) ilgl);
 149   movl(val_addr,  (int32_t) NULL_WORD);
 150 }
 151 
 152 
 153 void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
 154   if (JvmtiExport::can_force_early_return()) {
 155     Label L;
 156     movptr(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
 157     testptr(c_rarg0, c_rarg0);
 158     jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit;
 159 
 160     // Initiate earlyret handling only if it is not already being processed.
 161     // If the flag has the earlyret_processing bit set, it means that this code
 162     // is called *during* earlyret handling - we don't want to reenter.
 163     movl(c_rarg0, Address(c_rarg0, JvmtiThreadState::earlyret_state_offset()));
 164     cmpl(c_rarg0, JvmtiThreadState::earlyret_pending);
 165     jcc(Assembler::notEqual, L);
 166 
 167     // Call Interpreter::remove_activation_early_entry() to get the address of the
 168     // same-named entrypoint in the generated interpreter code.
 169     movptr(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
 170     movl(c_rarg0, Address(c_rarg0, JvmtiThreadState::earlyret_tos_offset()));
 171     call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), c_rarg0);
 172     jmp(rax);
 173     bind(L);
 174   }
 175 }
 176 
 177 
 178 void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
 179   Register reg,
 180   int bcp_offset) {
 181   assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
 182   movl(reg, Address(r13, bcp_offset));
 183   bswapl(reg);
 184   shrl(reg, 16);
 185 }
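     // Worked example for the routine above (an illustrative sketch, not part of
     // the original source): the bytecode stream is big-endian, so for bytes
     // 0x12 0x34 at r13 + bcp_offset the little-endian movl loads 0x????3412,
     // bswapl turns it into 0x1234????, and shrl(16) leaves 0x00001234, the
     // unsigned 2-byte index, in reg.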
 186 
 187 
 188 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
 189                                                            Register index,
 190                                                            int bcp_offset) {
 191   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
 192   assert(cache != index, "must use different registers");
 193   load_unsigned_word(index, Address(r13, bcp_offset));
 194   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
 195   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
 196   // convert from field index to ConstantPoolCacheEntry index
 197   shll(index, 2);
 198 }
 199 
 200 
 201 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
 202                                                                Register tmp,
 203                                                                int bcp_offset) {
 204   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
 205   assert(cache != tmp, "must use different register");
 206   load_unsigned_word(tmp, Address(r13, bcp_offset));
 207   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
 208   // convert from field index to ConstantPoolCacheEntry index
 209   // and from word offset to byte offset
 210   shll(tmp, 2 + LogBytesPerWord);
 211   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
 212   // skip past the header
 213   addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
 214   addptr(cache, tmp);  // construct pointer to cache entry
 215 }
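     // In effect the routine above computes (a sketch of the arithmetic, not part
     // of the original source):
     //   cache = *(rbp + frame::interpreter_frame_cache_offset * wordSize)
     //           + constantPoolCacheOopDesc::base_offset()
     //           + index * 4 * wordSize    // == index * sizeof(ConstantPoolCacheEntry)
     // where index is the unsigned 2-byte field index at r13 + bcp_offset.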
 216 
 217 
 218 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 219 // subtype of super_klass.
 220 //
 221 // Args:
 222 //      rax: superklass
 223 //      Rsub_klass: subklass
 224 //
 225 // Kills:
 226 //      rcx, rdi
 227 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 228                                                   Label& ok_is_subtype) {
 229   assert(Rsub_klass != rax, "rax holds superklass");
 230   assert(Rsub_klass != r14, "r14 holds locals");
 231   assert(Rsub_klass != r13, "r13 holds bcp");
 232   assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
 233   assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");
 234 
 235   Label not_subtype, not_subtype_pop, loop;
 236 
 237   // Profile the not-null value's klass.
 238   profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, rdi
 239 
 240   // Load the super-klass's check offset into rcx
 241   movl(rcx, Address(rax, sizeof(oopDesc) +
 242                     Klass::super_check_offset_offset_in_bytes()));
 243   // Load from the sub-klass's super-class display list, or a 1-word
 244   // cache of the secondary superclass list, or a failing value with a
 245   // sentinel offset if the super-klass is an interface or
 246   // exceptionally deep in the Java hierarchy and we have to scan the
 247   // secondary superclass list the hard way.  See if we get an
 248   // immediate positive hit
 249   cmpptr(rax, Address(Rsub_klass, rcx, Address::times_1));
 250   jcc(Assembler::equal, ok_is_subtype);
 251 
 252   // Check for immediate negative hit
 253   cmpl(rcx, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
 254   jcc(Assembler::notEqual, not_subtype);
 255   // Check for self
 256   cmpptr(Rsub_klass, rax);
 257   jcc(Assembler::equal, ok_is_subtype);
 258 
 259   // Now do a linear scan of the secondary super-klass chain.
 260   movptr(rdi, Address(Rsub_klass, sizeof(oopDesc) +
 261                       Klass::secondary_supers_offset_in_bytes()));
 262   // rdi holds the objArrayOop of secondary supers.
 263   // Load the array length
 264   movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
 265   // Skip to start of data; also clear Z flag in case rcx is zero
 266   addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
 267   // Scan rcx words at [rdi] for an occurrence of rax
 268   // Set NZ/Z based on last compare
 269 
 270   // This part is somewhat tricky, as the values in the supers array may be 32 or
 271   // 64 bits wide, and values in objArrays are always stored encoded, so we need to
 272   // encode the value before the repne scan
 273   if (UseCompressedOops) {
 274     push(rax);
 275     encode_heap_oop(rax);
 276     repne_scanl();
 277     // Not equal?
 278     jcc(Assembler::notEqual, not_subtype_pop);
 279     // restore heap oop here for movq
 280     pop(rax);
 281   } else {
 282     repne_scan();
 283     jcc(Assembler::notEqual, not_subtype);
 284   }
 285   // Must be equal but missed in cache.  Update cache.
 286   movptr(Address(Rsub_klass, sizeof(oopDesc) +
 287                Klass::secondary_super_cache_offset_in_bytes()), rax);
 288   jmp(ok_is_subtype);
 289 
 290   bind(not_subtype_pop);
 291   // restore heap oop here for miss
 292   if (UseCompressedOops) pop(rax);
 293   bind(not_subtype);
 294   profile_typecheck_failed(rcx); // blows rcx
 295 }
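     // The fast/slow structure above amounts to (a sketch, not part of the
     // original source; rax = super, Rsub_klass = sub):
     //   if (*(sub + super->super_check_offset) == super)          => ok_is_subtype
     //   else if (super->super_check_offset !=
     //            the secondary_super_cache offset)                => not a subtype
     //   else if (sub == super)                                    => ok_is_subtype
     //   else scan sub->secondary_supers for super; on a hit update
     //        sub->secondary_super_cache and take ok_is_subtype, otherwise not.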
 296 
 297 
 298 
 299 // Java Expression Stack
 300 
 301 #ifdef ASSERT
 302 // Verifies that the stack tag matches.  Must be called before the stack
 303 // value is popped off the stack.
 304 void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
 305   if (TaggedStackInterpreter) {
 306     frame::Tag tag = t;
 307     if (t == frame::TagCategory2) {
 308       tag = frame::TagValue;
 309       Label hokay;
 310       cmpptr(Address(rsp, 3*wordSize), (int32_t)tag);
 311       jcc(Assembler::equal, hokay);
 312       stop("Java Expression stack tag high value is bad");
 313       bind(hokay);
 314     }
 315     Label okay;
 316     cmpptr(Address(rsp, wordSize), (int32_t)tag);
 317     jcc(Assembler::equal, okay);
 318     // Also accept a zero stack value, since then the tag might
 319     // not have been set coming from deopt.
 320     cmpptr(Address(rsp, 0), 0);
 321     jcc(Assembler::equal, okay);
 322     stop("Java Expression stack tag value is bad");
 323     bind(okay);
 324   }
 325 }
 326 #endif // ASSERT
 327 
 328 void InterpreterMacroAssembler::pop_ptr(Register r) {
 329   debug_only(verify_stack_tag(frame::TagReference));
 330   pop(r);
 331   if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 332 }
 333 
 334 void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
 335   pop(r);
 336   if (TaggedStackInterpreter) pop(tag);
 337 }
 338 
 339 void InterpreterMacroAssembler::pop_i(Register r) {
 340   // XXX can't use pop currently, upper half non clean
 341   debug_only(verify_stack_tag(frame::TagValue));
 342   movl(r, Address(rsp, 0));
 343   addptr(rsp, wordSize);
 344   if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 345 }
 346 
 347 void InterpreterMacroAssembler::pop_l(Register r) {
 348   debug_only(verify_stack_tag(frame::TagCategory2));
 349   movq(r, Address(rsp, 0));
 350   addptr(rsp, 2 * Interpreter::stackElementSize());
 351 }
 352 
 353 void InterpreterMacroAssembler::pop_f(XMMRegister r) {
 354   debug_only(verify_stack_tag(frame::TagValue));
 355   movflt(r, Address(rsp, 0));
 356   addptr(rsp, wordSize);
 357   if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 358 }
 359 
 360 void InterpreterMacroAssembler::pop_d(XMMRegister r) {
 361   debug_only(verify_stack_tag(frame::TagCategory2));
 362   movdbl(r, Address(rsp, 0));
 363   addptr(rsp, 2 * Interpreter::stackElementSize());
 364 }
 365 
 366 void InterpreterMacroAssembler::push_ptr(Register r) {
 367   if (TaggedStackInterpreter) push(frame::TagReference);
 368   push(r);
 369 }
 370 
 371 void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
 372   if (TaggedStackInterpreter) push(tag);
 373   push(r);
 374 }
 375 
 376 void InterpreterMacroAssembler::push_i(Register r) {
 377   if (TaggedStackInterpreter) push(frame::TagValue);
 378   push(r);
 379 }
 380 
 381 void InterpreterMacroAssembler::push_l(Register r) {
 382   if (TaggedStackInterpreter) {
 383     push(frame::TagValue);
 384     subptr(rsp, 1 * wordSize);
 385     push(frame::TagValue);
 386     subptr(rsp, 1 * wordSize);
 387   } else {
 388     subptr(rsp, 2 * wordSize);
 389   }
 390   movq(Address(rsp, 0), r);
 391 }
 392 
 393 void InterpreterMacroAssembler::push_f(XMMRegister r) {
 394   if (TaggedStackInterpreter) push(frame::TagValue);
 395   subptr(rsp, wordSize);
 396   movflt(Address(rsp, 0), r);
 397 }
 398 
 399 void InterpreterMacroAssembler::push_d(XMMRegister r) {
 400   if (TaggedStackInterpreter) {
 401     push(frame::TagValue);
 402     subptr(rsp, 1 * wordSize);
 403     push(frame::TagValue);
 404     subptr(rsp, 1 * wordSize);
 405   } else {
 406     subptr(rsp, 2 * wordSize);
 407   }
 408   movdbl(Address(rsp, 0), r);
 409 }
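     // With TaggedStackInterpreter, push_l/push_d above leave the expression
     // stack laid out as follows (lowest address = top of stack; an illustrative
     // sketch, not part of the original source):
     //   [rsp + 0*wordSize]  64-bit value (written by the movq/movdbl)
     //   [rsp + 1*wordSize]  frame::TagValue for the low slot
     //   [rsp + 2*wordSize]  unused half of the second slot
     //   [rsp + 3*wordSize]  frame::TagValue for the high slot
     // which matches the offsets checked by verify_stack_tag() for TagCategory2.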
 410 
 411 void InterpreterMacroAssembler::pop(TosState state) {
 412   switch (state) {
 413   case atos: pop_ptr();                 break;
 414   case btos:
 415   case ctos:
 416   case stos:
 417   case itos: pop_i();                   break;
 418   case ltos: pop_l();                   break;
 419   case ftos: pop_f();                   break;
 420   case dtos: pop_d();                   break;
 421   case vtos: /* nothing to do */        break;
 422   default:   ShouldNotReachHere();
 423   }
 424   verify_oop(rax, state);
 425 }
 426 
 427 void InterpreterMacroAssembler::push(TosState state) {
 428   verify_oop(rax, state);
 429   switch (state) {
 430   case atos: push_ptr();                break;
 431   case btos:
 432   case ctos:
 433   case stos:
 434   case itos: push_i();                  break;
 435   case ltos: push_l();                  break;
 436   case ftos: push_f();                  break;
 437   case dtos: push_d();                  break;
 438   case vtos: /* nothing to do */        break;
 439   default  : ShouldNotReachHere();
 440   }
 441 }
 442 
 443 
 444 
 445 
 446 // Tagged stack helpers for swap and dup
 447 void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
 448                                                  Register tag) {
 449   movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
 450   if (TaggedStackInterpreter) {
 451     movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
 452   }
 453 }
 454 
 455 void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
 456                                                   Register tag) {
 457   movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
 458   if (TaggedStackInterpreter) {
 459     movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
 460   }
 461 }
 462 
 463 
 464 // Tagged local support
 465 void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
 466   if (TaggedStackInterpreter) {
 467     if (tag == frame::TagCategory2) {
 468       movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)),
 469            (int32_t)frame::TagValue);
 470       movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)),
 471            (int32_t)frame::TagValue);
 472     } else {
 473       movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag);
 474     }
 475   }
 476 }
 477 
 478 void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
 479   if (TaggedStackInterpreter) {
 480     if (tag == frame::TagCategory2) {
 481       movptr(Address(r14, idx, Address::times_8,
 482                   Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue);
 483       movptr(Address(r14, idx, Address::times_8,
 484                   Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue);
 485     } else {
 486       movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)),
 487            (int32_t)tag);
 488     }
 489   }
 490 }
 491 
 492 void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
 493   if (TaggedStackInterpreter) {
 494     // can only be TagValue or TagReference
 495     movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), tag);
 496   }
 497 }
 498 
 499 
 500 void InterpreterMacroAssembler::tag_local(Register tag, int n) {
 501   if (TaggedStackInterpreter) {
 502     // can only be TagValue or TagReference
 503     movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), tag);
 504   }
 505 }
 506 
 507 #ifdef ASSERT
 508 void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
 509   if (TaggedStackInterpreter) {
 510     frame::Tag t = tag;
 511     if (tag == frame::TagCategory2) {
 512       Label nbl;
 513       t = frame::TagValue;  // change to what is stored in locals
 514       cmpptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t);
 515       jcc(Assembler::equal, nbl);
 516       stop("Local tag is bad for long/double");
 517       bind(nbl);
 518     }
 519     Label notBad;
 520     cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t);
 521     jcc(Assembler::equal, notBad);
 522     // Also accept a zero local value, since then the tag might
 523     // not have been set coming from deopt.
 524     cmpptr(Address(r14, Interpreter::local_offset_in_bytes(n)), 0);
 525     jcc(Assembler::equal, notBad);
 526     stop("Local tag is bad");
 527     bind(notBad);
 528   }
 529 }
 530 
 531 void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
 532   if (TaggedStackInterpreter) {
 533     frame::Tag t = tag;
 534     if (tag == frame::TagCategory2) {
 535       Label nbl;
 536       t = frame::TagValue;  // change to what is stored in locals
 537       cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t);
 538       jcc(Assembler::equal, nbl);
 539       stop("Local tag is bad for long/double");
 540       bind(nbl);
 541     }
 542     Label notBad;
 543     cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t);
 544     jcc(Assembler::equal, notBad);
 545     // Also accept a zero local value, since then the tag might
 546     // not have been set coming from deopt.
 547     cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_offset_in_bytes(0)), 0);
 548     jcc(Assembler::equal, notBad);
 549     stop("Local tag is bad");
 550     bind(notBad);
 551   }
 552 }
 553 #endif // ASSERT
 554 
 555 
 556 void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
 557   MacroAssembler::call_VM_leaf_base(entry_point, 0);
 558 }
 559 
 560 
 561 void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
 562                                                    Register arg_1) {
 563   if (c_rarg0 != arg_1) {
 564     mov(c_rarg0, arg_1);
 565   }
 566   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 567 }
 568 
 569 
 570 void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
 571                                                    Register arg_1,
 572                                                    Register arg_2) {
 573   assert(c_rarg0 != arg_2, "smashed argument");
 574   assert(c_rarg1 != arg_1, "smashed argument");
 575   if (c_rarg0 != arg_1) {
 576     mov(c_rarg0, arg_1);
 577   }
 578   if (c_rarg1 != arg_2) {
 579     mov(c_rarg1, arg_2);
 580   }
 581   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 582 }
 583 
 584 void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
 585                                                    Register arg_1,
 586                                                    Register arg_2,
 587                                                    Register arg_3) {
 588   assert(c_rarg0 != arg_2, "smashed argument");
 589   assert(c_rarg0 != arg_3, "smashed argument");
 590   assert(c_rarg1 != arg_1, "smashed argument");
 591   assert(c_rarg1 != arg_3, "smashed argument");
 592   assert(c_rarg2 != arg_1, "smashed argument");
 593   assert(c_rarg2 != arg_2, "smashed argument");
 594   if (c_rarg0 != arg_1) {
 595     mov(c_rarg0, arg_1);
 596   }
 597   if (c_rarg1 != arg_2) {
 598     mov(c_rarg1, arg_2);
 599   }
 600   if (c_rarg2 != arg_3) {
 601     mov(c_rarg2, arg_3);
 602   }
 603   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 604 }
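     // Note on the asserts in the routines above (an illustrative remark, not part
     // of the original source): they reject argument/register orderings the simple
     // copy sequence cannot handle, e.g. if arg_2 were already in c_rarg0 then
     // "mov(c_rarg0, arg_1)" would clobber it before it could be moved to c_rarg1.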
 605 
 606 // Jump to the from_interpreted entry of a call unless single stepping is possible
 607 // in this thread, in which case we must call the i2i entry
 608 void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
 609   // set sender sp
 610   lea(r13, Address(rsp, wordSize));
 611   // record last_sp
 612   movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), r13);
 613 
 614   if (JvmtiExport::can_post_interpreter_events()) {
 615     Label run_compiled_code;
 616     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
 617     // compiled code in threads for which the event is enabled.  Check here for
 618     // interp_only_mode if these events CAN be enabled.
 619     get_thread(temp);
 620     // interp_only is an int; on little endian it is sufficient to test the byte only
 621     // Is a cmpl faster?
 622     cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
 623     jcc(Assembler::zero, run_compiled_code);
 624     jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
 625     bind(run_compiled_code);
 626   }
 627 
 628   jmp(Address(method, methodOopDesc::from_interpreted_offset()));
 629 
 630 }
 631 
 632 
 633 // The following two routines provide a hook so that an implementation
 634 // can schedule the dispatch in two parts.  amd64 does not do this.
 635 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
 636   // Nothing amd64 specific to be done here
 637 }
 638 
 639 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
 640   dispatch_next(state, step);
 641 }
 642 
 643 void InterpreterMacroAssembler::dispatch_base(TosState state,
 644                                               address* table,
 645                                               bool verifyoop) {
 646   verify_FPU(1, state);
 647   if (VerifyActivationFrameSize) {
 648     Label L;
 649     mov(rcx, rbp);
 650     subptr(rcx, rsp);
 651     int32_t min_frame_size =
 652       (frame::link_offset - frame::interpreter_frame_initial_sp_offset) *
 653       wordSize;
 654     cmpptr(rcx, (int32_t)min_frame_size);
 655     jcc(Assembler::greaterEqual, L);
 656     stop("broken stack frame");
 657     bind(L);
 658   }
 659   if (verifyoop) {
 660     verify_oop(rax, state);
 661   }
 662   lea(rscratch1, ExternalAddress((address)table));
 663   jmp(Address(rscratch1, rbx, Address::times_8));
 664 }
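     // The indexed jmp above is the interpreter's table dispatch; in pseudo-C it
     // is roughly (a sketch, not part of the original source):
     //   goto table[rbx];   // rbx holds the zero-extended next bytecode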
 665 
 666 void InterpreterMacroAssembler::dispatch_only(TosState state) {
 667   dispatch_base(state, Interpreter::dispatch_table(state));
 668 }
 669 
 670 void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
 671   dispatch_base(state, Interpreter::normal_table(state));
 672 }
 673 
 674 void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
 675   dispatch_base(state, Interpreter::normal_table(state), false);
 676 }
 677 
 678 
 679 void InterpreterMacroAssembler::dispatch_next(TosState state, int step) {
 680   // load next bytecode (load before advancing r13 to prevent AGI)
 681   load_unsigned_byte(rbx, Address(r13, step));
 682   // advance r13
 683   increment(r13, step);
 684   dispatch_base(state, Interpreter::dispatch_table(state));
 685 }
 686 
 687 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
 688   // load current bytecode
 689   load_unsigned_byte(rbx, Address(r13, 0));
 690   dispatch_base(state, table);
 691 }
 692 
 693 // remove activation
 694 //
 695 // Unlock the receiver if this is a synchronized method.
 696 // Unlock any Java monitors from synchronized blocks.
 697 // Remove the activation from the stack.
 698 //
 699 // If there are locked Java monitors
 700 //    If throw_monitor_exception
 701 //       throws IllegalMonitorStateException
 702 //    Else if install_monitor_exception
 703 //       installs IllegalMonitorStateException
 704 //    Else
 705 //       no error processing
 706 void InterpreterMacroAssembler::remove_activation(
 707         TosState state,
 708         Register ret_addr,
 709         bool throw_monitor_exception,
 710         bool install_monitor_exception,
 711         bool notify_jvmdi) {
 712   // Note: Registers rdx and xmm0 may be in use for the
 713   // result check if this is a synchronized method
 714   Label unlocked, unlock, no_unlock;
 715 
 716   // get the value of _do_not_unlock_if_synchronized into rdx
 717   const Address do_not_unlock_if_synchronized(r15_thread,
 718     in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
 719   movbool(rdx, do_not_unlock_if_synchronized);
 720   movbool(do_not_unlock_if_synchronized, false); // reset the flag
 721 
 722   // get method access flags
 723   movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
 724   movl(rcx, Address(rbx, methodOopDesc::access_flags_offset()));
 725   testl(rcx, JVM_ACC_SYNCHRONIZED);
 726   jcc(Assembler::zero, unlocked);
 727 
 728   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
 729   // is set.
 730   testbool(rdx);
 731   jcc(Assembler::notZero, no_unlock);
 732 
 733   // unlock monitor
 734   push(state); // save result
 735 
 736   // BasicObjectLock will be first in list, since this is a
 737   // synchronized method. However, need to check that the object has
 738   // not been unlocked by an explicit monitorexit bytecode.
 739   const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
 740                         wordSize - (int) sizeof(BasicObjectLock));
 741   // We use c_rarg1 so that if we go slow path it will be the correct
 742   // register for unlock_object to pass to VM directly
 743   lea(c_rarg1, monitor); // address of first monitor
 744 
 745   movptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
 746   testptr(rax, rax);
 747   jcc(Assembler::notZero, unlock);
 748 
 749   pop(state);
 750   if (throw_monitor_exception) {
 751     // Entry already unlocked, need to throw exception
 752     call_VM(noreg, CAST_FROM_FN_PTR(address,
 753                    InterpreterRuntime::throw_illegal_monitor_state_exception));
 754     should_not_reach_here();
 755   } else {
 756     // Monitor already unlocked during a stack unroll. If requested,
 757     // install an illegal_monitor_state_exception.  Continue with
 758     // stack unrolling.
 759     if (install_monitor_exception) {
 760       call_VM(noreg, CAST_FROM_FN_PTR(address,
 761                      InterpreterRuntime::new_illegal_monitor_state_exception));
 762     }
 763     jmp(unlocked);
 764   }
 765 
 766   bind(unlock);


 768   pop(state);
 769 
 770   // Check for block-structured locking (i.e., that all locked
 771   // objects have been unlocked)
 772   bind(unlocked);
 773 
 774   // rax: Might contain return value
 775 
 776   // Check that all monitors are unlocked
 777   {
 778     Label loop, exception, entry, restart;
 779     const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
 780     const Address monitor_block_top(
 781         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
 782     const Address monitor_block_bot(
 783         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
 784 
 785     bind(restart);
 786     // We use c_rarg1 so that if we go slow path it will be the correct
 787     // register for unlock_object to pass to VM directly
 788     movptr(c_rarg1, monitor_block_top); // points to current entry, starting
 789                                   // with top-most entry
 790     lea(rbx, monitor_block_bot);  // points to word before bottom of
 791                                   // monitor block
 792     jmp(entry);
 793 
 794     // Entry already locked, need to throw exception
 795     bind(exception);
 796 
 797     if (throw_monitor_exception) {
 798       // Throw exception
 799       MacroAssembler::call_VM(noreg,
 800                               CAST_FROM_FN_PTR(address, InterpreterRuntime::
 801                                    throw_illegal_monitor_state_exception));
 802       should_not_reach_here();
 803     } else {
 804       // Stack unrolling. Unlock object and install illegal_monitor_exception.
 805       // Unlock does not block, so we don't have to worry about the frame.
 806       // We don't have to preserve c_rarg1 since we are going to throw an exception.
 807 
 808       push(state);
 809       unlock_object(c_rarg1);
 810       pop(state);
 811 
 812       if (install_monitor_exception) {
 813         call_VM(noreg, CAST_FROM_FN_PTR(address,
 814                                         InterpreterRuntime::
 815                                         new_illegal_monitor_state_exception));
 816       }
 817 
 818       jmp(restart);
 819     }
 820 
 821     bind(loop);
 822     // check if current entry is used
 823     cmpptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
 824     jcc(Assembler::notEqual, exception);
 825 
 826     addptr(c_rarg1, entry_size); // otherwise advance to next entry
 827     bind(entry);
 828     cmpptr(c_rarg1, rbx); // check if bottom reached
 829     jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
 830   }
 831 
 832   bind(no_unlock);
 833 
 834   // jvmti support
 835   if (notify_jvmdi) {
 836     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 837   } else {
 838     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 839   }
 840 
 841   // remove activation
 842   // get sender sp
 843   movptr(rbx,
 844          Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
 845   leave();                           // remove frame anchor
 846   pop(ret_addr);                     // get return address
 847   mov(rsp, rbx);                     // set sp to sender sp
 848 }
 849 
 850 #endif // CC_INTERP
 851 
 852 // Lock object
 853 //
 854 // Args:
 855 //      c_rarg1: BasicObjectLock to be used for locking
 856 //
 857 // Kills:
 858 //      rax
 859 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
 860 //      rscratch1, rscratch2 (scratch regs)
 861 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
 862   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
 863 
 864   if (UseHeavyMonitors) {
 865     call_VM(noreg,
 866             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 867             lock_reg);
 868   } else {
 869     Label done;
 870 
 871     const Register swap_reg = rax; // Must use rax for cmpxchg instruction
 872     const Register obj_reg = c_rarg3; // Will contain the oop
 873 
 874     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
 875     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
 876     const int mark_offset = lock_offset +
 877                             BasicLock::displaced_header_offset_in_bytes();
 878 
 879     Label slow_case;
 880 
 881     // Load object pointer into obj_reg %c_rarg3
 882     movptr(obj_reg, Address(lock_reg, obj_offset));
 883 
 884     if (UseBiasedLocking) {
 885       biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, done, &slow_case);
 886     }
 887 
 888     // Load immediate 1 into swap_reg %rax
 889     movl(swap_reg, 1);
 890 
 891     // Load (object->mark() | 1) into swap_reg %rax
 892     orptr(swap_reg, Address(obj_reg, 0));
 893 
 894     // Save (object->mark() | 1) into BasicLock's displaced header
 895     movptr(Address(lock_reg, mark_offset), swap_reg);
 896 
 897     assert(lock_offset == 0,
 898            "displaced header must be first word in BasicObjectLock");
 899 
 900     if (os::is_MP()) lock();
 901     cmpxchgptr(lock_reg, Address(obj_reg, 0));
 902     if (PrintBiasedLockingStatistics) {
 903       cond_inc32(Assembler::zero,
 904                  ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
 905     }
 906     jcc(Assembler::zero, done);
 907 
 908     // Test if the oopMark is an obvious stack pointer, i.e.,
 909     //  1) (mark & 7) == 0, and
 910     //  2) rsp <= mark < rsp + os::vm_page_size()
 911     //
 912     // These 3 tests can be done by evaluating the following
 913     // expression: ((mark - rsp) & (7 - os::vm_page_size())),
 914     // assuming both stack pointer and pagesize have their
 915     // least significant 3 bits clear.
 916     // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
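         // For example with a 4K page (an illustrative sketch, not part of the
         // original source): 7 - 4096 = -4089 = 0x...fffff007, so the AND below
         // is zero exactly when the low 3 bits of (mark - rsp) are clear and
         // 0 <= mark - rsp < 4096, i.e. rsp <= mark < rsp + page size.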
 917     subptr(swap_reg, rsp);
 918     andptr(swap_reg, 7 - os::vm_page_size());
 919 
 920     // Save the test result, for recursive case, the result is zero
 921     movptr(Address(lock_reg, mark_offset), swap_reg);
 922 
 923     if (PrintBiasedLockingStatistics) {
 924       cond_inc32(Assembler::zero,
 925                  ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
 926     }
 927     jcc(Assembler::zero, done);
 928 
 929     bind(slow_case);
 930 
 931     // Call the runtime routine for slow case
 932     call_VM(noreg,
 933             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 934             lock_reg);
 935 
 936     bind(done);
 937   }
 938 }
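     // Outline of the fast path above (a sketch, not part of the original source):
     //   1) optionally take or keep a bias via biased_locking_enter();
     //   2) CAS the BasicLock address into the object header, with (mark | 1)
     //      saved as the displaced header; success => locked;
     //   3) otherwise, if the current mark is our own stack address the lock is
     //      recursive, so a 0 displaced header is stored and we succeed;
     //   4) else fall into InterpreterRuntime::monitorenter (the slow case).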
 939 
 940 
 941 // Unlocks an object. Used in monitorexit bytecode and


 950 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
 951 //      rscratch1, rscratch2 (scratch regs)
 952 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
 953   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be rarg1");
 954 
 955   if (UseHeavyMonitors) {
 956     call_VM(noreg,
 957             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
 958             lock_reg);
 959   } else {
 960     Label done;
 961 
 962     const Register swap_reg   = rax;  // Must use rax for cmpxchg instruction
 963     const Register header_reg = c_rarg2;  // Will contain the old oopMark
 964     const Register obj_reg    = c_rarg3;  // Will contain the oop
 965 
 966     save_bcp(); // Save in case of exception
 967 
 968     // Convert from BasicObjectLock structure to object and BasicLock
 969     // structure. Store the BasicLock address into %rax.
 970     lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
 971 
 972     // Load oop into obj_reg(%c_rarg3)
 973     movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
 974 
 975     // Free entry
 976     movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
 977 
 978     if (UseBiasedLocking) {
 979       biased_locking_exit(obj_reg, header_reg, done);
 980     }
 981 
 982     // Load the old header from BasicLock structure
 983     movptr(header_reg, Address(swap_reg,
 984                                BasicLock::displaced_header_offset_in_bytes()));
 985 
 986     // Test for recursion
 987     testptr(header_reg, header_reg);
 988 
 989     // zero for recursive case
 990     jcc(Assembler::zero, done);
 991 
 992     // Atomic swap back the old header
 993     if (os::is_MP()) lock();
 994     cmpxchgptr(header_reg, Address(obj_reg, 0));
 995 
 996     // zero for recursive case
 997     jcc(Assembler::zero, done);
 998 
 999     // Call the runtime routine for slow case.
1000     movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()),
1001          obj_reg); // restore obj
1002     call_VM(noreg,
1003             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
1004             lock_reg);
1005 
1006     bind(done);
1007 
1008     restore_bcp();
1009   }
1010 }
1011 
1012 #ifndef CC_INTERP
1013 
1014 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
1015                                                          Label& zero_continue) {
1016   assert(ProfileInterpreter, "must be profiling interpreter");
1017   movptr(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize));
1018   testptr(mdp, mdp);
1019   jcc(Assembler::zero, zero_continue);
1020 }
1021 
1022 
1023 // Set the method data pointer for the current bcp.
1024 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1025   assert(ProfileInterpreter, "must be profiling interpreter");
1026   Label zero_continue;
1027   push(rax);
1028   push(rbx);
1029 
1030   get_method(rbx);
1031   // Test MDO to avoid the call if it is NULL.
1032   movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
1033   testptr(rax, rax);
1034   jcc(Assembler::zero, zero_continue);
1035 
1036   // rbx: method
1037   // r13: bcp
1038   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13);
1039   // rax: mdi
1040 
1041   movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
1042   testptr(rbx, rbx);
1043   jcc(Assembler::zero, zero_continue);
1044   addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
1045   addptr(rbx, rax);
1046   movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
1047 
1048   bind(zero_continue);
1049   pop(rbx);
1050   pop(rax);
1051 }
1052 
1053 void InterpreterMacroAssembler::verify_method_data_pointer() {
1054   assert(ProfileInterpreter, "must be profiling interpreter");
1055 #ifdef ASSERT
1056   Label verify_continue;
1057   push(rax);
1058   push(rbx);
1059   push(c_rarg3);
1060   push(c_rarg2);
1061   test_method_data_pointer(c_rarg3, verify_continue); // If mdp is zero, continue
1062   get_method(rbx);
1063 
1064   // If the mdp is valid, it will point to a DataLayout header which is
1065   // consistent with the bcp.  The converse is highly probable also.
1066   load_unsigned_word(c_rarg2,
1067                      Address(c_rarg3, in_bytes(DataLayout::bci_offset())));
1068   addptr(c_rarg2, Address(rbx, methodOopDesc::const_offset()));
1069   lea(c_rarg2, Address(c_rarg2, constMethodOopDesc::codes_offset()));
1070   cmpptr(c_rarg2, r13);
1071   jcc(Assembler::equal, verify_continue);
1072   // rbx: method
1073   // r13: bcp
1074   // c_rarg3: mdp
1075   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
1076                rbx, r13, c_rarg3);
1077   bind(verify_continue);
1078   pop(c_rarg2);
1079   pop(c_rarg3);
1080   pop(rbx);
1081   pop(rax);
1082 #endif // ASSERT
1083 }
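     // The ASSERT block above effectively checks (a sketch, not part of the
     // original source) that
     //   r13 == method->const() + constMethodOopDesc::codes_offset() + data->bci()
     // i.e. that the saved bcp points at the bytecode whose bci is recorded in the
     // DataLayout the mdp refers to; otherwise InterpreterRuntime::verify_mdp is called.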
1084 
1085 
1086 void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
1087                                                 int constant,
1088                                                 Register value) {
1089   assert(ProfileInterpreter, "must be profiling interpreter");
1090   Address data(mdp_in, constant);
1091   movptr(data, value);
1092 }
1093 
1094 
1095 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
1096                                                       int constant,
1097                                                       bool decrement) {
1098   // Counter address
1099   Address data(mdp_in, constant);
1100 
1101   increment_mdp_data_at(data, decrement);
1102 }
1103 
1104 void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
1105                                                       bool decrement) {
1106   assert(ProfileInterpreter, "must be profiling interpreter");
1107   // %%% this does 64-bit counters; at best it is wasting space,
1108   // at worst it is a rare bug when counters overflow
1109 
1110   if (decrement) {
1111     // Decrement the register.  Set condition codes.
1112     addptr(data, (int32_t) -DataLayout::counter_increment);
1113     // If the decrement causes the counter to overflow, stay negative
1114     Label L;
1115     jcc(Assembler::negative, L);
1116     addptr(data, (int32_t) DataLayout::counter_increment);
1117     bind(L);
1118   } else {
1119     assert(DataLayout::counter_increment == 1,
1120            "flow-free idiom only works with 1");
1121     // Increment the register.  Set carry flag.
1122     addptr(data, DataLayout::counter_increment);
1123     // If the increment causes the counter to overflow, pull back by 1.
1124     sbbptr(data, (int32_t)0);
1125   }
1126 }
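     // Illustrative note on the increment path above (not part of the original
     // source): with counter_increment == 1, the addptr sets the carry flag only
     // when the counter wraps from all-ones to zero, and sbbptr(data, 0) then
     // subtracts 0 + CF, pulling it back to all-ones, so the counter saturates
     // instead of wrapping.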
1127 
1128 
1129 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
1130                                                       Register reg,
1131                                                       int constant,
1132                                                       bool decrement) {
1133   Address data(mdp_in, reg, Address::times_1, constant);
1134 
1135   increment_mdp_data_at(data, decrement);
1136 }
1137 
1138 void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
1139                                                 int flag_byte_constant) {
1140   assert(ProfileInterpreter, "must be profiling interpreter");
1141   int header_offset = in_bytes(DataLayout::header_offset());
1142   int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant);
1143   // Set the flag
1144   orl(Address(mdp_in, header_offset), header_bits);
1145 }
1146 
1147 
1148 
1149 void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
1150                                                  int offset,
1151                                                  Register value,
1152                                                  Register test_value_out,
1153                                                  Label& not_equal_continue) {
1154   assert(ProfileInterpreter, "must be profiling interpreter");
1155   if (test_value_out == noreg) {
1156     cmpptr(value, Address(mdp_in, offset));
1157   } else {
1158     // Put the test value into a register, so caller can use it:
1159     movptr(test_value_out, Address(mdp_in, offset));
1160     cmpptr(test_value_out, value);
1161   }
1162   jcc(Assembler::notEqual, not_equal_continue);
1163 }
1164 
1165 
1166 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
1167                                                      int offset_of_disp) {
1168   assert(ProfileInterpreter, "must be profiling interpreter");
1169   Address disp_address(mdp_in, offset_of_disp);
1170   addptr(mdp_in, disp_address);
1171   movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
1172 }
1173 
1174 
1175 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
1176                                                      Register reg,
1177                                                      int offset_of_disp) {
1178   assert(ProfileInterpreter, "must be profiling interpreter");
1179   Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
1180   addptr(mdp_in, disp_address);
1181   movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
1182 }
1183 
1184 
1185 void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
1186                                                        int constant) {
1187   assert(ProfileInterpreter, "must be profiling interpreter");
1188   addptr(mdp_in, constant);
1189   movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
1190 }
1191 
1192 
1193 void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
1194   assert(ProfileInterpreter, "must be profiling interpreter");
1195   push(return_bci); // save/restore across call_VM
1196   call_VM(noreg,
1197           CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
1198           return_bci);
1199   pop(return_bci);
1200 }
1201 
1202 
1203 void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
1204                                                      Register bumped_count) {
1205   if (ProfileInterpreter) {
1206     Label profile_continue;
1207 
1208     // If no method data exists, go to profile_continue.
1209     // Otherwise, assign to mdp
1210     test_method_data_pointer(mdp, profile_continue);
1211 
1212     // We are taking a branch.  Increment the taken count.
1213     // We inline increment_mdp_data_at to return bumped_count in a register
1214     //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1215     Address data(mdp, in_bytes(JumpData::taken_offset()));
1216     movptr(bumped_count, data);
1217     assert(DataLayout::counter_increment == 1,
1218             "flow-free idiom only works with 1");
1219     addptr(bumped_count, DataLayout::counter_increment);
1220     sbbptr(bumped_count, 0);
1221     movptr(data, bumped_count); // Store back out
1222 
1223     // The method data pointer needs to be updated to reflect the new target.
1224     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1225     bind(profile_continue);
1226   }
1227 }
1228 
1229 
1230 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
1231   if (ProfileInterpreter) {
1232     Label profile_continue;
1233 
1234     // If no method data exists, go to profile_continue.
1235     test_method_data_pointer(mdp, profile_continue);
1236 
1237     // We are taking a branch.  Increment the not taken count.
1238     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1239 
1240     // The method data pointer needs to be updated to correspond to
1241     // the next bytecode


1329   // Case 3 is handled by a recursive call.
1330   for (int row = start_row; row <= last_row; row++) {
1331     Label next_test;
1332     bool test_for_null_also = (row == start_row);
1333 
1334     // See if the receiver is receiver[n].
1335     int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1336     test_mdp_data_at(mdp, recvr_offset, receiver,
1337                      (test_for_null_also ? reg2 : noreg),
1338                      next_test);
1339     // (Reg2 now contains the receiver from the CallData.)
1340 
1341     // The receiver is receiver[n].  Increment count[n].
1342     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1343     increment_mdp_data_at(mdp, count_offset);
1344     jmp(done);
1345     bind(next_test);
1346 
1347     if (test_for_null_also) {
1348       // Failed the equality check on receiver[n]...  Test for null.
1349       testptr(reg2, reg2);
1350       if (start_row == last_row) {
1351         // The only thing left to do is handle the null case.
1352         jcc(Assembler::notZero, done);
1353         break;
1354       }
1355       // Since null is rare, make it be the branch-taken case.
1356       Label found_null;
1357       jcc(Assembler::zero, found_null);
1358 
1359       // Put all the "Case 3" tests here.
1360       record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done);
1361 
1362       // Found a null.  Keep searching for a matching receiver,
1363       // but remember that this is an empty (unused) slot.
1364       bind(found_null);
1365     }
1366   }
1367 
1368   // In the fall-through case, we found no matching receiver, but we
1369   // observed that receiver[start_row] is NULL.


1525                          in_bytes(MultiBranchData::
1526                                   default_displacement_offset()));
1527 
1528     bind(profile_continue);
1529   }
1530 }
1531 
1532 
1533 void InterpreterMacroAssembler::profile_switch_case(Register index,
1534                                                     Register mdp,
1535                                                     Register reg2) {
1536   if (ProfileInterpreter) {
1537     Label profile_continue;
1538 
1539     // If no method data exists, go to profile_continue.
1540     test_method_data_pointer(mdp, profile_continue);
1541 
1542     // Build the base (index * per_case_size_in_bytes()) +
1543     // case_array_offset_in_bytes()
1544     movl(reg2, in_bytes(MultiBranchData::per_case_size()));
1545     imulptr(index, reg2); // XXX l ?
1546     addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
1547 
1548     // Update the case count
1549     increment_mdp_data_at(mdp,
1550                           index,
1551                           in_bytes(MultiBranchData::relative_count_offset()));
1552 
1553     // The method data pointer needs to be updated.
1554     update_mdp_by_offset(mdp,
1555                          index,
1556                          in_bytes(MultiBranchData::
1557                                   relative_displacement_offset()));
1558 
1559     bind(profile_continue);
1560   }
1561 }
1562 
1563 
1564 
1565 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
1566   if (state == atos) {
1567     MacroAssembler::verify_oop(reg);
1568   }
1569 }
1570 
1571 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
1572 }
1573 #endif // !CC_INTERP
1574 
1575 
1576 void InterpreterMacroAssembler::notify_method_entry() {
1577   // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1578   // track stack depth.  If it is possible to enter interp_only_mode we add
1579   // the code to check if the event should be sent.
1580   if (JvmtiExport::can_post_interpreter_events()) {
1581     Label L;
1582     movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset()));
1583     testl(rdx, rdx);
1584     jcc(Assembler::zero, L);
1585     call_VM(noreg, CAST_FROM_FN_PTR(address,
1586                                     InterpreterRuntime::post_method_entry));
1587     bind(L);
1588   }
1589 
1590   {
1591     SkipIfEqual skip(this, &DTraceMethodProbes, false);
1592     get_method(c_rarg1);
1593     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1594                  r15_thread, c_rarg1);
1595   }
1596 
1597   // RedefineClasses() tracing support for obsolete method entry
1598   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
1599     get_method(c_rarg1);
1600     call_VM_leaf(
1601       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1602       r15_thread, c_rarg1);
1603   }
1604 }
1605 
1606 
1607 void InterpreterMacroAssembler::notify_method_exit(
1608     TosState state, NotifyMethodExitMode mode) {
1609   // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1610   // track stack depth.  If it is possible to enter interp_only_mode we add
1611   // the code to check if the event should be sent.
1612   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
1613     Label L;
1614     // Note: frame::interpreter_frame_result has a dependency on how the
1615     // method result is saved across the call to post_method_exit. If this
1616     // is changed then the interpreter_frame_result implementation will
1617     // need to be updated too.
1618 
1619     // For the c++ interpreter the result is always stored at a known location in the
1620     // frame; the template interpreter will leave it on the top of the stack.
1621     NOT_CC_INTERP(push(state);)
1622     movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset()));
1623     testl(rdx, rdx);
1624     jcc(Assembler::zero, L);
1625     call_VM(noreg,
1626             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
1627     bind(L);
1628     NOT_CC_INTERP(pop(state));
1629   }
1630 
1631   {
1632     SkipIfEqual skip(this, &DTraceMethodProbes, false);
1633     NOT_CC_INTERP(push(state));
1634     get_method(c_rarg1);
1635     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1636                  r15_thread, c_rarg1);
1637     NOT_CC_INTERP(pop(state));
1638   }
1639 }