/*
 * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_methodHandles_x86.cpp.incl"

#define __ _masm->
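// The '__' shorthand routes each assembler call through the MacroAssembler*
// argument named '_masm' in the enclosing function.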

address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                address interpreted_entry) {
  // Just before the actual machine code entry point, allocate space
  // for a MethodHandleEntry::Data record, so that we can manage everything
  // from one base pointer.
  __ align(wordSize);
  address target = __ pc() + sizeof(Data);
  while (__ pc() < target) {
    __ nop();
    __ align(wordSize);
  }

  MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
  me->set_end_address(__ pc());         // set a temporary end_address
  me->set_from_interpreted_entry(interpreted_entry);
  me->set_type_checking_entry(NULL);

  return (address) me;
}

MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
                                                            address start_addr) {
  MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
  assert(me->end_address() == start_addr, "valid ME");

  // Fill in the real end_address:
  __ align(wordSize);
  me->set_end_address(__ pc());

  return me;
}

#ifdef ASSERT
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
                           const char* error_message) {
  // Verify that argslot lies within (rsp, rbp].
  Label L_ok, L_bad;
  __ cmpptr(argslot_reg, rbp);
  __ jccb(Assembler::above, L_bad);
  __ cmpptr(rsp, argslot_reg);
  __ jccb(Assembler::below, L_ok);
  __ bind(L_bad);
  __ stop(error_message);
  __ bind(L_ok);
}
#endif


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
  // rbx: methodOop
  // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots])
  // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // rdx: garbage temp, blown away

  Register rbx_method = rbx;
  Register rcx_recv   = rcx;
  Register rax_mtype  = rax;
  Register rdx_temp   = rdx;

  // emit WrongMethodType path first, to enable jccb back-branch from main path
  Label wrong_method_type;
  __ bind(wrong_method_type);
  __ push(rax_mtype);       // required mtype
  __ push(rcx_recv);        // bad mh (1st stacked argument)
  __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  // fetch the MethodType from the method handle into rax (the 'check' register)
  {
    Register tem = rbx_method;
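    // Follow the -1-terminated offset chain published by
    // methodOopDesc::method_type_offsets_chain(), one load per entry,
    // until rax_mtype holds the MethodType oop.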
    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
      __ movptr(rax_mtype, Address(tem, *pchase));
      tem = rax_mtype;          // in case there is another indirection
    }
  }
  Register rbx_temp = rbx_method; // done with incoming methodOop

  // given the MethodType, find out where the MH argument is buried
  __ movptr(rdx_temp, Address(rax_mtype,
                              __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rbx_temp)));
  __ movl(rdx_temp, Address(rdx_temp,
                            __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rbx_temp)));
  __ movptr(rcx_recv, __ argument_address(rdx_temp));

  __ check_method_handle_type(rax_mtype, rcx_recv, rdx_temp, wrong_method_type);
  __ jump_to_method_handle_entry(rcx_recv, rdx_temp);

  return entry_point;
}

// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     int arg_mask,
                                     Register rax_argslot,
                                     Register rbx_temp, Register rdx_temp) {
  assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
                             (!arg_slots.is_register() ? rsp : arg_slots.as_register()));

#ifdef ASSERT
  verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ jccb(Assembler::greater, L_bad);
    __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
    __ jccb(Assembler::zero, L_ok);
    __ bind(L_bad);
    __ stop("assert arg_slots <= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() <= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif //ASSERT

#ifdef _LP64
  if (arg_slots.is_register()) {
    // clean high bits of stack motion register (was loaded as an int)
    __ movslq(arg_slots.as_register(), arg_slots.as_register());
  }
#endif

  // Make space on the stack for the inserted argument(s).
  // Then pull down everything shallower than rax_argslot.
  // The stacked return address gets pulled down with everything else.
  // That is, copy [rsp, argslot) downward by -size words.  In pseudo-code:
  //   rsp -= size;
  //   for (rdx = rsp + size; rdx < argslot; rdx++)
  //     rdx[-size] = rdx[0]
  //   argslot -= size;
  __ mov(rdx_temp, rsp);                        // source pointer for copy
  __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
  {
    Label loop;
    __ bind(loop);
    // pull one word down each time through the loop
    __ movptr(rbx_temp, Address(rdx_temp, 0));
    __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
    __ addptr(rdx_temp, wordSize);
    __ cmpptr(rdx_temp, rax_argslot);
    __ jccb(Assembler::less, loop);
  }

  // Now move the argslot down, to point to the opened-up space.
  __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
}

// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register rax_argslot,
                                     Register rbx_temp, Register rdx_temp) {
  assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
                             (!arg_slots.is_register() ? rsp : arg_slots.as_register()));

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (rsp, rbp).
  __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
  verify_argslot(_masm, rbx_temp, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ jccb(Assembler::less, L_bad);
    __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
    __ jccb(Assembler::zero, L_ok);
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif //ASSERT

#ifdef _LP64
  if (false) {                  // not needed, since register is positive
    // clean high bits of stack motion register (was loaded as an int)
    if (arg_slots.is_register())
      __ movslq(arg_slots.as_register(), arg_slots.as_register());
  }
#endif

  // Pull up everything shallower than rax_argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [rsp, argslot) upward by size words.  In pseudo-code:
  //   for (rdx = argslot-1; rdx >= rsp; --rdx)
  //     rdx[size] = rdx[0]
  //   argslot += size;
  //   rsp += size;
  __ lea(rdx_temp, Address(rax_argslot, -wordSize)); // source pointer for copy
  {
    Label loop;
    __ bind(loop);
    // pull one word up each time through the loop
    __ movptr(rbx_temp, Address(rdx_temp, 0));
    __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
    __ addptr(rdx_temp, -wordSize);
    __ cmpptr(rdx_temp, rsp);
    __ jccb(Assembler::greaterEqual, loop);
  }

  // Now move the argslot up, to point to the just-copied block.
  __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
  // And adjust the argslot address to point at the deletion point.
  __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
}

#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
                              oop mh,
                              intptr_t* entry_sp,
                              intptr_t* saved_sp,
                              intptr_t* saved_bp) {
  // called as a leaf from native code: do not block the JVM!
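  // saved_bp is the interpreter frame pointer; read last_sp and the monitor
  // block top out of the frame so the trace line can report the stack extent.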
  intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
  intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
  printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
         adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
  if (last_sp != saved_sp)
    printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
  if (Verbose)  print_method_handle(mh);
}
#endif //PRODUCT

// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  // Here is the register state during an interpreted call,
  // as set up by generate_method_handle_interpreter_entry():
  // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
  // - rcx: receiver method handle
  // - rax: method handle type (only used by the check_mtype entry point)
  // - rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // - rdx: garbage temp, can blow away

  Register rcx_recv    = rcx;
  Register rax_argslot = rax;
  Register rbx_temp    = rbx;
  Register rdx_temp    = rdx;

  // This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls)
  // and gen_c2i_adapter (from compiled calls):
  Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi);

  guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

  // some handy addresses
  Address rbx_method_fie(     rbx,      methodOopDesc::from_interpreted_offset() );

  Address rcx_mh_vmtarget(    rcx_recv, java_dyn_MethodHandle::vmtarget_offset_in_bytes() );
  Address rcx_dmh_vmindex(    rcx_recv, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes() );

  Address rcx_bmh_vmargslot(  rcx_recv, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes() );
  Address rcx_bmh_argument(   rcx_recv, sun_dyn_BoundMethodHandle::argument_offset_in_bytes() );

  Address rcx_amh_vmargslot(  rcx_recv, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes() );
  Address rcx_amh_argument(   rcx_recv, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes() );
  Address rcx_amh_conversion( rcx_recv, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes() );
  Address vmarg;                // __ argument_address(vmargslot)

  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
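  // Byte offset of the java.lang.Class mirror within a klassOop: the embedded
  // Klass starts at klass_part_offset_in_bytes, and its java_mirror field
  // sits at java_mirror_offset_in_bytes within that.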

  if (have_entry(ek)) {
    __ nop();                   // empty stubs make SG sick
    return;
  }

  address interp_entry = __ pc();
  if (UseCompressedOops)  __ unimplemented("UseCompressedOops");

#ifndef PRODUCT
  if (TraceMethodHandles) {
    __ push(rax); __ push(rbx); __ push(rcx); __ push(rdx); __ push(rsi); __ push(rdi);
    __ lea(rax, Address(rsp, wordSize*6)); // entry_sp
    // arguments:
    __ push(rbp);               // interpreter frame pointer
    __ push(rsi);               // saved_sp
    __ push(rax);               // entry_sp
    __ push(rcx);               // mh
    __ push(rcx);
    __ movptr(Address(rsp, 0), (intptr_t)entry_name(ek));
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
    __ pop(rdi); __ pop(rsi); __ pop(rdx); __ pop(rcx); __ pop(rbx); __ pop(rax);
  }
#endif //PRODUCT

  switch ((int) ek) {
  case _raise_exception:
    {
      // Not a real MH entry, but rather shared code for raising an exception.
      // Extra local arguments are pushed on the stack: the required type at
      // TOS+8, the failing object (or NULL) at TOS+4, and the failing
      // bytecode type at TOS.  Beyond those local arguments is the PC, of course.
      Register rdx_code = rdx_temp;
      Register rcx_fail = rcx_recv;
      Register rax_want = rax_argslot;
      Register rdi_pc   = rdi;
      __ pop(rdx_code);  // TOS+0
      __ pop(rcx_fail);  // TOS+4
      __ pop(rax_want);  // TOS+8
      __ pop(rdi_pc);    // caller PC

      __ mov(rsp, rsi);   // cut the stack back to where the caller started

      // Repush the arguments as if coming from the interpreter.
      __ push(rdx_code);
      __ push(rcx_fail);
      __ push(rax_want);

      Register rbx_method = rbx_temp;
      Label no_method;
      // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
      __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
      __ testptr(rbx_method, rbx_method);
      __ jccb(Assembler::zero, no_method);
      int jobject_oop_offset = 0;
      __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset));  // dereference the jobject
      __ testptr(rbx_method, rbx_method);
      __ jccb(Assembler::zero, no_method);
      __ verify_oop(rbx_method);
      __ push(rdi_pc);          // and restore caller PC
      __ jmp(rbx_method_fie);

      // If we get here, the Java runtime did not do its job of creating the exception.
      // Do something that at least causes a valid throw from the interpreter.
      __ bind(no_method);
      __ pop(rax_want);
      __ pop(rcx_fail);
      __ push(rax_want);
      __ push(rcx_fail);
      __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
    }
    break;

  case _invokestatic_mh:
  case _invokespecial_mh:
    {
      Register rbx_method = rbx_temp;
      __ movptr(rbx_method, rcx_mh_vmtarget); // target is a methodOop
      __ verify_oop(rbx_method);
      // same as TemplateTable::invokestatic or invokespecial,
      // minus the CP setup and profiling:
      if (ek == _invokespecial_mh) {
        // Must load & check the first argument before entering the target method.
        __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
        __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
        __ null_check(rcx_recv);
        __ verify_oop(rcx_recv);
      }
      __ jmp(rbx_method_fie);
    }
    break;

  case _invokevirtual_mh:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      // pick out the vtable index and receiver offset from the MH,
      // and then we can discard it:
      __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
      Register rbx_index = rbx_temp;
      __ movl(rbx_index, rcx_dmh_vmindex);
      // Note:  The verifier allows us to ignore rcx_mh_vmtarget.
      __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
      __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());

      // get receiver klass
      Register rax_klass = rax_argslot;
      __ load_klass(rax_klass, rcx_recv);
      __ verify_oop(rax_klass);

      // get target methodOop & entry point
      const int base = instanceKlass::vtable_start_offset() * wordSize;
      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
      Address vtable_entry_addr(rax_klass,
                                rbx_index, Address::times_ptr,
                                base + vtableEntry::method_offset_in_bytes());
      Register rbx_method = rbx_temp;
      __ movptr(rbx_method, vtable_entry_addr);

      __ verify_oop(rbx_method);
      __ jmp(rbx_method_fie);
    }
    break;

  case _invokeinterface_mh:
    {
      // same as TemplateTable::invokeinterface,
      // minus the CP setup and profiling:

      // pick out the interface and itable index from the MH.
      __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
      Register rdx_intf  = rdx_temp;
      Register rbx_index = rbx_temp;
      __ movptr(rdx_intf,  rcx_mh_vmtarget);
      __ movl(rbx_index,   rcx_dmh_vmindex);
      __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
      __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());

      // get receiver klass
      Register rax_klass = rax_argslot;
      __ load_klass(rax_klass, rcx_recv);
      __ verify_oop(rax_klass);

      Register rdi_temp   = rdi;
      Register rbx_method = rbx_index;

      // get interface klass
      Label no_such_interface;
      __ verify_oop(rdx_intf);
      __ lookup_interface_method(rax_klass, rdx_intf,
                                 // note: next two args must be the same:
                                 rbx_index, rbx_method,
                                 rdi_temp,
                                 no_such_interface);

      __ verify_oop(rbx_method);
      __ jmp(rbx_method_fie);
      __ hlt();

      __ bind(no_such_interface);
      // Throw an exception.
      // For historical reasons, it will be IncompatibleClassChangeError.
      __ pushptr(Address(rdx_intf, java_mirror_offset));  // required interface
      __ push(rcx_recv);        // bad receiver
      __ push((int)Bytecodes::_invokeinterface);  // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
    }
    break;

  case _bound_ref_mh:
  case _bound_int_mh:
  case _bound_long_mh:
  case _bound_ref_direct_mh:
  case _bound_int_direct_mh:
  case _bound_long_direct_mh:
    {
      bool direct_to_method = (ek >= _bound_ref_direct_mh);
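      // The *_direct_mh variants bind straight to a methodOop and jump to its
      // interpreted entry; the others bind to another method handle and chain
      // to its entry instead.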
      BasicType arg_type  = T_ILLEGAL;
      int       arg_mask  = _INSERT_NO_MASK;
      int       arg_slots = -1;
      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);

      // make room for the new argument:
      __ movl(rax_argslot, rcx_bmh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));
      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask,
                       rax_argslot, rbx_temp, rdx_temp);

      // store bound argument into the new stack slot:
      __ movptr(rbx_temp, rcx_bmh_argument);
      Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
      if (arg_type == T_OBJECT) {
        __ movptr(Address(rax_argslot, 0), rbx_temp);
      } else {
        __ load_sized_value(rdx_temp, prim_value_addr,
                            type2aelembytes(arg_type), is_signed_subword_type(arg_type));
        __ movptr(Address(rax_argslot, 0), rdx_temp);
#ifndef _LP64
        if (arg_slots == 2) {
          __ movl(rdx_temp, prim_value_addr.plus_disp(wordSize));
          __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rdx_temp);
        }
#endif //_LP64
      }

      if (direct_to_method) {
        Register rbx_method = rbx_temp;
        __ movptr(rbx_method, rcx_mh_vmtarget);
        __ verify_oop(rbx_method);
        __ jmp(rbx_method_fie);
      } else {
        __ movptr(rcx_recv, rcx_mh_vmtarget);
        __ verify_oop(rcx_recv);
        __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
      }
    }
    break;

  case _adapter_retype_only:
  case _adapter_retype_raw:
    // immediately jump to the next MH layer:
    __ movptr(rcx_recv, rcx_mh_vmtarget);
    __ verify_oop(rcx_recv);
    __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    // This is OK when all parameter types widen.
    // It is also OK when a return type narrows.
    break;

  case _adapter_check_cast:
    {
      // temps:
      Register rbx_klass = rbx_temp; // interesting AMH data

      // check a reference argument before jumping to the next layer of MH:
      __ movl(rax_argslot, rcx_amh_vmargslot);
      vmarg = __ argument_address(rax_argslot);

      // What class are we casting to?
      __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!
      __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));

      Label done;
      __ movptr(rdx_temp, vmarg);
      __ testptr(rdx_temp, rdx_temp);
      __ jccb(Assembler::zero, done);         // no cast if null
      __ load_klass(rdx_temp, rdx_temp);

      // live at this point:
      // - rbx_klass:  klass required by the target method
      // - rdx_temp:   argument klass to test
      // - rcx_recv:   adapter method handle
      __ check_klass_subtype(rdx_temp, rbx_klass, rax_argslot, done);

      // If we get here, the type check failed!
      // Call the wrong_method_type stub, passing the failing argument type in rax.
      Register rax_mtype = rax_argslot;
      __ movl(rax_argslot, rcx_amh_vmargslot);  // reload argslot field
      __ movptr(rdx_temp, vmarg);

      __ pushptr(rcx_amh_argument); // required class
      __ push(rdx_temp);            // bad object
      __ push((int)Bytecodes::_checkcast);  // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

      __ bind(done);
      // get the new MH:
      __ movptr(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_prim_to_prim:
  case _adapter_ref_to_prim:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
//case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
    {
      // perform an in-place conversion to int or an int subword
      __ movl(rax_argslot, rcx_amh_vmargslot);
      vmarg = __ argument_address(rax_argslot);

      switch (ek) {
      case _adapter_opt_i2i:
        __ movl(rdx_temp, vmarg);
        break;
      case _adapter_opt_l2i:
        {
          // just delete the extra slot; on a little-endian machine we keep the first
          __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
          remove_arg_slots(_masm, -stack_move_unit(),
                           rax_argslot, rbx_temp, rdx_temp);
          vmarg = Address(rax_argslot, -Interpreter::stackElementSize());
          __ movl(rdx_temp, vmarg);
        }
        break;
      case _adapter_opt_unboxi:
        {
          // Load the value up from the heap.
          __ movptr(rdx_temp, vmarg);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
#ifdef ASSERT
          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
            if (is_subword_type(BasicType(bt)))
              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
          }
#endif
          __ null_check(rdx_temp, value_offset);
          __ movl(rdx_temp, Address(rdx_temp, value_offset));
          // We load this as a word.  Because we are little-endian,
          // the low bits will be correct, but the high bits may need cleaning.
          // The vminfo will guide us to clean those bits.
        }
        break;
      default:
        ShouldNotReachHere();
      }

      // Do the requested conversion and store the value.
      Register rbx_vminfo = rbx_temp;
      __ movl(rbx_vminfo, rcx_amh_conversion);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");

      // get the new MH:
      __ movptr(rcx_recv, rcx_mh_vmtarget);
      // (now we are done with the old MH)

      // original 32-bit vmdata word must be of this form:
      //    | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
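      // The variable shifts below take their count from CL, so swap the
      // vminfo bit count into rcx; rbx holds the target MH until rcx is
      // swapped back afterwards.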
      __ xchgptr(rcx, rbx_vminfo);                // free rcx for shifts
      __ shll(rdx_temp /*, rcx*/);
      Label zero_extend, done;
      __ testl(rcx, CONV_VMINFO_SIGN_FLAG);
      __ jccb(Assembler::zero, zero_extend);

      // this path is taken for int->byte, int->short
      __ sarl(rdx_temp /*, rcx*/);
      __ jmpb(done);

      __ bind(zero_extend);
      // this is taken for int->char
      __ shrl(rdx_temp /*, rcx*/);

      __ bind(done);
      __ movl(vmarg, rdx_temp);  // Store the value.
      __ xchgptr(rcx, rbx_vminfo);                // restore rcx_recv

      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
    {
      // perform an in-place int-to-long or ref-to-long conversion
      __ movl(rax_argslot, rcx_amh_vmargslot);

      // on a little-endian machine we keep the first slot and add another after
      __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
                       rax_argslot, rbx_temp, rdx_temp);
      Address vmarg1(rax_argslot, -Interpreter::stackElementSize());
      Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize());

      switch (ek) {
      case _adapter_opt_i2l:
        {
#ifdef _LP64
          __ movslq(rdx_temp, vmarg1);  // Load sign-extended
          __ movq(vmarg1, rdx_temp);    // Store into first slot
#else
          __ movl(rdx_temp, vmarg1);
          __ sarl(rdx_temp, BitsPerInt - 1);  // __ extend_sign()
          __ movl(vmarg2, rdx_temp); // store second word
#endif
        }
        break;
      case _adapter_opt_unboxl:
        {
          // Load the value up from the heap.
          __ movptr(rdx_temp, vmarg1);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
          __ null_check(rdx_temp, value_offset);
#ifdef _LP64
          __ movq(rbx_temp, Address(rdx_temp, value_offset));
          __ movq(vmarg1, rbx_temp);
#else
          __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt));
          __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt));
          __ movl(vmarg1, rbx_temp);
          __ movl(vmarg2, rdx_temp);
#endif
        }
        break;
      default:
        ShouldNotReachHere();
      }

      __ movptr(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
    {
      // perform an in-place floating primitive conversion
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
      if (ek == _adapter_opt_f2d) {
        insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
                         rax_argslot, rbx_temp, rdx_temp);
      }
      Address vmarg(rax_argslot, -Interpreter::stackElementSize());

#ifdef _LP64
      if (ek == _adapter_opt_f2d) {
        __ movflt(xmm0, vmarg);
        __ cvtss2sd(xmm0, xmm0);
        __ movdbl(vmarg, xmm0);
      } else {
        __ movdbl(xmm0, vmarg);
        __ cvtsd2ss(xmm0, xmm0);
        __ movflt(vmarg, xmm0);
      }
#else //_LP64
      if (ek == _adapter_opt_f2d) {
        __ fld_s(vmarg);        // load float to ST0
        __ fstp_d(vmarg);       // store double (f2d widens in place)
      } else {
        __ fld_d(vmarg);        // load double to ST0
        __ fstp_s(vmarg);       // store single
      }
#endif //_LP64

      if (ek == _adapter_opt_d2f) {
        remove_arg_slots(_masm, -stack_move_unit(),
                         rax_argslot, rbx_temp, rdx_temp);
      }

      __ movptr(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_prim_to_ref:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_swap_args:
  case _adapter_rot_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_swap_1:
  case _adapter_opt_swap_2:
  case _adapter_opt_rot_1_up:
  case _adapter_opt_rot_1_down:
  case _adapter_opt_rot_2_up:
  case _adapter_opt_rot_2_down:
    {
      int swap_bytes = 0, rotate = 0;
      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
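      // swap_bytes is the byte size of each argument chunk being moved;
      // rotate is 0 for a plain swap, positive for an upward rotation,
      // negative for a downward one.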

      // 'argslot' is the position of the first argument to swap
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      // 'vminfo' is the second
      Register rbx_destslot = rbx_temp;
      __ movl(rbx_destslot, rcx_amh_conversion);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
      __ andl(rbx_destslot, CONV_VMINFO_MASK);
      __ lea(rbx_destslot, __ argument_address(rbx_destslot));
      DEBUG_ONLY(verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame"));

      if (!rotate) {
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ movptr(rdx_temp, Address(rax_argslot , i));
          __ push(rdx_temp);
          __ movptr(rdx_temp, Address(rbx_destslot, i));
          __ movptr(Address(rax_argslot, i), rdx_temp);
          __ pop(rdx_temp);
          __ movptr(Address(rbx_destslot, i), rdx_temp);
        }
      } else {
        // push the first chunk, which is going to get overwritten
        for (int i = swap_bytes; (i -= wordSize) >= 0; ) {
          __ movptr(rdx_temp, Address(rax_argslot, i));
          __ push(rdx_temp);
        }

        if (rotate > 0) {
          // rotate upward
          __ subptr(rax_argslot, swap_bytes);
#ifdef ASSERT
          {
            // Verify that argslot > destslot, by at least swap_bytes.
            Label L_ok;
            __ cmpptr(rax_argslot, rbx_destslot);
            __ jccb(Assembler::aboveEqual, L_ok);
            __ stop("source must be above destination (upward rotation)");
            __ bind(L_ok);
          }
#endif
          // work argslot down to destslot, copying contiguous data upwards
          // pseudo-code:
          //   rax = src_addr - swap_bytes
          //   rbx = dest_addr
          //   while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--;
          Label loop;
          __ bind(loop);
          __ movptr(rdx_temp, Address(rax_argslot, 0));
          __ movptr(Address(rax_argslot, swap_bytes), rdx_temp);
          __ addptr(rax_argslot, -wordSize);
          __ cmpptr(rax_argslot, rbx_destslot);
          __ jccb(Assembler::aboveEqual, loop);
        } else {
          __ addptr(rax_argslot, swap_bytes);
#ifdef ASSERT
          {
            // Verify that argslot < destslot, by at least swap_bytes.
            Label L_ok;
            __ cmpptr(rax_argslot, rbx_destslot);
            __ jccb(Assembler::belowEqual, L_ok);
            __ stop("source must be below destination (downward rotation)");
            __ bind(L_ok);
          }
#endif
          // work argslot up to destslot, copying contiguous data downwards
          // pseudo-code:
          //   rax = src_addr + swap_bytes
          //   rbx = dest_addr
          //   while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++;
          Label loop;
          __ bind(loop);
          __ movptr(rdx_temp, Address(rax_argslot, 0));
          __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp);
          __ addptr(rax_argslot, wordSize);
          __ cmpptr(rax_argslot, rbx_destslot);
          __ jccb(Assembler::belowEqual, loop);
        }

        // pop the original first chunk into the destination slot, now free
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ pop(rdx_temp);
          __ movptr(Address(rbx_destslot, i), rdx_temp);
        }
      }

      __ movptr(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_dup_args:
    {
      // 'argslot' is the position of the first argument to duplicate
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      // 'stack_move' is negative number of words to duplicate
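      // (The conversion word packs stack_move in its high bits; shifting
      // right by CONV_STACK_MOVE_SHIFT recovers it as a signed word count.)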
      Register rdx_stack_move = rdx_temp;
      __ movl2ptr(rdx_stack_move, rcx_amh_conversion);
      __ sarptr(rdx_stack_move, CONV_STACK_MOVE_SHIFT);

      int argslot0_num = 0;
      Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num));
      assert(argslot0.base() == rsp, "");
      int pre_arg_size = argslot0.disp();
      assert(pre_arg_size % wordSize == 0, "");
      assert(pre_arg_size > 0, "must include PC");

      // remember the old rsp+1 (argslot[0])
      Register rbx_oldarg = rbx_temp;
      __ lea(rbx_oldarg, argslot0);

      // move rsp down to make room for dups
      __ lea(rsp, Address(rsp, rdx_stack_move, Address::times_ptr));

      // compute the new rsp+1 (argslot[0])
      Register rdx_newarg = rdx_temp;
      __ lea(rdx_newarg, argslot0);

      __ push(rdi);             // need a temp
      // (preceding push must be done after arg addresses are taken!)

      // pull down the pre_arg_size data (PC)
      for (int i = -pre_arg_size; i < 0; i += wordSize) {
        __ movptr(rdi, Address(rbx_oldarg, i));
        __ movptr(Address(rdx_newarg, i), rdi);
      }

      // copy from rax_argslot[0...] down to new_rsp[1...]
      // pseudo-code:
      //   rbx = old_rsp+1
      //   rdx = new_rsp+1
      //   rax = argslot
      //   while (rdx < rbx) *rdx++ = *rax++
      Label loop;
      __ bind(loop);
      __ movptr(rdi, Address(rax_argslot, 0));
      __ movptr(Address(rdx_newarg, 0), rdi);
      __ addptr(rax_argslot, wordSize);
      __ addptr(rdx_newarg, wordSize);
      __ cmpptr(rdx_newarg, rbx_oldarg);
      __ jccb(Assembler::less, loop);

      __ pop(rdi);              // restore temp

      __ movptr(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_drop_args:
    {
      // 'argslot' is the position of the first argument to nuke
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      __ push(rdi);             // need a temp
      // (must do previous push after argslot address is taken)

      // 'stack_move' is number of words to drop
      Register rdi_stack_move = rdi;
      __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
      __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
      remove_arg_slots(_masm, rdi_stack_move,
                       rax_argslot, rbx_temp, rdx_temp);

      __ pop(rdi);              // restore temp

      __ movptr(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_collect_args:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_spread_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_spread_0:
  case _adapter_opt_spread_1:
  case _adapter_opt_spread_more:
    {
      // spread an array out into a group of arguments
      int length_constant = get_ek_adapter_opt_spread_info(ek);
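      // length_constant is the expected array length when it is known
      // statically (spread_0, spread_1), or -1 when it must be checked at
      // run time against the vminfo field below.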

      // find the address of the array argument
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      // grab some temps
      { __ push(rsi); __ push(rdi); }
      // (preceding pushes must be done after argslot address is taken!)
#define UNPUSH_RSI_RDI \
      { __ pop(rdi); __ pop(rsi); }

      // rax_argslot points both to the array and to the first output arg
      vmarg = Address(rax_argslot, 0);

      // Get the array value.
      Register  rsi_array       = rsi;
      Register  rdx_array_klass = rdx_temp;
      BasicType elem_type       = T_OBJECT;
      int       length_offset   = arrayOopDesc::length_offset_in_bytes();
      int       elem0_offset    = arrayOopDesc::base_offset_in_bytes(elem_type);
      __ movptr(rsi_array, vmarg);
      Label skip_array_check;
      if (length_constant == 0) {
        __ testptr(rsi_array, rsi_array);
        __ jcc(Assembler::zero, skip_array_check);
      }
      __ null_check(rsi_array, oopDesc::klass_offset_in_bytes());
      __ load_klass(rdx_array_klass, rsi_array);

      // Check the array type.
      Register rbx_klass = rbx_temp;
      __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!
      __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));

      Label ok_array_klass, bad_array_klass, bad_array_length;
      __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi, ok_array_klass);
      // If we get here, the type check failed!
      __ jmp(bad_array_klass);
      __ bind(ok_array_klass);

      // Check length.
      if (length_constant >= 0) {
        __ cmpl(Address(rsi_array, length_offset), length_constant);
      } else {
        Register rbx_vminfo = rbx_temp;
        __ movl(rbx_vminfo, rcx_amh_conversion);
        assert(CONV_VMINFO_SHIFT == 0, "preshifted");
        __ andl(rbx_vminfo, CONV_VMINFO_MASK);
        __ cmpl(rbx_vminfo, Address(rsi_array, length_offset));
      }
      __ jcc(Assembler::notEqual, bad_array_length);

      Register rdx_argslot_limit = rdx_temp;

      // Array length checks out.  Now insert any required stack slots.
      if (length_constant == -1) {
        // Form a pointer to the end of the affected region.
        __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize()));
        // 'stack_move' is negative number of words to insert
        Register rdi_stack_move = rdi;
        __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
        __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
        Register rsi_temp = rsi_array;  // spill this
        insert_arg_slots(_masm, rdi_stack_move, -1,
                         rax_argslot, rbx_temp, rsi_temp);
        // reload the array (since rsi was killed)
        __ movptr(rsi_array, vmarg);
      } else if (length_constant > 1) {
        int arg_mask = 0;
        int new_slots = (length_constant - 1);
        for (int i = 0; i < new_slots; i++) {
          arg_mask <<= 1;
          arg_mask |= _INSERT_REF_MASK;
        }
        insert_arg_slots(_masm, new_slots * stack_move_unit(), arg_mask,
                         rax_argslot, rbx_temp, rdx_temp);
      } else if (length_constant == 1) {
        // no stack resizing required
      } else if (length_constant == 0) {
        remove_arg_slots(_masm, -stack_move_unit(),
                         rax_argslot, rbx_temp, rdx_temp);
      }

      // Copy from the array to the new slots.
      // Note: Stack change code preserves integrity of rax_argslot pointer.
      // So even after slot insertions, rax_argslot still points to first argument.
      if (length_constant == -1) {
        // [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
        Register rsi_source = rsi_array;
        __ lea(rsi_source, Address(rsi_array, elem0_offset));
        Label loop;
        __ bind(loop);
        __ movptr(rbx_temp, Address(rsi_source, 0));
        __ movptr(Address(rax_argslot, 0), rbx_temp);
        __ addptr(rsi_source, type2aelembytes(elem_type));
        __ addptr(rax_argslot, Interpreter::stackElementSize());
        __ cmpptr(rax_argslot, rdx_argslot_limit);
        __ jccb(Assembler::less, loop);
      } else if (length_constant == 0) {
        __ bind(skip_array_check);
        // nothing to copy
      } else {
        int elem_offset = elem0_offset;
        int slot_offset = 0;
        for (int index = 0; index < length_constant; index++) {
          __ movptr(rbx_temp, Address(rsi_array, elem_offset));
          __ movptr(Address(rax_argslot, slot_offset), rbx_temp);
          elem_offset += type2aelembytes(elem_type);
          slot_offset += Interpreter::stackElementSize();
        }
      }

      // Arguments are spread.  Move to next method handle.
      UNPUSH_RSI_RDI;
      __ movptr(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);

      __ bind(bad_array_klass);
      UNPUSH_RSI_RDI;
      __ pushptr(Address(rdx_array_klass, java_mirror_offset)); // required type
      __ pushptr(vmarg);                // bad array
      __ push((int)Bytecodes::_aaload); // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

      __ bind(bad_array_length);
      UNPUSH_RSI_RDI;
      __ push(rcx_recv);        // AMH requiring a certain length
      __ pushptr(vmarg);        // bad array
      __ push((int)Bytecodes::_arraylength); // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

#undef UNPUSH_RSI_RDI
    }
    break;

  case _adapter_flyby:
  case _adapter_ricochet:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  default:  ShouldNotReachHere();
  }
  __ hlt();

  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  __ unimplemented(entry_name(ek)); // %%% FIXME: NYI

  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}