/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                address interpreted_entry) {
  // Just before the actual machine code entry point, allocate space
  // for a MethodHandleEntry::Data record, so that we can manage everything
  // from one base pointer.
  __ align(wordSize);
  address target = __ pc() + sizeof(Data);
  while (__ pc() < target) {
    __ nop();
    __ align(wordSize);
  }

  MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
  me->set_end_address(__ pc());         // set a temporary end_address
  me->set_from_interpreted_entry(interpreted_entry);
  me->set_type_checking_entry(NULL);

  return (address) me;
}
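
// In C terms, the layout built by start/finish_compiled_entry is roughly
// the following (a sketch only; the real Data record is declared in
// methodHandles.hpp, and the field names below mirror the accessors used
// above):
//
//   address me     = pc();               // MethodHandleEntry overlays this
//   address target = me + sizeof(Data);  // machine code starts here
//   ... emit the stub's instructions ...
//   me->set_end_address(pc());           // finish_compiled_entry records end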

MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
                                                address start_addr) {
  MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
  assert(me->end_address() == start_addr, "valid ME");

  // Fill in the real end_address:
  __ align(wordSize);
  me->set_end_address(__ pc());

  return me;
}


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
  // I5_savedSP/O5_savedSP: sender SP (must preserve)
  // G4 (Gargs): incoming argument list (must preserve)
  // G5_method:  invoke methodOop
  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
  // O0, O1, O2, O3, O4: garbage temps, blown away
  Register O0_mtype   = O0;
  Register O1_scratch = O1;
  Register O2_scratch = O2;
  Register O3_scratch = O3;
  Register O4_argslot = O4;
  Register O4_argbase = O4;

  // emit WrongMethodType path first, to enable back-branch from main path
  Label wrong_method_type;
  __ bind(wrong_method_type);
  Label invoke_generic_slow_path;
  assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");
  __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
  __ cmp(O1_scratch, (int) vmIntrinsics::_invokeExact);
  __ brx(Assembler::notEqual, false, Assembler::pt, invoke_generic_slow_path);
  __ delayed()->nop();
  __ mov(O0_mtype, G5_method_type);  // required by throw_WrongMethodType
  // mov(G3_method_handle, G3_method_handle);  // already in this register
  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
  __ delayed()->nop();

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  // fetch the MethodType from the method handle
  {
    Register tem = G5_method;
    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
      __ ld_ptr(Address(tem, *pchase), O0_mtype);
      tem = O0_mtype;          // in case there is another indirection
    }
  }
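  // The chase above is the assembly form of this C walk (a sketch; the
  // offset list comes from method_type_offsets_chain() and ends with -1):
  //   intptr_t p = (intptr_t) method;
  //   for (jint* off = chain; *off != -1; off++)
  //     p = *(intptr_t*) (p + *off);   // p now holds the MethodType oop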

  // given the MethodType, find out where the MH argument is buried
  __ load_heap_oop(Address(O0_mtype,   __ delayed_value(java_dyn_MethodType::form_offset_in_bytes,        O1_scratch)), O4_argslot);
  __ ldsw(         Address(O4_argslot, __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot);
  __ add(Gargs, __ argument_offset(O4_argslot, 1), O4_argbase);
  // Note: argument_address uses its input as a scratch register!
  __ ld_ptr(Address(O4_argbase, -Interpreter::stackElementSize), G3_method_handle);

  trace_method_handle(_masm, "invokeExact");

  __ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type);
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  // for invokeGeneric (only), apply argument and result conversions on the fly
  __ bind(invoke_generic_slow_path);
#ifdef ASSERT
  { Label L;
    __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
    __ cmp(O1_scratch, (int) vmIntrinsics::_invokeGeneric);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("bad methodOop::intrinsic_id");
    __ bind(L);
  }
#endif //ASSERT
  // make room on the stack for two extra pointers (the MethodType and the adapter):
  insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK, O4_argbase, O1_scratch, O2_scratch, O3_scratch);
  // load up an adapter from the calling type (Java weaves this)
  Register O2_form    = O2_scratch;
  Register O3_adapter = O3_scratch;
  __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes,               O1_scratch)), O2_form);
  // load_heap_oop(Address(O2_form,  __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
  // deal with old JDK versions:
  __ add(          Address(O2_form,  __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
  __ cmp(O3_adapter, O2_form);
  Label sorry_no_invoke_generic;
  __ brx(Assembler::lessUnsigned, false, Assembler::pn, sorry_no_invoke_generic);
  __ delayed()->nop();

  __ load_heap_oop(Address(O3_adapter, 0), O3_adapter);
  __ tst(O3_adapter);
  __ brx(Assembler::zero, false, Assembler::pn, sorry_no_invoke_generic);
  __ delayed()->nop();
  __ st_ptr(O3_adapter, Address(O4_argbase, 1 * Interpreter::stackElementSize));
  // As a trusted first argument, pass the type being called, so the adapter knows
  // the actual types of the arguments and return values.
  // (Generic invokers are shared among form-families of method-type.)
  __ st_ptr(O0_mtype,   Address(O4_argbase, 0 * Interpreter::stackElementSize));
  // FIXME: assert that O3_adapter is of the right method-type.
  __ mov(O3_adapter, G3_method_handle);
  trace_method_handle(_masm, "invokeGeneric");
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  __ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available!
  __ mov(O0_mtype, G5_method_type);  // required by throw_WrongMethodType
  // mov(G3_method_handle, G3_method_handle);  // already in this register
  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
  __ delayed()->nop();

  return entry_point;
}


#ifdef ASSERT
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
  // Verify that argslot lies within (Gargs, FP].
  Label L_ok, L_bad;
  BLOCK_COMMENT("{ verify_argslot");
#ifdef _LP64
  __ add(FP, STACK_BIAS, temp_reg);
  __ cmp(argslot_reg, temp_reg);
#else
  __ cmp(argslot_reg, FP);
#endif
  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
  __ delayed()->nop();
  __ cmp(Gargs, argslot_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
  __ delayed()->nop();
  __ bind(L_bad);
  __ stop(error_message);
  __ bind(L_ok);
  BLOCK_COMMENT("} verify_argslot");
}
#endif


// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     int arg_mask,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

#ifdef ASSERT
  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::greater, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots <= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() <= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

#ifdef _LP64
  if (arg_slots.is_register()) {
    // Was arg_slots register loaded as signed int?
    Label L_ok;
    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
    __ sra(temp_reg, BitsPerInt, temp_reg);
    __ cmp(arg_slots.as_register(), temp_reg);
    __ br(Assembler::equal, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ stop("arg_slots register not loaded as signed int");
    __ bind(L_ok);
  }
#endif

  // Make space on the stack for the inserted argument(s).
  // Then pull down everything shallower than argslot_reg.
  // The stacked return address gets pulled down with everything else.
  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
  //   sp -= size;
  //   for (temp = sp + size; temp < argslot; temp++)
  //     temp[-size] = temp[0]
  //   argslot -= size;
  BLOCK_COMMENT("insert_arg_slots {");
  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);
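  // Worked example (a sketch, LP64 sizes): inserting 3 slots gives
  // offset = -24; TwoWordAlignmentMask = 0xf and -24 & ~0xf = -32, so SP
  // drops by a full 32 bytes and stays 2*wordSize aligned, while Gargs and
  // the copy below still move by the exact 24 bytes.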

  __ mov(Gargs, temp_reg);  // source pointer for copy
  __ add(Gargs, offset, Gargs);

  {
    Label loop;
    __ BIND(loop);
    // pull one word down each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ add(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, argslot_reg);
    __ brx(Assembler::less, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot down, to point to the opened-up space.
  __ add(argslot_reg, offset, argslot_reg);
  BLOCK_COMMENT("} insert_arg_slots");
}
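
// For reference, a typical call from this file (see the bound-argument
// entries below) opens arg_slots slots at a computed argument position:
//   insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask,
//                    O0_argslot, O1_scratch, O2_scratch, G5_index);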


// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
  __ add(argslot_reg, offset, temp2_reg);
  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::less, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

  BLOCK_COMMENT("remove_arg_slots {");
  // Pull up everything shallower than argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
  //   for (temp = argslot-1; temp >= sp; --temp)
  //     temp[size] = temp[0]
  //   argslot += size;
  //   sp += size;
  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
  {
    Label loop;
    __ BIND(loop);
    // pull one word up each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ sub(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, Gargs);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot up, to point to the just-copied block.
  __ add(Gargs, offset, Gargs);
  // And adjust the argslot address to point at the deletion point.
  __ add(argslot_reg, offset, argslot_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);
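  // Worked example (a sketch, LP64 sizes): removing 3 slots gives
  // offset = +24; +24 & ~0xf = +16, so SP rises by only the aligned 16
  // bytes and never breaks the 2*wordSize alignment it must keep.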
  BLOCK_COMMENT("} remove_arg_slots");
}


#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
                              oopDesc* mh) {
  printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
  print_method_handle(mh);
}
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  // save: Gargs, O5_savedSP
  __ save_frame(16);
  __ set((intptr_t) adaptername, O0);
  __ mov(G3_method_handle, O1);
  __ mov(G3_method_handle, L3);
  __ mov(Gargs, L4);
  __ mov(G5_method_type, L5);
  __ call_VM_leaf(L7, CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  __ mov(L3, G3_method_handle);
  __ mov(L4, Gargs);
  __ mov(L5, G5_method_type);
  __ restore();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif // PRODUCT

// which conversion op types are implemented here?
int MethodHandles::adapter_conversion_ops_supported_mask() {
  return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
         |(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
         |(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
         |(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
         |(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
         |(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
         //|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
         );
  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}
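
// A caller can test support for one conversion op against this bitmask,
// e.g. (a sketch):
//   bool ok = ((MethodHandles::adapter_conversion_ops_supported_mask()
//               >> sun_dyn_AdapterMethodHandle::OP_CHECK_CAST) & 1) != 0;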

//------------------------------------------------------------------------------
// MethodHandles::generate_method_handle_stub
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  // Here is the register state during an interpreted call,
  // as set up by generate_method_handle_interpreter_entry():
  // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
  // - G3: receiver method handle
  // - O5_savedSP: sender SP (must preserve)

  const Register O0_argslot = O0;
  const Register O1_scratch = O1;
  const Register O2_scratch = O2;
  const Register O3_scratch = O3;
  const Register G5_index   = G5;

  // Argument registers for _raise_exception.
  const Register O0_code     = O0;
  const Register O1_actual   = O1;
  const Register O2_required = O2;

  guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

  // Some handy addresses:
  Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));

  Address G3_mh_vmtarget(   G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());

  Address G3_dmh_vmindex(   G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());

  Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
  Address G3_bmh_argument(  G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());

  Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
  Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
  Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());

  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

  if (have_entry(ek)) {
    __ nop();  // empty stubs make SG sick
    return;
  }

  address interp_entry = __ pc();

  trace_method_handle(_masm, entry_name(ek));

  switch ((int) ek) {
  case _raise_exception:
    {
      // Not a real MH entry, but rather shared code for raising an
      // exception.  Since we use a C2I adapter to set up the
      // interpreter state, arguments are expected in compiler
      // argument registers.
      assert(raise_exception_method(), "must be set");
      address c2i_entry = raise_exception_method()->get_c2i_entry();
      assert(c2i_entry, "method must be linked");

      __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.

      Label L_no_method;
      // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
      __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
      __ ld_ptr(Address(G5_method, 0), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, L_no_method);
      __ delayed()->nop();

      const int jobject_oop_offset = 0;
      __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, L_no_method);
      __ delayed()->nop();

      __ verify_oop(G5_method);
      __ jump_to(AddressLiteral(c2i_entry), O3_scratch);
      __ delayed()->nop();

      // If we get here, the Java runtime did not do its job of creating the exception.
      // Do something that at least causes a valid throw from the interpreter.
      __ bind(L_no_method);
      __ unimplemented("call throw_WrongMethodType_entry");
    }
    break;

  case _invokestatic_mh:
  case _invokespecial_mh:
    {
      __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
      __ verify_oop(G5_method);
      // Same as TemplateTable::invokestatic or invokespecial,
      // minus the CP setup and profiling:
      if (ek == _invokespecial_mh) {
        // Must load & check the first argument before entering the target method.
        __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
        __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
        __ null_check(G3_method_handle);
        __ verify_oop(G3_method_handle);
      }
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokevirtual_mh:
    {
      // Same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      // Pick out the vtable index and receiver offset from the MH,
      // and then we can discard it:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      __ ldsw(G3_dmh_vmindex, G5_index);
      // Note:  The verifier allows us to ignore G3_mh_vmtarget.
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get target methodOop & entry point:
      const int base = instanceKlass::vtable_start_offset() * wordSize;
      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");

      __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
      __ add(O0_klass, G5_index, O0_klass);
      Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
      __ ld_ptr(vtable_entry_addr, G5_method);

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokeinterface_mh:
    {
      // Same as TemplateTable::invokeinterface,
      // minus the CP setup and profiling:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      Register O1_intf  = O1_scratch;
      __ load_heap_oop(G3_mh_vmtarget, O1_intf);
      __ ldsw(G3_dmh_vmindex, G5_index);
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get interface:
      Label no_such_interface;
      __ verify_oop(O1_intf);
      __ lookup_interface_method(O0_klass, O1_intf,
                                 // Note: next two args must be the same:
                                 G5_index, G5_method,
                                 O2_scratch,
                                 O3_scratch,
                                 no_such_interface);

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();

      __ bind(no_such_interface);
      // Throw an exception.
      // For historical reasons, it will be IncompatibleClassChangeError.
      __ unimplemented("not tested yet");
      __ ld_ptr(Address(O1_intf, java_mirror_offset), O2_required);  // required interface
      __ mov(   O0_klass,                             O1_actual);    // bad receiver
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
      __ delayed()->mov(Bytecodes::_invokeinterface,  O0_code);      // who is complaining?
    }
    break;

  case _bound_ref_mh:
  case _bound_int_mh:
  case _bound_long_mh:
  case _bound_ref_direct_mh:
  case _bound_int_direct_mh:
  case _bound_long_direct_mh:
    {
      const bool direct_to_method = (ek >= _bound_ref_direct_mh);
      BasicType arg_type  = T_ILLEGAL;
      int       arg_mask  = _INSERT_NO_MASK;
      int       arg_slots = -1;
      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);

      // Make room for the new argument:
      __ ldsw(G3_bmh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);

      // Store bound argument into the new stack slot:
      __ load_heap_oop(G3_bmh_argument, O1_scratch);
      if (arg_type == T_OBJECT) {
        __ st_ptr(O1_scratch, Address(O0_argslot, 0));
      } else {
        Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
        __ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type));
        if (arg_slots == 2) {
          __ unimplemented("not yet tested");
#ifndef _LP64
          __ signx(O2_scratch, O3_scratch);  // Sign extend
#endif
          __ st_long(O2_scratch, Address(O0_argslot, 0));  // Uses O2/O3 on !_LP64
        } else {
          __ st_ptr( O2_scratch, Address(O0_argslot, 0));
        }
      }

      if (direct_to_method) {
        __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
        __ verify_oop(G5_method);
        __ jump_indirect_to(G5_method_fie, O1_scratch);
        __ delayed()->nop();
      } else {
        __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);  // target is another MethodHandle
        __ verify_oop(G3_method_handle);
        __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
      }
    }
    break;

  case _adapter_retype_only:
  case _adapter_retype_raw:
    // Immediately jump to the next MH layer:
    __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
    __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    // This is OK when all parameter types widen.
    // It is also OK when a return type narrows.
    break;

  case _adapter_check_cast:
    {
      // Temps:
      Register G5_klass = G5_index;  // Interesting AMH data.

      // Check a reference argument before jumping to the next layer of MH:
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address vmarg = __ argument_address(O0_argslot);

      // What class are we casting to?
      __ load_heap_oop(G3_amh_argument, G5_klass);  // This is a Class object!
      __ load_heap_oop(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);

      Label done;
      __ ld_ptr(vmarg, O1_scratch);
      __ tst(O1_scratch);
      __ brx(Assembler::zero, false, Assembler::pn, done);  // No cast if null.
      __ delayed()->nop();
      __ load_klass(O1_scratch, O1_scratch);

      // Live at this point:
      // - G5_klass        :  klass required by the target method
      // - O1_scratch      :  argument klass to test
      // - G3_method_handle:  adapter method handle
      __ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);

      // If we get here, the type check failed!
      __ load_heap_oop(G3_amh_argument,        O2_required);  // required class
      __ ld_ptr(       vmarg,                  O1_actual);    // bad object
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
      __ delayed()->mov(Bytecodes::_checkcast, O0_code);      // who is complaining?

      __ bind(done);
      // Get the new MH:
      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_prim_to_prim:
  case _adapter_ref_to_prim:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
//case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place conversion to int or an int subword.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address value;
      Address vmarg = __ argument_address(O0_argslot);
      bool value_left_justified = false;

      switch (ek) {
      case _adapter_opt_i2i:
        value = vmarg;
        break;
      case _adapter_opt_l2i:
        {
          // just delete the extra slot
#ifdef _LP64
          // In V9, longs are given 2 64-bit slots in the interpreter, but the
          // data is passed in only 1 slot.
          // Keep the second slot.
          __ add(Gargs, __ argument_offset(O0_argslot, -1), O0_argslot);
          remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
          value = Address(O0_argslot, 4);  // Get the least-significant 32 bits of the 64-bit value.
          vmarg = Address(O0_argslot, Interpreter::stackElementSize);
#else
          // Keep the first slot.
          __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
          remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
          value = Address(O0_argslot, 0);
          vmarg = value;
#endif
        }
        break;
      case _adapter_opt_unboxi:
        {
          // Load the value up from the heap.
          __ ld_ptr(vmarg, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
#ifdef ASSERT
          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
            if (is_subword_type(BasicType(bt)))
              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
          }
#endif
          __ null_check(O1_scratch, value_offset);
          value = Address(O1_scratch, value_offset);
#ifdef _BIG_ENDIAN
          // Values stored in objects are packed.
          value_left_justified = true;
#endif
        }
        break;
      default:
        ShouldNotReachHere();
      }

      // This check is required on _BIG_ENDIAN
      Register G5_vminfo = G5_index;
      __ ldsw(G3_amh_conversion, G5_vminfo);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");

      // Original 32-bit vmdata word must be of this form:
      // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
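      // Worked example (a sketch of the encoding): for int->byte the vminfo
      // shift count is 24, so the sll/sra pair below computes
      // (x << 24) >> 24, sign-extending the low byte; int->char takes the
      // zero_extend path and uses srl for a logical shift instead.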
      __ lduw(value, O1_scratch);
      if (!value_left_justified)
        __ sll(O1_scratch, G5_vminfo, O1_scratch);
      Label zero_extend, done;
      __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
      __ br(Assembler::zero, false, Assembler::pn, zero_extend);
      __ delayed()->nop();

      // this path is taken for int->byte, int->short
      __ sra(O1_scratch, G5_vminfo, O1_scratch);
      __ ba(false, done);
      __ delayed()->nop();

      __ bind(zero_extend);
      // this is taken for int->char
      __ srl(O1_scratch, G5_vminfo, O1_scratch);

      __ bind(done);
      __ st(O1_scratch, vmarg);

      // Get the new MH:
      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place int-to-long or ref-to-long conversion.
      __ ldsw(G3_amh_vmargslot, O0_argslot);

      // On big-endian machines we duplicate the slot and store the MSW
      // in the first slot.
      __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);

      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);

      Address arg_lsw(O0_argslot, 0);
      Address arg_msw(O0_argslot, -Interpreter::stackElementSize);

      switch (ek) {
      case _adapter_opt_i2l:
        {
          __ ldsw(arg_lsw, O2_scratch);      // Load LSW
#ifndef _LP64
          __ signx(O2_scratch, O3_scratch);  // Sign extend
#endif
          __ st_long(O2_scratch, arg_msw);   // Uses O2/O3 on !_LP64
        }
        break;
      case _adapter_opt_unboxl:
        {
          // Load the value up from the heap.
          __ ld_ptr(arg_lsw, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
          __ null_check(O1_scratch, value_offset);
          __ ld_long(Address(O1_scratch, value_offset), O2_scratch);  // Uses O2/O3 on !_LP64
          __ st_long(O2_scratch, arg_msw);
        }
        break;
      default:
        ShouldNotReachHere();
      }

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
    {
      // perform an in-place floating primitive conversion
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_prim_to_ref:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_swap_args:
  case _adapter_rot_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_swap_1:
  case _adapter_opt_swap_2:
  case _adapter_opt_rot_1_up:
  case _adapter_opt_rot_1_down:
  case _adapter_opt_rot_2_up:
  case _adapter_opt_rot_2_down:
    {
      int swap_bytes = 0, rotate = 0;
      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);

      // 'argslot' is the position of the first argument to swap.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'vminfo' is the second.
      Register O1_destslot = O1_scratch;
      __ ldsw(G3_amh_conversion, O1_destslot);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
      __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
      __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);

      if (!rotate) {
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ ld_ptr(Address(O0_argslot,  i), O2_scratch);
          __ ld_ptr(Address(O1_destslot, i), O3_scratch);
          __ st_ptr(O3_scratch, Address(O0_argslot,  i));
          __ st_ptr(O2_scratch, Address(O1_destslot, i));
        }
      } else {
        // Save the first chunk, which is going to get overwritten.
        switch (swap_bytes) {
        case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
        case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru
        case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
        default: ShouldNotReachHere();
        }

        if (rotate > 0) {
          // Rotate upward.
          __ sub(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot > destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be above destination (upward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot down to destslot, copying contiguous data upwards.
          // Pseudo-code:
          //   argslot  = src_addr - swap_bytes
          //   destslot = dest_addr
          //   while (argslot >= destslot) {
          //     *(argslot + swap_bytes) = *(argslot + 0);
          //     argslot--;
          //   }
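          // Example (a sketch): with swap_bytes = 8, each pass copies one
          // word from argslot to argslot + 8 and then steps argslot down a
          // word, sliding the gap toward destslot.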
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
          __ sub(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        } else {
          __ add(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot < destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be below destination (downward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot up to destslot, copying contiguous data downwards.
          // Pseudo-code:
          //   argslot  = src_addr + swap_bytes
          //   destslot = dest_addr
          //   while (argslot <= destslot) {
          //     *(argslot - swap_bytes) = *(argslot + 0);
          //     argslot++;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
          __ add(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        }

        // Store the original first chunk into the destination slot, now free.
        switch (swap_bytes) {
        case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
        case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
        case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
        default: ShouldNotReachHere();
        }
      }

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_dup_args:
    {
      // 'argslot' is the position of the first argument to duplicate.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is the negative of the number of words to duplicate.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      // Remember the old Gargs (argslot[0]).
      Register O1_oldarg = O1_scratch;
      __ mov(Gargs, O1_oldarg);

      // Move Gargs down to make room for dups.
      __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
      __ add(Gargs, G5_stack_move, Gargs);

      // Compute the new Gargs (argslot[0]).
      Register O2_newarg = O2_scratch;
      __ mov(Gargs, O2_newarg);

      // Copy from oldarg[0...] down to newarg[0...]
      // Pseudo-code:
      //   O1_oldarg  = old-Gargs
      //   O2_newarg  = new-Gargs
      //   O0_argslot = argslot
      //   while (O2_newarg < O1_oldarg) *O2_newarg++ = *O0_argslot++
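      // Example (a sketch): duplicating 2 slots makes stack_move = -2, so
      // Gargs drops by 2 words and the loop below copies argslot[0..1]
      // into the newly exposed newarg[0..1], stopping when newarg catches
      // up with oldarg.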
      Label loop;
      __ bind(loop);
      __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
      __ st_ptr(O3_scratch, Address(O2_newarg, 0));
      __ add(O0_argslot, wordSize, O0_argslot);
      __ add(O2_newarg,  wordSize, O2_newarg);
      __ cmp(O2_newarg, O1_oldarg);
      __ brx(Assembler::less, false, Assembler::pt, loop);
      __ delayed()->nop();  // FILLME

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_drop_args:
    {
      // 'argslot' is the position of the first argument to nuke.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is the number of words to drop.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_collect_args:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_spread_args:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_spread_0:
  case _adapter_opt_spread_1:
  case _adapter_opt_spread_more:
    {
      // spread an array out into a group of arguments
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_flyby:
  case _adapter_ricochet:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  default:
    ShouldNotReachHere();
  }

  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  __ unimplemented(entry_name(ek)); // %%% FIXME: NYI

  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}