/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                address interpreted_entry) {
  // Just before the actual machine code entry point, allocate space
  // for a MethodHandleEntry::Data record, so that we can manage everything
  // from one base pointer.
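  // (Rough layout sketch: the Data record is emitted first and the machine
  //  code begins at base + sizeof(Data), so a single base pointer reaches
  //  both the metadata and the code entry point.)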
  __ align(wordSize);
  address target = __ pc() + sizeof(Data);
  while (__ pc() < target) {
    __ nop();
    __ align(wordSize);
  }

  MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
  me->set_end_address(__ pc());         // set a temporary end_address
  me->set_from_interpreted_entry(interpreted_entry);
  me->set_type_checking_entry(NULL);

  return (address) me;
}

MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
                                                address start_addr) {
  MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
  assert(me->end_address() == start_addr, "valid ME");

  // Fill in the real end_address:
  __ align(wordSize);
  me->set_end_address(__ pc());

  return me;
}
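// (Usage note: start_compiled_entry is called before emitting a stub body and
//  finish_compiled_entry is called afterwards to stamp the real end_address;
//  see the tail of generate_method_handle_stub below for the pairing.)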


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
  // I5_savedSP/O5_savedSP: sender SP (must preserve)
  // G4 (Gargs): incoming argument list (must preserve)
  // G5_method:  invoke methodOop
  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
  // O0, O1, O2, O3, O4: garbage temps, blown away
  Register O0_mtype   = O0;
  Register O1_scratch = O1;
  Register O2_scratch = O2;
  Register O3_scratch = O3;
  Register O4_argslot = O4;
  Register O4_argbase = O4;

  // Emit the WrongMethodType path first, to enable back-branches from the main path.
  Label wrong_method_type;
  __ bind(wrong_method_type);
  Label invoke_generic_slow_path;
  assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");
  __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
  __ cmp(O1_scratch, (int) vmIntrinsics::_invokeExact);
  __ brx(Assembler::notEqual, false, Assembler::pt, invoke_generic_slow_path);
  __ delayed()->nop();
  __ mov(O0_mtype, G5_method_type);  // required by throw_WrongMethodType
  // mov(G3_method_handle, G3_method_handle);  // already in this register
  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
  __ delayed()->nop();

  // Here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  // Fetch the MethodType from the method handle.
  {
    Register tem = G5_method;
    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
      __ ld_ptr(Address(tem, *pchase), O0_mtype);
      tem = O0_mtype;          // in case there is another indirection
    }
  }
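  // (method_type_offsets_chain() is a -1-terminated list of pointer offsets;
  //  each pass of the loop follows one link from the methodOop toward the
  //  MethodType oop, leaving the MethodType itself in O0_mtype.)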

  // Given the MethodType, find out where the MH argument is buried.
  __ load_heap_oop(Address(O0_mtype,   __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes,        O1_scratch)), O4_argslot);
  __ ldsw(         Address(O4_argslot, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot);
  __ add(Gargs, __ argument_offset(O4_argslot, 1), O4_argbase);
  // Note: argument_offset uses its input as a scratch register!
  __ ld_ptr(Address(O4_argbase, -Interpreter::stackElementSize), G3_method_handle);
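  // (As the header comment notes, the receiver handle sits at
  //  sp[MethodTypeForm.vmslots]; O4_argbase points one stack element past it,
  //  hence the load at argbase - stackElementSize.)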

  trace_method_handle(_masm, "invokeExact");

  __ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type);
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  // For invokeGeneric (only), apply argument and result conversions on the fly.
  __ bind(invoke_generic_slow_path);
#ifdef ASSERT
  { Label L;
    __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
    __ cmp(O1_scratch, (int) vmIntrinsics::_invokeGeneric);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("bad methodOop::intrinsic_id");
    __ bind(L);
  }
#endif //ASSERT

  // Make room on the stack for two more pointers:
  insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK, O4_argbase, O1_scratch, O2_scratch, O3_scratch);
  // Load up an adapter from the calling type (Java weaves this):
  Register O2_form    = O2_scratch;
  Register O3_adapter = O3_scratch;
  __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes,               O1_scratch)), O2_form);
  __ load_heap_oop(Address(O2_form,  __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
  __ verify_oop(O3_adapter);
  __ st_ptr(O3_adapter, Address(O4_argbase, 1 * Interpreter::stackElementSize));
  // As a trusted first argument, pass the type being called, so the adapter knows
  // the actual types of the arguments and return values.
  // (Generic invokers are shared among form-families of method-type.)
  __ st_ptr(O0_mtype,   Address(O4_argbase, 0 * Interpreter::stackElementSize));
  // FIXME: assert that O3_adapter is of the right method-type.
  __ mov(O3_adapter, G3_method_handle);
  trace_method_handle(_masm, "invokeGeneric");
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  return entry_point;
}


#ifdef ASSERT
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
  // Verify that argslot lies within (Gargs, FP].
  Label L_ok, L_bad;
  BLOCK_COMMENT("{ verify_argslot");
#ifdef _LP64
  __ add(FP, STACK_BIAS, temp_reg);
  __ cmp(argslot_reg, temp_reg);
#else
  __ cmp(argslot_reg, FP);
#endif
  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
  __ delayed()->nop();
  __ cmp(Gargs, argslot_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
  __ delayed()->nop();
  __ bind(L_bad);
  __ stop(error_message);
  __ bind(L_ok);
  BLOCK_COMMENT("} verify_argslot");
}
#endif


// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0.
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     int arg_mask,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

#ifdef ASSERT
  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::greater, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots <= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() <= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

#ifdef _LP64
  if (arg_slots.is_register()) {
    // Was the arg_slots register loaded as a signed int?
    Label L_ok;
    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
    __ sra(temp_reg, BitsPerInt, temp_reg);
    __ cmp(arg_slots.as_register(), temp_reg);
    __ br(Assembler::equal, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ stop("arg_slots register not loaded as signed int");
    __ bind(L_ok);
  }
#endif
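  // (The sll/sra pair above reconstructs the value as a sign-extended 32-bit
  //  int; a mismatch means the register held a raw 64-bit value, which would
  //  corrupt the pointer arithmetic below.)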

  // Make space on the stack for the inserted argument(s).
  // Then pull down everything shallower than argslot_reg.
  // The stacked return address gets pulled down with everything else.
  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
  //   sp -= size;
  //   for (temp = sp + size; temp < argslot; temp++)
  //     temp[-size] = temp[0]
  //   argslot -= size;
  BLOCK_COMMENT("insert_arg_slots {");
  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);
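  // (Clearing the low-order bits of the negative byte offset rounds it down,
  //  so SP moves by at least the requested amount while staying 2*wordSize
  //  aligned; Gargs below still moves by the exact offset.)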

  __ mov(Gargs, temp_reg);  // source pointer for copy
  __ add(Gargs, offset, Gargs);

  {
    Label loop;
    __ BIND(loop);
    // pull one word down each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ add(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, argslot_reg);
    __ brx(Assembler::less, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot down, to point to the opened-up space.
  __ add(argslot_reg, offset, argslot_reg);
  BLOCK_COMMENT("} insert_arg_slots");
}


// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0.
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
  __ add(argslot_reg, offset, temp2_reg);
  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::less, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

  BLOCK_COMMENT("remove_arg_slots {");
  // Pull up everything shallower than argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
  //   for (temp = argslot-1; temp >= sp; --temp)
  //     temp[size] = temp[0]
  //   argslot += size;
  //   sp += size;
  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
  {
    Label loop;
    __ BIND(loop);
    // pull one word up each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ sub(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, Gargs);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot up, to point to the just-copied block.
  __ add(Gargs, offset, Gargs);
  // And adjust the argslot address to point at the deletion point.
  __ add(argslot_reg, offset, argslot_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);
  BLOCK_COMMENT("} remove_arg_slots");
}


#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
                              oopDesc* mh,
                              intptr_t* saved_sp) {
  tty->print_cr("MH %s mh=" INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT, adaptername, (intptr_t) mh, (intptr_t) saved_sp);
  print_method_handle(mh);
}
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  // save: Gargs, O5_savedSP
  __ save_frame(16);
  __ set((intptr_t) adaptername, O0);
  __ mov(G3_method_handle, O1);
  __ mov(I5_savedSP, O2);
  __ mov(G3_method_handle, L3);
  __ mov(Gargs, L4);
  __ mov(G5_method_type, L5);
  __ call_VM_leaf(L7, CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  __ mov(L3, G3_method_handle);
  __ mov(L4, Gargs);
  __ mov(L5, G5_method_type);
  __ restore();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif // PRODUCT

// Which conversion op types are implemented here?
int MethodHandles::adapter_conversion_ops_supported_mask() {
  return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
         //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
         );
  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}

//------------------------------------------------------------------------------
// MethodHandles::generate_method_handle_stub
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  // Here is the register state during an interpreted call,
  // as set up by generate_method_handle_interpreter_entry():
  // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
  // - G3: receiver method handle
  // - O5_savedSP: sender SP (must preserve)

  const Register O0_argslot = O0;
  const Register O1_scratch = O1;
  const Register O2_scratch = O2;
  const Register O3_scratch = O3;
  const Register G5_index   = G5;

  // Argument registers for _raise_exception.
  const Register O0_code     = O0;
  const Register O1_actual   = O1;
  const Register O2_required = O2;

  guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

  // Some handy addresses:
  Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));
  Address G5_method_fce(    G5_method,        in_bytes(methodOopDesc::from_compiled_offset()));

  Address G3_mh_vmtarget(   G3_method_handle, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes());

  Address G3_dmh_vmindex(   G3_method_handle, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes());

  Address G3_bmh_vmargslot( G3_method_handle, java_lang_invoke_BoundMethodHandle::vmargslot_offset_in_bytes());
  Address G3_bmh_argument(  G3_method_handle, java_lang_invoke_BoundMethodHandle::argument_offset_in_bytes());

  Address G3_amh_vmargslot( G3_method_handle, java_lang_invoke_AdapterMethodHandle::vmargslot_offset_in_bytes());
  Address G3_amh_argument ( G3_method_handle, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes());
  Address G3_amh_conversion(G3_method_handle, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());

  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

  if (have_entry(ek)) {
    __ nop();  // empty stubs make SG sick
    return;
  }

  address interp_entry = __ pc();

  trace_method_handle(_masm, entry_name(ek));

  switch ((int) ek) {
  case _raise_exception:
    {
      // Not a real MH entry, but rather shared code for raising an
      // exception.  Since we use the compiled entry, arguments are
      // expected in compiler argument registers.
      assert(raise_exception_method(), "must be set");
      assert(raise_exception_method()->from_compiled_entry(), "method must be linked");

      __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.

      Label L_no_method;
      // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
      __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
      __ ld_ptr(Address(G5_method, 0), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, L_no_method);
      __ delayed()->nop();

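      // (_raise_exception_method is stored as a jobject, i.e. a JNI handle;
      //  hence the two-step load: first the handle itself, then the methodOop
      //  it refers to.)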
      const int jobject_oop_offset = 0;
      __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, L_no_method);
      __ delayed()->nop();

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fce, O3_scratch);  // jump to compiled entry
      __ delayed()->nop();

      // Do something that at least causes a valid throw from the interpreter.
      __ bind(L_no_method);
      __ unimplemented("call throw_WrongMethodType_entry");
    }
    break;

  case _invokestatic_mh:
  case _invokespecial_mh:
    {
      __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
      __ verify_oop(G5_method);
      // Same as TemplateTable::invokestatic or invokespecial,
      // minus the CP setup and profiling:
      if (ek == _invokespecial_mh) {
        // Must load & check the first argument before entering the target method.
        __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
        __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
        __ null_check(G3_method_handle);
        __ verify_oop(G3_method_handle);
      }
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokevirtual_mh:
    {
      // Same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      // Pick out the vtable index and receiver offset from the MH,
      // and then we can discard it:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      __ ldsw(G3_dmh_vmindex, G5_index);
      // Note:  The verifier allows us to ignore G3_mh_vmtarget.
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get target methodOop & entry point:
      const int base = instanceKlass::vtable_start_offset() * wordSize;
      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");

      __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
      __ add(O0_klass, G5_index, O0_klass);
      Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
      __ ld_ptr(vtable_entry_addr, G5_method);
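      // (In effect: G5_method = receiver_klass->vtable()[vmindex].method().)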

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokeinterface_mh:
    {
      // Same as TemplateTable::invokeinterface,
      // minus the CP setup and profiling:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      Register O1_intf  = O1_scratch;
      __ load_heap_oop(G3_mh_vmtarget, O1_intf);
      __ ldsw(G3_dmh_vmindex, G5_index);
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get interface:
      Label no_such_interface;
      __ verify_oop(O1_intf);
      __ lookup_interface_method(O0_klass, O1_intf,
                                 // Note: next two args must be the same:
                                 G5_index, G5_method,
                                 O2_scratch,
                                 O3_scratch,
                                 no_such_interface);

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();

      __ bind(no_such_interface);
      // Throw an exception.
      // For historical reasons, it will be IncompatibleClassChangeError.
      __ unimplemented("not tested yet");
      __ ld_ptr(Address(O1_intf, java_mirror_offset), O2_required);  // required interface
      __ mov(   O0_klass,                             O1_actual);    // bad receiver
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
      __ delayed()->mov(Bytecodes::_invokeinterface,  O0_code);      // who is complaining?
    }
    break;

  case _bound_ref_mh:
  case _bound_int_mh:
  case _bound_long_mh:
  case _bound_ref_direct_mh:
  case _bound_int_direct_mh:
  case _bound_long_direct_mh:
    {
      const bool direct_to_method = (ek >= _bound_ref_direct_mh);
      BasicType arg_type  = T_ILLEGAL;
      int       arg_mask  = _INSERT_NO_MASK;
      int       arg_slots = -1;
      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);

      // Make room for the new argument:
      __ ldsw(G3_bmh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);

      // Store bound argument into the new stack slot:
      __ load_heap_oop(G3_bmh_argument, O1_scratch);
      if (arg_type == T_OBJECT) {
        __ st_ptr(O1_scratch, Address(O0_argslot, 0));
      } else {
        Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
        const int arg_size = type2aelembytes(arg_type);
        __ load_sized_value(prim_value_addr, O2_scratch, arg_size, is_signed_subword_type(arg_type));
        __ store_sized_value(O2_scratch, Address(O0_argslot, 0), arg_size);  // long store uses O2/O3 on !_LP64
      }

      if (direct_to_method) {
        __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
        __ verify_oop(G5_method);
        __ jump_indirect_to(G5_method_fie, O1_scratch);
        __ delayed()->nop();
      } else {
        __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);  // target is another method handle
        __ verify_oop(G3_method_handle);
        __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
      }
    }
    break;

  case _adapter_retype_only:
  case _adapter_retype_raw:
    // Immediately jump to the next MH layer:
    __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
    __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    // This is OK when all parameter types widen.
    // It is also OK when a return type narrows.
    break;

  case _adapter_check_cast:
    {
      // Temps:
      Register G5_klass = G5_index;  // Interesting AMH data.

      // Check a reference argument before jumping to the next layer of MH:
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address vmarg = __ argument_address(O0_argslot);

      // What class are we casting to?
      __ load_heap_oop(G3_amh_argument, G5_klass);  // This is a Class object!
      __ load_heap_oop(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);

      Label done;
      __ ld_ptr(vmarg, O1_scratch);
      __ tst(O1_scratch);
      __ brx(Assembler::zero, false, Assembler::pn, done);  // No cast if null.
      __ delayed()->nop();
      __ load_klass(O1_scratch, O1_scratch);

      // Live at this point:
      // - G5_klass        :  klass required by the target method
      // - O0_argslot      :  argslot index in vmarg; may be required in the failing path
      // - O1_scratch      :  argument klass to test
      // - G3_method_handle:  adapter method handle
      __ check_klass_subtype(O1_scratch, G5_klass, O2_scratch, O3_scratch, done);

      // If we get here, the type check failed!
      __ load_heap_oop(G3_amh_argument,        O2_required);  // required class
      __ ld_ptr(       vmarg,                  O1_actual);    // bad object
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
      __ delayed()->mov(Bytecodes::_checkcast, O0_code);      // who is complaining?

      __ bind(done);
      // Get the new MH:
      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_prim_to_prim:
  case _adapter_ref_to_prim:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
//case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place conversion to int or an int subword.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address value;
      Address vmarg = __ argument_address(O0_argslot);
      bool value_left_justified = false;

      switch (ek) {
      case _adapter_opt_i2i:
        value = vmarg;
        break;
      case _adapter_opt_l2i:
        {
          // Just delete the extra slot.
#ifdef _LP64
          // In V9, longs are given 2 64-bit slots in the interpreter, but the
          // data is passed in only 1 slot.
          // Keep the second slot.
          __ add(Gargs, __ argument_offset(O0_argslot, -1), O0_argslot);
          remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
          value = Address(O0_argslot, 4);  // Get the least-significant 32 bits of the 64-bit value.
          vmarg = Address(O0_argslot, Interpreter::stackElementSize);
#else
          // Keep the first slot.
          __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
          remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
          value = Address(O0_argslot, 0);
          vmarg = value;
#endif
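          // (SPARC is big-endian, so on LP64 the live low 32 bits of the long
          //  sit at byte offset 4 within the kept 64-bit slot, which is why
          //  `value` is formed at offset 4 above.)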
        }
        break;
      case _adapter_opt_unboxi:
        {
          // Load the value up from the heap.
          __ ld_ptr(vmarg, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
#ifdef ASSERT
          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
            if (is_subword_type(BasicType(bt)))
              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
          }
#endif
          __ null_check(O1_scratch, value_offset);
          value = Address(O1_scratch, value_offset);
#ifdef _BIG_ENDIAN
          // Values stored in objects are packed.
          value_left_justified = true;
#endif
        }
        break;
      default:
        ShouldNotReachHere();
      }

      // This check is required on _BIG_ENDIAN
      Register G5_vminfo = G5_index;
      __ ldsw(G3_amh_conversion, G5_vminfo);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");

      // Original 32-bit vmdata word must be of this form:
      // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
      __ lduw(value, O1_scratch);
      if (!value_left_justified)
        __ sll(O1_scratch, G5_vminfo, O1_scratch);
      Label zero_extend, done;
      __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
      __ br(Assembler::zero, false, Assembler::pn, zero_extend);
      __ delayed()->nop();

      // this path is taken for int->byte, int->short
      __ sra(O1_scratch, G5_vminfo, O1_scratch);
      __ ba(false, done);
      __ delayed()->nop();

      __ bind(zero_extend);
      // this is taken for int->char
      __ srl(O1_scratch, G5_vminfo, O1_scratch);

      __ bind(done);
      __ st(O1_scratch, vmarg);
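      // (Worked example, assuming the usual subword encoding: for int->byte
      //  the vminfo shift count is 24, so sll 24 followed by sra 24 leaves the
      //  low byte sign-extended; for int->char it is 16 and the srl path
      //  zero-extends instead.)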

      // Get the new MH:
      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place int-to-long or ref-to-long conversion.
      __ ldsw(G3_amh_vmargslot, O0_argslot);

      // On a big-endian machine we duplicate the slot and store the MSW
      // in the first slot.
      __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);

      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);

      Address arg_lsw(O0_argslot, 0);
      Address arg_msw(O0_argslot, -Interpreter::stackElementSize);

      switch (ek) {
      case _adapter_opt_i2l:
        {
#ifdef _LP64
          __ ldsw(arg_lsw, O2_scratch);                 // Load LSW sign-extended
#else
          __ ldsw(arg_lsw, O3_scratch);                 // Load LSW sign-extended
          __ srlx(O3_scratch, BitsPerInt, O2_scratch);  // Move MSW value to lower 32-bits for std
#endif
          __ st_long(O2_scratch, arg_msw);              // Uses O2/O3 on !_LP64
        }
        break;
      case _adapter_opt_unboxl:
        {
          // Load the value up from the heap.
          __ ld_ptr(arg_lsw, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
          __ null_check(O1_scratch, value_offset);
          __ ld_long(Address(O1_scratch, value_offset), O2_scratch);  // Uses O2/O3 on !_LP64
          __ st_long(O2_scratch, arg_msw);
        }
        break;
      default:
        ShouldNotReachHere();
      }

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
    {
      // Perform an in-place floating primitive conversion.
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_prim_to_ref:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_swap_args:
  case _adapter_rot_args:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_swap_1:
  case _adapter_opt_swap_2:
  case _adapter_opt_rot_1_up:
  case _adapter_opt_rot_1_down:
  case _adapter_opt_rot_2_up:
  case _adapter_opt_rot_2_down:
    {
      int swap_bytes = 0, rotate = 0;
      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);

      // 'argslot' is the position of the first argument to swap.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'vminfo' is the second.
      Register O1_destslot = O1_scratch;
      __ ldsw(G3_amh_conversion, O1_destslot);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
      __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
      __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);

      if (!rotate) {
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ ld_ptr(Address(O0_argslot,  i), O2_scratch);
          __ ld_ptr(Address(O1_destslot, i), O3_scratch);
          __ st_ptr(O3_scratch, Address(O0_argslot,  i));
          __ st_ptr(O2_scratch, Address(O1_destslot, i));
        }
      } else {
        // Save the first chunk, which is going to get overwritten.
        switch (swap_bytes) {
        case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
        case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru
        case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
        default: ShouldNotReachHere();
        }
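        // (swap_bytes is 4, 8, or 16: one 32-bit slot, one 64-bit slot, or a
        //  two-word unit; the 16-byte case saves both words before they are
        //  overwritten below.)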

        if (rotate > 0) {
          // Rotate upward.
          __ sub(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot > destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be above destination (upward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot down to destslot, copying contiguous data upwards.
          // Pseudo-code:
          //   argslot  = src_addr - swap_bytes
          //   destslot = dest_addr
          //   while (argslot >= destslot) {
          //     *(argslot + swap_bytes) = *(argslot + 0);
          //     argslot--;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
          __ sub(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        } else {
          __ add(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot < destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be below destination (downward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot up to destslot, copying contiguous data downwards.
          // Pseudo-code:
          //   argslot  = src_addr + swap_bytes
          //   destslot = dest_addr
          //   while (argslot <= destslot) {
          //     *(argslot - swap_bytes) = *(argslot + 0);
          //     argslot++;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
          __ add(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        }

        // Store the original first chunk into the destination slot, now free.
        switch (swap_bytes) {
        case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
        case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
        case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
        default: ShouldNotReachHere();
        }
      }

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_dup_args:
    {
      // 'argslot' is the position of the first argument to duplicate.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is the (negative) number of words to duplicate.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      // Remember the old Gargs (argslot[0]).
      Register O1_oldarg = O1_scratch;
      __ mov(Gargs, O1_oldarg);

      // Move Gargs down to make room for dups.
      __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
      __ add(Gargs, G5_stack_move, Gargs);

      // Compute the new Gargs (argslot[0]).
      Register O2_newarg = O2_scratch;
      __ mov(Gargs, O2_newarg);

      // Copy from oldarg[0...] down to newarg[0...]
      // Pseudo-code:
      //   O1_oldarg  = old-Gargs
      //   O2_newarg  = new-Gargs
      //   O0_argslot = argslot
      //   while (O2_newarg < O1_oldarg) *O2_newarg++ = *O0_argslot++
      Label loop;
      __ bind(loop);
      __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
      __ st_ptr(O3_scratch, Address(O2_newarg, 0));
      __ add(O0_argslot, wordSize, O0_argslot);
      __ add(O2_newarg,  wordSize, O2_newarg);
      __ cmp(O2_newarg, O1_oldarg);
      __ brx(Assembler::less, false, Assembler::pt, loop);
      __ delayed()->nop();  // FILLME

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_drop_args:
    {
      // 'argslot' is the position of the first argument to nuke.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is the number of words to drop.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_collect_args:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_spread_args:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_spread_0:
  case _adapter_opt_spread_1:
  case _adapter_opt_spread_more:
    {
      // Spread an array out into a group of arguments.
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_flyby:
  case _adapter_ricochet:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  default:
    ShouldNotReachHere();
  }

  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  __ unimplemented(entry_name(ek)); // %%% FIXME: NYI

  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}