1 /*
   2  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "incls/_precompiled.incl"
  26 #include "incls/_methodHandles_sparc.cpp.incl"
  27 
  28 #define __ _masm->
  29 
  30 #ifdef PRODUCT
  31 #define BLOCK_COMMENT(str) /* nothing */
  32 #else
  33 #define BLOCK_COMMENT(str) __ block_comment(str)
  34 #endif
  35 
  36 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  37 
// Carve out a MethodHandleEntry::Data record just ahead of the machine-code
// entry point by padding the instruction stream with word-aligned nops, and
// initialize its bookkeeping fields.  The caller emits the real code
// immediately afterward and later calls finish_compiled_entry() to record
// the true end address.
// NOTE(review): the returned base is __ pc() *after* the padding; the Data
// fields are presumably addressed relative to this base — confirm against
// the MethodHandleEntry layout in methodHandles.hpp.
address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                address interpreted_entry) {
  // Just before the actual machine code entry point, allocate space
  // for a MethodHandleEntry::Data record, so that we can manage everything
  // from one base pointer.
  __ align(wordSize);
  address target = __ pc() + sizeof(Data);
  while (__ pc() < target) {
    // Pad with word-aligned nops until sizeof(Data) bytes are reserved.
    __ nop();
    __ align(wordSize);
  }

  MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
  me->set_end_address(__ pc());         // set a temporary end_address
  // (the temporary end equals the record's own start; finish_compiled_entry()
  // asserts exactly this before patching in the real end)
  me->set_from_interpreted_entry(interpreted_entry);
  me->set_type_checking_entry(NULL);

  return (address) me;
}
  57 
// Complete a MethodHandleEntry begun by start_compiled_entry(): check that
// the entry still carries its temporary end_address (== its own start, i.e.
// it has not been finished already), then store the real, word-aligned end
// of the emitted code.
MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
                                                address start_addr) {
  MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
  // start_compiled_entry() set end_address to the start as a sentinel.
  assert(me->end_address() == start_addr, "valid ME");

  // Fill in the real end_address:
  __ align(wordSize);
  me->set_end_address(__ pc());

  return me;
}
  69 
  70 
// Code generation
//
// Emit the interpreter's entry point for invoking a method handle:
// fetch the call-site MethodType from the invoke methodOop, locate the
// receiver method handle on the expression stack, type-check it, and
// tail-jump into the handle's own entry.  Returns the address of the
// main entry point (the wrong-method-type path is emitted just before it).
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
  // Register contract on entry:
  // I5_savedSP: sender SP (must preserve)
  // G4 (Gargs): incoming argument list (must preserve)
  // G5_method:  invoke methodOop; becomes method type.
  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
  // O0, O1: garbage temps, blown away
  Register O0_argslot = O0;
  Register O1_scratch = O1;

  // emit WrongMethodType path first, to enable back-branch from main path
  Label wrong_method_type;
  __ bind(wrong_method_type);
  // Failed type check lands here and bounces into the interpreter's thrower.
  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
  __ delayed()->nop();

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  // fetch the MethodType from the method handle into G5_method_type
  {
    Register tem = G5_method;
    assert(tem == G5_method_type, "yes, it's the same register");
    // Walk the chain of offsets (terminated by -1) from the methodOop
    // down to the MethodType, reloading through the same register.
    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
      __ ld_ptr(Address(tem, *pchase), G5_method_type);
    }
  }

  // given the MethodType, find out where the MH argument is buried
  // (delayed_value defers field offsets not yet computable at stub-generation
  // time — presumably patched once the java.dyn classes are loaded; verify)
  __ load_heap_oop(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)),        O0_argslot);
  __ ldsw(         Address(O0_argslot,     __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
  __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);

  // Check the handle against G5_method_type; on mismatch, back-branch
  // to the wrong_method_type stub emitted above.
  __ check_method_handle_type(G5_method_type, G3_method_handle, O1_scratch, wrong_method_type);
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  return entry_point;
}
 110 
 111 
 112 #ifdef ASSERT
// Debug-only codegen: emit a runtime check that argslot_reg points inside
// the current interpreter frame, i.e. Gargs <= argslot <= FP(+STACK_BIAS).
// On failure the generated code stops the VM with error_message.
// Clobbers temp_reg on _LP64 only.
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
  // Verify that argslot lies within (Gargs, FP].
  Label L_ok, L_bad;
  BLOCK_COMMENT("{ verify_argslot");
#ifdef _LP64
  // 64-bit SPARC frames are biased: the true frame address is FP + STACK_BIAS.
  __ add(FP, STACK_BIAS, temp_reg);
  __ cmp(argslot_reg, temp_reg);
#else
  __ cmp(argslot_reg, FP);
#endif
  // argslot above the frame pointer => out of frame => bad.
  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
  __ delayed()->nop();
  // Gargs <= argslot => within the argument area => ok.
  __ cmp(Gargs, argslot_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
  __ delayed()->nop();
  __ bind(L_bad);
  __ stop(error_message);
  __ bind(L_ok);
  BLOCK_COMMENT("} verify_argslot");
}
 133 #endif
 134 
 135 
// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
// (the SPARC stack grows downward, so opening up slots is a negative move).
// On exit, SP and Gargs have moved down by |arg_slots| words and argslot_reg
// points at the newly opened space.  arg_mask is an _INSERT_* hint from the
// caller; it is not consulted in this implementation.
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     int arg_mask,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  // temp3_reg may receive the shifted slot count below, so it must not alias
  // the argslot, the other temps, or a register-borne arg_slots.
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

#ifdef ASSERT
  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
  if (arg_slots.is_register()) {
    // Dynamic slot count: emit runtime checks that it is <= 0 and
    // a multiple of stack_move_unit() (low bits clear).
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::greater, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots <= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    // Constant slot count: same checks, done at assembly time.
    assert(arg_slots.as_constant() <= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

#ifdef _LP64
  if (arg_slots.is_register()) {
    // Was arg_slots register loaded as signed int?
    // Shift up 32 bits and arithmetic-shift back: the value must round-trip.
    Label L_ok;
    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
    __ sra(temp_reg, BitsPerInt, temp_reg);
    __ cmp(arg_slots.as_register(), temp_reg);
    __ br(Assembler::equal, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ stop("arg_slots register not loaded as signed int");
    __ bind(L_ok);
  }
#endif

  // Make space on the stack for the inserted argument(s).
  // Then pull down everything shallower than argslot_reg.
  // The stacked return address gets pulled down with everything else.
  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
  //   sp -= size;
  //   for (temp = sp + size; temp < argslot; temp++)
  //     temp[-size] = temp[0]
  //   argslot -= size;
  BLOCK_COMMENT("insert_arg_slots {");
  // offset = arg_slots * wordSize: a non-positive byte distance.
  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

  // Keep the stack pointer 2*wordSize aligned.
  // andn clears the low alignment bits; on a negative offset that rounds
  // toward -infinity, i.e. SP moves down by at least |offset|.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);

  __ mov(Gargs, temp_reg);  // source pointer for copy
  __ add(Gargs, offset, Gargs);  // Gargs moves down by the full (unmasked) offset

  {
    Label loop;
    __ BIND(loop);
    // pull one word down each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ add(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, argslot_reg);
    __ brx(Assembler::less, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot down, to point to the opened-up space.
  __ add(argslot_reg, offset, argslot_reg);
  BLOCK_COMMENT("} insert_arg_slots");
}
 215 
 216 
// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
// (a positive count shifts arguments up and shrinks the downward-growing stack).
// On exit, SP and Gargs have moved up and argslot_reg points just past the
// deleted slots.
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  // temp3_reg may receive the shifted slot count below, so it must not alias
  // the argslot, the other temps, or a register-borne arg_slots.
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

  // offset = arg_slots * wordSize: a non-negative byte distance to shift up by.
  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
  __ add(argslot_reg, offset, temp2_reg);
  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    // Dynamic slot count: emit runtime checks that it is >= 0 and
    // a multiple of stack_move_unit() (low bits clear).
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::less, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    // Constant slot count: same checks, done at assembly time.
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

  BLOCK_COMMENT("remove_arg_slots {");
  // Pull up everything shallower than argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
  //   for (temp = argslot-1; temp >= sp; --temp)
  //     temp[size] = temp[0]
  //   argslot += size;
  //   sp += size;
  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
  {
    Label loop;
    __ BIND(loop);
    // pull one word up each time through the loop
    // (walking downward from argslot-1 to Gargs so words are not overwritten
    // before they are read)
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ sub(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, Gargs);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot up, to point to the just-copied block.
  __ add(Gargs, offset, Gargs);
  // And adjust the argslot address to point at the deletion point.
  __ add(argslot_reg, offset, argslot_reg);

  // Keep the stack pointer 2*wordSize aligned.
  // andn clears the low alignment bits, so SP moves up by at most offset
  // (mirroring the conservative alignment in insert_arg_slots).
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);
  BLOCK_COMMENT("} remove_arg_slots");
}
 283 
 284 
 285 #ifndef PRODUCT
 286 extern "C" void print_method_handle(oop mh);
// Runtime (not codegen) helper called from generated tracing code:
// prints the adapter name and the raw method handle pointer, then dumps
// the handle via the shared print_method_handle().
// Called as a VM leaf from MethodHandles::trace_method_handle below.
void trace_method_handle_stub(const char* adaptername,
                              oopDesc* mh) {
  printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
  print_method_handle(mh);
}
// Emit (not perform) a tracing call: when -XX:+TraceMethodHandles is on,
// the generated stub calls trace_method_handle_stub() with the adapter name
// and the current method handle, preserving the G registers the handle
// machinery relies on across the leaf call.
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;  // emit tracing code only when requested
  BLOCK_COMMENT("trace_method_handle {");
  // save: Gargs, O5_savedSP
  __ save_frame(16);  // new register window; L3..L5 below serve as the save area
  __ set((intptr_t) adaptername, O0);  // outgoing arg 0: adapter name (C string)
  __ mov(G3_method_handle, O1);        // outgoing arg 1: the method handle
  // Stash G-registers in window-local registers so the call cannot clobber them:
  __ mov(G3_method_handle, L3);
  __ mov(Gargs, L4);
  __ mov(G5_method_type, L5);
  __ call_VM_leaf(L7, CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  // Restore the stashed G-registers and pop the register window.
  __ mov(L3, G3_method_handle);
  __ mov(L4, Gargs);
  __ mov(L5, G5_method_type);
  __ restore();
  BLOCK_COMMENT("} trace_method_handle");
}
 310 #endif // PRODUCT
 311 
 312 // which conversion op types are implemented here?
 313 int MethodHandles::adapter_conversion_ops_supported_mask() {
 314   return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
 315          |(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
 316          |(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
 317          |(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
 318          |(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
 319          |(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
 320          |(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
 321          |(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
 322          |(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
 323          //|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
 324          );
 325   // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
 326 }
 327 
 328 //------------------------------------------------------------------------------
 329 // MethodHandles::generate_method_handle_stub
 330 //
 331 // Generate an "entry" field for a method handle.
 332 // This determines how the method handle will respond to calls.
 333 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
 334   // Here is the register state during an interpreted call,
 335   // as set up by generate_method_handle_interpreter_entry():
 336   // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
 337   // - G3: receiver method handle
 338   // - O5_savedSP: sender SP (must preserve)
 339 
 340   Register O0_argslot = O0;
 341   Register O1_scratch = O1;
 342   Register O2_scratch = O2;
 343   Register O3_scratch = O3;
 344   Register G5_index   = G5;
 345 
 346   guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
 347 
 348   // Some handy addresses:
 349   Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));
 350 
 351   Address G3_mh_vmtarget(   G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());
 352 
 353   Address G3_dmh_vmindex(   G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());
 354 
 355   Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
 356   Address G3_bmh_argument(  G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());
 357 
 358   Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
 359   Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
 360   Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());
 361 
 362   const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
 363 
 364   if (have_entry(ek)) {
 365     __ nop();  // empty stubs make SG sick
 366     return;
 367   }
 368 
 369   address interp_entry = __ pc();
 370 
 371   trace_method_handle(_masm, entry_name(ek));
 372 
 373   switch ((int) ek) {
 374   case _raise_exception:
 375     {
 376       // Not a real MH entry, but rather shared code for raising an
 377       // exception.  Extra local arguments are passed in scratch
 378       // registers, as required type in O3, failing object (or NULL)
 379       // in O2, failing bytecode type in O1.
 380 
 381       __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.
 382 
 383       // Push arguments as if coming from the interpreter.
 384       Register O0_scratch = O0_argslot;
 385       int stackElementSize = Interpreter::stackElementSize;
 386 
 387       // Make space on the stack for the arguments and set Gargs
 388       // correctly.
 389       __ sub(SP, 4*stackElementSize, SP);  // Keep stack aligned.
 390       __ add(SP, (frame::varargs_offset)*wordSize - 1*Interpreter::stackElementSize + STACK_BIAS + BytesPerWord, Gargs);
 391 
 392       // void raiseException(int code, Object actual, Object required)
 393       __ st(    O1_scratch, Address(Gargs, 2*stackElementSize));  // code
 394       __ st_ptr(O2_scratch, Address(Gargs, 1*stackElementSize));  // actual
 395       __ st_ptr(O3_scratch, Address(Gargs, 0*stackElementSize));  // required
 396 
 397       Label no_method;
 398       // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
 399       __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
 400       __ ld_ptr(Address(G5_method, 0), G5_method);
 401       __ tst(G5_method);
 402       __ brx(Assembler::zero, false, Assembler::pn, no_method);
 403       __ delayed()->nop();
 404 
 405       int jobject_oop_offset = 0;
 406       __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
 407       __ tst(G5_method);
 408       __ brx(Assembler::zero, false, Assembler::pn, no_method);
 409       __ delayed()->nop();
 410 
 411       __ verify_oop(G5_method);
 412       __ jump_indirect_to(G5_method_fie, O1_scratch);
 413       __ delayed()->nop();
 414 
 415       // If we get here, the Java runtime did not do its job of creating the exception.
 416       // Do something that is at least causes a valid throw from the interpreter.
 417       __ bind(no_method);
 418       __ unimplemented("_raise_exception no method");
 419     }
 420     break;
 421 
 422   case _invokestatic_mh:
 423   case _invokespecial_mh:
 424     {
 425       __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
 426       __ verify_oop(G5_method);
 427       // Same as TemplateTable::invokestatic or invokespecial,
 428       // minus the CP setup and profiling:
 429       if (ek == _invokespecial_mh) {
 430         // Must load & check the first argument before entering the target method.
 431         __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
 432         __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
 433         __ null_check(G3_method_handle);
 434         __ verify_oop(G3_method_handle);
 435       }
 436       __ jump_indirect_to(G5_method_fie, O1_scratch);
 437       __ delayed()->nop();
 438     }
 439     break;
 440 
 441   case _invokevirtual_mh:
 442     {
 443       // Same as TemplateTable::invokevirtual,
 444       // minus the CP setup and profiling:
 445 
 446       // Pick out the vtable index and receiver offset from the MH,
 447       // and then we can discard it:
 448       __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
 449       __ ldsw(G3_dmh_vmindex, G5_index);
 450       // Note:  The verifier allows us to ignore G3_mh_vmtarget.
 451       __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
 452       __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
 453 
 454       // Get receiver klass:
 455       Register O0_klass = O0_argslot;
 456       __ load_klass(G3_method_handle, O0_klass);
 457       __ verify_oop(O0_klass);
 458 
 459       // Get target methodOop & entry point:
 460       const int base = instanceKlass::vtable_start_offset() * wordSize;
 461       assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
 462 
 463       __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
 464       __ add(O0_klass, G5_index, O0_klass);
 465       Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
 466       __ ld_ptr(vtable_entry_addr, G5_method);
 467 
 468       __ verify_oop(G5_method);
 469       __ jump_indirect_to(G5_method_fie, O1_scratch);
 470       __ delayed()->nop();
 471     }
 472     break;
 473 
 474   case _invokeinterface_mh:
 475     {
 476       // Same as TemplateTable::invokeinterface,
 477       // minus the CP setup and profiling:
 478       __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
 479       Register O1_intf  = O1_scratch;
 480       __ load_heap_oop(G3_mh_vmtarget, O1_intf);
 481       __ ldsw(G3_dmh_vmindex, G5_index);
 482       __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
 483       __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
 484 
 485       // Get receiver klass:
 486       Register O0_klass = O0_argslot;
 487       __ load_klass(G3_method_handle, O0_klass);
 488       __ verify_oop(O0_klass);
 489 
 490       // Get interface:
 491       Label no_such_interface;
 492       __ verify_oop(O1_intf);
 493       __ lookup_interface_method(O0_klass, O1_intf,
 494                                  // Note: next two args must be the same:
 495                                  G5_index, G5_method,
 496                                  O2_scratch,
 497                                  O3_scratch,
 498                                  no_such_interface);
 499 
 500       __ verify_oop(G5_method);
 501       __ jump_indirect_to(G5_method_fie, O1_scratch);
 502       __ delayed()->nop();
 503 
 504       __ bind(no_such_interface);
 505       // Throw an exception.
 506       // For historical reasons, it will be IncompatibleClassChangeError.
 507       __ unimplemented("not tested yet");
 508       __ ld_ptr(Address(O1_intf, java_mirror_offset), O3_scratch);  // required interface
 509       __ mov(O0_klass, O2_scratch);  // bad receiver
 510       __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
 511       __ delayed()->mov(Bytecodes::_invokeinterface, O1_scratch);  // who is complaining?
 512     }
 513     break;
 514 
 515   case _bound_ref_mh:
 516   case _bound_int_mh:
 517   case _bound_long_mh:
 518   case _bound_ref_direct_mh:
 519   case _bound_int_direct_mh:
 520   case _bound_long_direct_mh:
 521     {
 522       const bool direct_to_method = (ek >= _bound_ref_direct_mh);
 523       BasicType arg_type  = T_ILLEGAL;
 524       int       arg_mask  = _INSERT_NO_MASK;
 525       int       arg_slots = -1;
 526       get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
 527 
 528       // Make room for the new argument:
 529       __ ldsw(G3_bmh_vmargslot, O0_argslot);
 530       __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
 531 
 532       insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);
 533 
 534       // Store bound argument into the new stack slot:
 535       __ load_heap_oop(G3_bmh_argument, O1_scratch);
 536       if (arg_type == T_OBJECT) {
 537         __ st_ptr(O1_scratch, Address(O0_argslot, 0));
 538       } else {
 539         Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
 540         __ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type));
 541         if (arg_slots == 2) {
 542           __ unimplemented("not yet tested");
 543 #ifndef _LP64
 544           __ signx(O2_scratch, O3_scratch);  // Sign extend
 545 #endif
 546           __ st_long(O2_scratch, Address(O0_argslot, 0));  // Uses O2/O3 on !_LP64
 547         } else {
 548           __ st_ptr( O2_scratch, Address(O0_argslot, 0));
 549         }
 550       }
 551 
 552       if (direct_to_method) {
 553         __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
 554         __ verify_oop(G5_method);
 555         __ jump_indirect_to(G5_method_fie, O1_scratch);
 556         __ delayed()->nop();
 557       } else {
 558         __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);  // target is a methodOop
 559         __ verify_oop(G3_method_handle);
 560         __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
 561       }
 562     }
 563     break;
 564 
 565   case _adapter_retype_only:
 566   case _adapter_retype_raw:
 567     // Immediately jump to the next MH layer:
 568     __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
 569     __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
 570     // This is OK when all parameter types widen.
 571     // It is also OK when a return type narrows.
 572     break;
 573 
 574   case _adapter_check_cast:
 575     {
 576       // Temps:
 577       Register G5_klass = G5_index;  // Interesting AMH data.
 578 
 579       // Check a reference argument before jumping to the next layer of MH:
 580       __ ldsw(G3_amh_vmargslot, O0_argslot);
 581       Address vmarg = __ argument_address(O0_argslot);
 582 
 583       // What class are we casting to?
 584       __ load_heap_oop(G3_amh_argument, G5_klass);  // This is a Class object!
 585       __ load_heap_oop(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);
 586 
 587       Label done;
 588       __ ld_ptr(vmarg, O1_scratch);
 589       __ tst(O1_scratch);
 590       __ brx(Assembler::zero, false, Assembler::pn, done);  // No cast if null.
 591       __ delayed()->nop();
 592       __ load_klass(O1_scratch, O1_scratch);
 593 
 594       // Live at this point:
 595       // - G5_klass        :  klass required by the target method
 596       // - O1_scratch      :  argument klass to test
 597       // - G3_method_handle:  adapter method handle
 598       __ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);
 599 
 600       // If we get here, the type check failed!
 601       __ ldsw(G3_amh_vmargslot, O0_argslot);  // reload argslot field
 602       __ load_heap_oop(G3_amh_argument, O3_scratch);  // required class
 603       __ ld_ptr(vmarg, O2_scratch);  // bad object
 604       __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
 605       __ delayed()->mov(Bytecodes::_checkcast, O1_scratch);  // who is complaining?
 606 
 607       __ bind(done);
 608       // Get the new MH:
 609       __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
 610       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
 611     }
 612     break;
 613 
 614   case _adapter_prim_to_prim:
 615   case _adapter_ref_to_prim:
 616     // Handled completely by optimized cases.
 617     __ stop("init_AdapterMethodHandle should not issue this");
 618     break;
 619 
 620   case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
 621 //case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
 622   case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
 623   case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
 624     {
 625       // Perform an in-place conversion to int or an int subword.
 626       __ ldsw(G3_amh_vmargslot, O0_argslot);
 627       Address vmarg = __ argument_address(O0_argslot);
 628       Address value;
 629       bool value_left_justified = false;
 630 
 631       switch (ek) {
 632       case _adapter_opt_i2i:
 633       case _adapter_opt_l2i:
 634         __ unimplemented(entry_name(ek));
 635         value = vmarg;
 636         break;
 637       case _adapter_opt_unboxi:
 638         {
 639           // Load the value up from the heap.
 640           __ ld_ptr(vmarg, O1_scratch);
 641           int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
 642 #ifdef ASSERT
 643           for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
 644             if (is_subword_type(BasicType(bt)))
 645               assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
 646           }
 647 #endif
 648           __ null_check(O1_scratch, value_offset);
 649           value = Address(O1_scratch, value_offset);
 650 #ifdef _BIG_ENDIAN
 651           // Values stored in objects are packed.
 652           value_left_justified = true;
 653 #endif
 654         }
 655         break;
 656       default:
 657         ShouldNotReachHere();
 658       }
 659 
 660       // This check is required on _BIG_ENDIAN
 661       Register G5_vminfo = G5_index;
 662       __ ldsw(G3_amh_conversion, G5_vminfo);
 663       assert(CONV_VMINFO_SHIFT == 0, "preshifted");
 664 
 665       // Original 32-bit vmdata word must be of this form:
 666       // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
 667       __ lduw(value, O1_scratch);
 668       if (!value_left_justified)
 669         __ sll(O1_scratch, G5_vminfo, O1_scratch);
 670       Label zero_extend, done;
 671       __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
 672       __ br(Assembler::zero, false, Assembler::pn, zero_extend);
 673       __ delayed()->nop();
 674 
 675       // this path is taken for int->byte, int->short
 676       __ sra(O1_scratch, G5_vminfo, O1_scratch);
 677       __ ba(false, done);
 678       __ delayed()->nop();
 679 
 680       __ bind(zero_extend);
 681       // this is taken for int->char
 682       __ srl(O1_scratch, G5_vminfo, O1_scratch);
 683 
 684       __ bind(done);
 685       __ st(O1_scratch, vmarg);
 686 
 687       // Get the new MH:
 688       __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
 689       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
 690     }
 691     break;
 692 
 693   case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
 694   case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
 695     {
 696       // Perform an in-place int-to-long or ref-to-long conversion.
 697       __ ldsw(G3_amh_vmargslot, O0_argslot);
 698 
 699       // On big-endian machine we duplicate the slot and store the MSW
 700       // in the first slot.
 701       __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);
 702 
 703       insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);
 704 
 705       Address arg_lsw(O0_argslot, 0);
 706       Address arg_msw(O0_argslot, -Interpreter::stackElementSize);
 707 
 708       switch (ek) {
 709       case _adapter_opt_i2l:
 710         {
 711           __ ldsw(arg_lsw, O2_scratch);      // Load LSW
 712 #ifndef _LP64
 713           __ signx(O2_scratch, O3_scratch);  // Sign extend
 714 #endif
 715           __ st_long(O2_scratch, arg_msw);   // Uses O2/O3 on !_LP64
 716         }
 717         break;
 718       case _adapter_opt_unboxl:
 719         {
 720           // Load the value up from the heap.
 721           __ ld_ptr(arg_lsw, O1_scratch);
 722           int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
 723           assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
 724           __ null_check(O1_scratch, value_offset);
 725           __ ld_long(Address(O1_scratch, value_offset), O2_scratch);  // Uses O2/O3 on !_LP64
 726           __ st_long(O2_scratch, arg_msw);
 727         }
 728         break;
 729       default:
 730         ShouldNotReachHere();
 731       }
 732 
 733       __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
 734       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
 735     }
 736     break;
 737 
 738   case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
 739   case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
 740     {
 741       // perform an in-place floating primitive conversion
 742       __ unimplemented(entry_name(ek));
 743     }
 744     break;
 745 
 746   case _adapter_prim_to_ref:
 747     __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
 748     break;
 749 
 750   case _adapter_swap_args:
 751   case _adapter_rot_args:
 752     // handled completely by optimized cases
 753     __ stop("init_AdapterMethodHandle should not issue this");
 754     break;
 755 
 756   case _adapter_opt_swap_1:
 757   case _adapter_opt_swap_2:
 758   case _adapter_opt_rot_1_up:
 759   case _adapter_opt_rot_1_down:
 760   case _adapter_opt_rot_2_up:
 761   case _adapter_opt_rot_2_down:
 762     {
 763       int swap_bytes = 0, rotate = 0;
 764       get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
 765 
 766       // 'argslot' is the position of the first argument to swap.
 767       __ ldsw(G3_amh_vmargslot, O0_argslot);
 768       __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
 769 
 770       // 'vminfo' is the second.
 771       Register O1_destslot = O1_scratch;
 772       __ ldsw(G3_amh_conversion, O1_destslot);
 773       assert(CONV_VMINFO_SHIFT == 0, "preshifted");
 774       __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
 775       __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);
 776 
 777       if (!rotate) {
 778         for (int i = 0; i < swap_bytes; i += wordSize) {
 779           __ ld_ptr(Address(O0_argslot,  i), O2_scratch);
 780           __ ld_ptr(Address(O1_destslot, i), O3_scratch);
 781           __ st_ptr(O3_scratch, Address(O0_argslot,  i));
 782           __ st_ptr(O2_scratch, Address(O1_destslot, i));
 783         }
 784       } else {
 785         // Save the first chunk, which is going to get overwritten.
 786         switch (swap_bytes) {
 787         case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
 788         case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru
 789         case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
 790         default: ShouldNotReachHere();
 791         }
 792 
 793         if (rotate > 0) {
          // Rotate upward.
 795           __ sub(O0_argslot, swap_bytes, O0_argslot);
 796 #if ASSERT
 797           {
 798             // Verify that argslot > destslot, by at least swap_bytes.
 799             Label L_ok;
 800             __ cmp(O0_argslot, O1_destslot);
 801             __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
 802             __ delayed()->nop();
 803             __ stop("source must be above destination (upward rotation)");
 804             __ bind(L_ok);
 805           }
 806 #endif
 807           // Work argslot down to destslot, copying contiguous data upwards.
 808           // Pseudo-code:
 809           //   argslot  = src_addr - swap_bytes
 810           //   destslot = dest_addr
 811           //   while (argslot >= destslot) {
 812           //     *(argslot + swap_bytes) = *(argslot + 0);
 813           //     argslot--;
 814           //   }
 815           Label loop;
 816           __ bind(loop);
 817           __ ld_ptr(Address(O0_argslot, 0), G5_index);
 818           __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
 819           __ sub(O0_argslot, wordSize, O0_argslot);
 820           __ cmp(O0_argslot, O1_destslot);
 821           __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
 822           __ delayed()->nop();  // FILLME
 823         } else {
 824           __ add(O0_argslot, swap_bytes, O0_argslot);
 825 #if ASSERT
 826           {
 827             // Verify that argslot < destslot, by at least swap_bytes.
 828             Label L_ok;
 829             __ cmp(O0_argslot, O1_destslot);
 830             __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
 831             __ delayed()->nop();
 832             __ stop("source must be above destination (upward rotation)");
 833             __ bind(L_ok);
 834           }
 835 #endif
 836           // Work argslot up to destslot, copying contiguous data downwards.
 837           // Pseudo-code:
 838           //   argslot  = src_addr + swap_bytes
 839           //   destslot = dest_addr
          //   while (argslot <= destslot) {
 841           //     *(argslot - swap_bytes) = *(argslot + 0);
 842           //     argslot++;
 843           //   }
 844           Label loop;
 845           __ bind(loop);
 846           __ ld_ptr(Address(O0_argslot, 0), G5_index);
 847           __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
 848           __ add(O0_argslot, wordSize, O0_argslot);
 849           __ cmp(O0_argslot, O1_destslot);
 850           __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
 851           __ delayed()->nop();  // FILLME
 852         }
 853 
 854         // Store the original first chunk into the destination slot, now free.
 855         switch (swap_bytes) {
 856         case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
 857         case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
 858         case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
 859         default: ShouldNotReachHere();
 860         }
 861       }
 862 
 863       __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
 864       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
 865     }
 866     break;
 867 
 868   case _adapter_dup_args:
 869     {
 870       // 'argslot' is the position of the first argument to duplicate.
 871       __ ldsw(G3_amh_vmargslot, O0_argslot);
 872       __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
 873 
      // 'stack_move' is the negative of the number of words to duplicate.
 875       Register G5_stack_move = G5_index;
 876       __ ldsw(G3_amh_conversion, G5_stack_move);
 877       __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
 878 
 879       // Remember the old Gargs (argslot[0]).
 880       Register O1_oldarg = O1_scratch;
 881       __ mov(Gargs, O1_oldarg);
 882 
 883       // Move Gargs down to make room for dups.
 884       __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
 885       __ add(Gargs, G5_stack_move, Gargs);
 886 
 887       // Compute the new Gargs (argslot[0]).
 888       Register O2_newarg = O2_scratch;
 889       __ mov(Gargs, O2_newarg);
 890 
 891       // Copy from oldarg[0...] down to newarg[0...]
      // Pseudo-code:
 893       //   O1_oldarg  = old-Gargs
 894       //   O2_newarg  = new-Gargs
 895       //   O0_argslot = argslot
 896       //   while (O2_newarg < O1_oldarg) *O2_newarg = *O0_argslot++
 897       Label loop;
 898       __ bind(loop);
 899       __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
 900       __ st_ptr(O3_scratch, Address(O2_newarg, 0));
 901       __ add(O0_argslot, wordSize, O0_argslot);
 902       __ add(O2_newarg,  wordSize, O2_newarg);
 903       __ cmp(O2_newarg, O1_oldarg);
 904       __ brx(Assembler::less, false, Assembler::pt, loop);
 905       __ delayed()->nop();  // FILLME
 906 
 907       __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
 908       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
 909     }
 910     break;
 911 
 912   case _adapter_drop_args:
 913     {
 914       // 'argslot' is the position of the first argument to nuke.
 915       __ ldsw(G3_amh_vmargslot, O0_argslot);
 916       __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
 917 
 918       // 'stack_move' is number of words to drop.
 919       Register G5_stack_move = G5_index;
 920       __ ldsw(G3_amh_conversion, G5_stack_move);
 921       __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
 922 
 923       remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);
 924 
 925       __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
 926       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
 927     }
 928     break;
 929 
 930   case _adapter_collect_args:
 931     __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
 932     break;
 933 
 934   case _adapter_spread_args:
 935     // Handled completely by optimized cases.
 936     __ stop("init_AdapterMethodHandle should not issue this");
 937     break;
 938 
 939   case _adapter_opt_spread_0:
 940   case _adapter_opt_spread_1:
 941   case _adapter_opt_spread_more:
 942     {
 943       // spread an array out into a group of arguments
 944       __ unimplemented(entry_name(ek));
 945     }
 946     break;
 947 
 948   case _adapter_flyby:
 949   case _adapter_ricochet:
 950     __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
 951     break;
 952 
 953   default:
 954     ShouldNotReachHere();
 955   }
 956 
 957   address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
 958   __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
 959 
 960   init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
 961 }