/*
 * Copyright 2008-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_methodHandles_sparc.cpp.incl"

#define __ _masm->

address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                address interpreted_entry) {
  // Just before the actual machine code entry point, allocate space
  // for a MethodHandleEntry::Data record, so that we can manage everything
  // from one base pointer.
  __ align(wordSize);
  address target = __ pc() + sizeof(Data);
  while (__ pc() < target) {
    __ nop();
    __ align(wordSize);
  }

  MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
  me->set_end_address(__ pc());         // set a temporary end_address
  me->set_from_interpreted_entry(interpreted_entry);
  me->set_type_checking_entry(NULL);

  return (address) me;
}
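
// Sketch of the layout produced above (an illustration only; the Data
// field layout lives in methodHandles.hpp):
//
//   aligned pc -> +---------------------------------+
//                 | sizeof(Data) bytes of nops      |  <- storage for the
//                 | (word aligned)                  |     Data record
//   me --------> +---------------------------------+
//                 | stub code emitted by the caller |
//   end_address  +---------------------------------+  <- filled in later
//
// end_address is temporarily set to 'me' itself, which is what lets
// finish_compiled_entry() assert it was handed the matching start address
// before it fills in the real end_address.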

MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
                                                address start_addr) {
  MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
  assert(me->end_address() == start_addr, "valid ME");

  // Fill in the real end_address:
  __ align(wordSize);
  me->set_end_address(__ pc());

  return me;
}


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
  // I5_savedSP: sender SP (must preserve)
  // G4 (Gargs): incoming argument list (must preserve)
  // G5_method:  invoke methodOop; becomes method type.
  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
  // O0, O1: garbage temps, blown away
  Register O0_argslot = O0;
  Register O1_scratch = O1;

  // emit WrongMethodType path first, to enable back-branch from main path
  Label wrong_method_type;
  __ bind(wrong_method_type);
  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
  __ delayed()->nop();

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  // fetch the MethodType from the method handle into G5_method_type
  {
    Register tem = G5_method;
    assert(tem == G5_method_type, "yes, it's the same register");
    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
      __ ld_ptr(Address(tem, *pchase), G5_method_type);
    }
  }
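  // (The chain just walked is a -1-terminated list of pointer-field
  // offsets: each step loads the pointer found at the current offset,
  // starting from the methodOop and leaving the MethodType oop in
  // G5_method_type, which is the same register as G5_method, per the
  // assert above.)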

  // given the MethodType, find out where the MH argument is buried
  __ ld_ptr(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)),        O0_argslot);
  __ ldsw(  Address(O0_argslot,     __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
  __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);

  __ check_method_handle_type(G5_method_type, G3_method_handle, O1_scratch, wrong_method_type);
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  return entry_point;
}


#ifdef ASSERT
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
  // Verify that argslot lies within (Gargs, FP].
  Label L_ok, L_bad;
#ifdef _LP64
  __ add(FP, STACK_BIAS, temp_reg);
  __ cmp(argslot_reg, temp_reg);
#else
  __ cmp(argslot_reg, FP);
#endif
  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
  __ delayed()->nop();
  __ cmp(Gargs, argslot_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
  __ delayed()->nop();
  __ bind(L_bad);
  __ stop(error_message);
  __ bind(L_ok);
}
#endif


// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     int arg_mask,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

#ifdef ASSERT
  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::greater, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots <= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() <= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

#ifdef _LP64
  if (arg_slots.is_register()) {
    // Was arg_slots register loaded as signed int?
    Label L_ok;
    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
    __ sra(temp_reg, BitsPerInt, temp_reg);
    __ cmp(arg_slots.as_register(), temp_reg);
    __ br(Assembler::equal, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ stop("arg_slots register not loaded as signed int");
    __ bind(L_ok);
  }
#endif

  // Make space on the stack for the inserted argument(s).
  // Then pull down everything shallower than argslot_reg.
  // The stacked return address gets pulled down with everything else.
  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
  //   sp -= size;
  //   for (temp = sp + size; temp < argslot; temp++)
  //     temp[-size] = temp[0]
  //   argslot -= size;
  RegisterOrConstant offset = temp3_reg;
  __ regcon_sll_ptr(arg_slots, LogBytesPerWord, offset);
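  // Note: arg_slots <= 0 here, so 'offset' is zero or negative and the
  // SP/Gargs additions below actually move both pointers downward.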

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = temp_reg;
  __ regcon_andn_ptr(offset, TwoWordAlignmentMask, masked_offset);
  __ add(SP, masked_offset, SP);

  __ mov(Gargs, temp_reg);  // source pointer for copy
  __ add(Gargs, offset, Gargs);

  {
    Label loop;
    __ bind(loop);
    // pull one word down each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ add(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, argslot_reg);
    __ brx(Assembler::less, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot down, to point to the opened-up space.
  __ add(argslot_reg, offset, argslot_reg);
}
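
// Worked example for insert_arg_slots (hypothetical values, not emitted
// code): with arg_slots == -2 on a 64-bit build (wordSize == 8):
//   offset        = -2 << LogBytesPerWord = -16 bytes
//   masked_offset = -16 & ~15             = -16  (keeps SP 2*wordSize aligned)
// SP and Gargs both move down 16 bytes, every word in [old Gargs, argslot)
// is copied 16 bytes lower, and argslot_reg is left pointing at the newly
// opened pair of slots.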


// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

  RegisterOrConstant offset = temp3_reg;
  __ regcon_sll_ptr(arg_slots, LogBytesPerWord, offset);

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
  __ add(argslot_reg, offset, temp2_reg);
  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::less, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

  // Pull up everything shallower than argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
  //   for (temp = argslot-1; temp >= sp; --temp)
  //     temp[size] = temp[0]
  //   argslot += size;
  //   sp += size;
  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
  {
    Label loop;
    __ bind(loop);
    // pull one word up each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ sub(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, Gargs);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot up, to point to the just-copied block.
  __ add(Gargs, offset, Gargs);
  // And adjust the argslot address to point at the deletion point.
  __ add(argslot_reg, offset, argslot_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = temp_reg;
  __ regcon_andn_ptr(offset, TwoWordAlignmentMask, masked_offset);
  __ add(SP, masked_offset, SP);
}


#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
                              oop mh) {
#if 0
                              intptr_t* entry_sp,
                              intptr_t* saved_sp,
                              intptr_t* saved_bp) {
  // called as a leaf from native code: do not block the JVM!
  intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
  intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
  printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
         adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
  if (last_sp != saved_sp)
    printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
#endif

  printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
  print_method_handle(mh);
}
#endif // PRODUCT


//------------------------------------------------------------------------------
// MethodHandles::generate_method_handle_stub
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  // Here is the register state during an interpreted call,
  // as set up by generate_method_handle_interpreter_entry():
  // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
  // - G3: receiver method handle
  // - O5_savedSP: sender SP (must preserve)

  Register O0_argslot = O0;
  Register O1_scratch = O1;
  Register O2_scratch = O2;
  Register O3_scratch = O3;
  Register G5_index   = G5;

  guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

  // Some handy addresses:
  Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));

  Address G3_mh_vmtarget(   G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());

  Address G3_dmh_vmindex(   G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());

  Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
  Address G3_bmh_argument(  G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());

  Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
  Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
  Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());

  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

  if (have_entry(ek)) {
    __ nop();  // empty stubs make SG sick
    return;
  }

  address interp_entry = __ pc();
  if (UseCompressedOops)  __ unimplemented("UseCompressedOops");

#ifndef PRODUCT
  if (TraceMethodHandles) {
    // save: Gargs, O5_savedSP
    __ save(SP, -16*wordSize, SP);
    __ set((intptr_t) entry_name(ek), O0);
    __ mov(G3_method_handle, O1);
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
    __ restore(SP, 16*wordSize, SP);
  }
#endif // PRODUCT

  switch ((int) ek) {
  case _raise_exception:
    {
      // Not a real MH entry, but rather shared code for raising an
      // exception.  Extra local arguments are passed in scratch
      // registers, as required type in O3, failing object (or NULL)
      // in O2, failing bytecode type in O1.

      __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.

      // Push arguments as if coming from the interpreter.
      Register O0_scratch = O0_argslot;
      int stackElementSize = Interpreter::stackElementSize();

      // Make space on the stack for the arguments.
      __ sub(SP,    4*stackElementSize, SP);
      __ sub(Gargs, 3*stackElementSize, Gargs);
      //__ sub(Lesp,  3*stackElementSize, Lesp);

      // void raiseException(int code, Object actual, Object required)
      __ st(    O1_scratch, Address(Gargs, 2*stackElementSize));  // code
      __ st_ptr(O2_scratch, Address(Gargs, 1*stackElementSize));  // actual
      __ st_ptr(O3_scratch, Address(Gargs, 0*stackElementSize));  // required
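      // Resulting layout relative to the new Gargs (an illustration of
      // the three stores above):
      //   Gargs + 2*stackElementSize : code     (int,  st)
      //   Gargs + 1*stackElementSize : actual   (oop,  st_ptr)
      //   Gargs + 0*stackElementSize : required (oop,  st_ptr)
      // matching the raiseException signature in the comment above.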

      Label no_method;
      // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
      __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
      __ ld_ptr(Address(G5_method, 0), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, no_method);
      __ delayed()->nop();

      int jobject_oop_offset = 0;
      __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, no_method);
      __ delayed()->nop();

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();

      // If we get here, the Java runtime did not do its job of creating the exception.
      // Do something that at least causes a valid throw from the interpreter.
      __ bind(no_method);
      __ unimplemented("_raise_exception no method");
    }
    break;

  case _invokestatic_mh:
  case _invokespecial_mh:
    {
      __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
      __ verify_oop(G5_method);
      // Same as TemplateTable::invokestatic or invokespecial,
      // minus the CP setup and profiling:
      if (ek == _invokespecial_mh) {
        // Must load & check the first argument before entering the target method.
        __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
        __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
        __ null_check(G3_method_handle);
        __ verify_oop(G3_method_handle);
      }
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokevirtual_mh:
    {
      // Same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      // Pick out the vtable index and receiver offset from the MH,
      // and then we can discard it:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      __ ldsw(G3_dmh_vmindex, G5_index);
      // Note:  The verifier allows us to ignore G3_mh_vmtarget.
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get target methodOop & entry point:
      const int base = instanceKlass::vtable_start_offset() * wordSize;
      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");

      __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
      __ add(O0_klass, G5_index, O0_klass);
      Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
      __ ld_ptr(vtable_entry_addr, G5_method);
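      // In effect (illustration only):
      //   G5_method = *(O0_klass + vtable_start_offset()*wordSize
      //                          + vmindex*wordSize + method_offset_in_bytes())
      // i.e. an ordinary vtable dispatch, except that the index comes from
      // the DirectMethodHandle instead of a constant-pool cache entry.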

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokeinterface_mh:
    {
      // Same as TemplateTable::invokeinterface,
      // minus the CP setup and profiling:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      Register O1_intf  = O1_scratch;
      __ ld_ptr(G3_mh_vmtarget, O1_intf);
      __ ldsw(G3_dmh_vmindex, G5_index);
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get interface:
      Label no_such_interface;
      __ verify_oop(O1_intf);
      __ lookup_interface_method(O0_klass, O1_intf,
                                 // Note: next two args must be the same:
                                 G5_index, G5_method,
                                 O2_scratch,
                                 O3_scratch,
                                 no_such_interface);

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();

      __ bind(no_such_interface);
      // Throw an exception.
      // For historical reasons, it will be IncompatibleClassChangeError.
      __ unimplemented("not tested yet");
      __ ld_ptr(Address(O1_intf, java_mirror_offset), O3_scratch);  // required interface
      __ mov(O0_klass, O2_scratch);  // bad receiver
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
      __ delayed()->mov(Bytecodes::_invokeinterface, O1_scratch);  // who is complaining?
    }
    break;

  case _bound_ref_mh:
  case _bound_int_mh:
  case _bound_long_mh:
  case _bound_ref_direct_mh:
  case _bound_int_direct_mh:
  case _bound_long_direct_mh:
    {
      const bool direct_to_method = (ek >= _bound_ref_direct_mh);
      BasicType arg_type  = T_ILLEGAL;
      int       arg_mask  = _INSERT_NO_MASK;
      int       arg_slots = -1;
      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);

      // Make room for the new argument:
      __ ldsw(G3_bmh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);

      // Store bound argument into the new stack slot:
      __ ld_ptr(G3_bmh_argument, O1_scratch);
      if (arg_type == T_OBJECT) {
        __ st_ptr(O1_scratch, Address(O0_argslot, 0));
      } else {
        Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
        __ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type));
        if (arg_slots == 2) {
          __ unimplemented("not yet tested");
#ifndef _LP64
          __ signx(O2_scratch, O3_scratch);  // Sign extend
#endif
          __ st_long(O2_scratch, Address(O0_argslot, 0));  // Uses O2/O3 on !_LP64
        } else {
          __ st_ptr( O2_scratch, Address(O0_argslot, 0));
        }
      }

      if (direct_to_method) {
        __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
        __ verify_oop(G5_method);
        __ jump_indirect_to(G5_method_fie, O1_scratch);
        __ delayed()->nop();
      } else {
        __ ld_ptr(G3_mh_vmtarget, G3_method_handle);  // target is a MethodHandle
        __ verify_oop(G3_method_handle);
        __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
      }
    }
    break;

  case _adapter_retype_only:
  case _adapter_retype_raw:
    // Immediately jump to the next MH layer:
    __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
    __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    // This is OK when all parameter types widen.
    // It is also OK when a return type narrows.
    break;

  case _adapter_check_cast:
    {
      // Temps:
      Register G5_klass = G5_index;  // Interesting AMH data.

      // Check a reference argument before jumping to the next layer of MH:
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address vmarg = __ argument_address(O0_argslot);

      // What class are we casting to?
      __ ld_ptr(G3_amh_argument, G5_klass);  // This is a Class object!
      __ ld_ptr(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);

      Label done;
      __ ld_ptr(vmarg, O1_scratch);
      __ tst(O1_scratch);
      __ brx(Assembler::zero, false, Assembler::pn, done);  // No cast if null.
      __ delayed()->nop();
      __ load_klass(O1_scratch, O1_scratch);

      // Live at this point:
      // - G5_klass        :  klass required by the target method
      // - O1_scratch      :  argument klass to test
      // - G3_method_handle:  adapter method handle
      __ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);

      // If we get here, the type check failed!
      __ ldsw(G3_amh_vmargslot, O0_argslot);  // reload argslot field
      __ ld_ptr(G3_amh_argument, O3_scratch);  // required class
      __ ld_ptr(vmarg, O2_scratch);  // bad object
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
      __ delayed()->mov(Bytecodes::_checkcast, O1_scratch);  // who is complaining?

      __ bind(done);
      // Get the new MH:
      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_prim_to_prim:
  case _adapter_ref_to_prim:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
//case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place conversion to int or an int subword.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address vmarg = __ argument_address(O0_argslot);
      Address value;
      bool value_left_justified = false;

      switch (ek) {
      case _adapter_opt_i2i:
      case _adapter_opt_l2i:
        __ unimplemented(entry_name(ek));
        value = vmarg;
        break;
      case _adapter_opt_unboxi:
        {
          // Load the value up from the heap.
          __ ld_ptr(vmarg, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
#ifdef ASSERT
          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
            if (is_subword_type(BasicType(bt)))
              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
          }
#endif
          __ null_check(O1_scratch, value_offset);
          value = Address(O1_scratch, value_offset);
#ifdef _BIG_ENDIAN
          // Values stored in objects are packed.
          value_left_justified = true;
#endif
        }
        break;
      default:
        ShouldNotReachHere();
      }

      // This check is required on _BIG_ENDIAN
      Register G5_vminfo = G5_index;
      __ ldsw(G3_amh_conversion, G5_vminfo);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");

      // Original 32-bit vmdata word must be of this form:
      // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
      __ lduw(value, O1_scratch);
      if (!value_left_justified)
        __ sll(O1_scratch, G5_vminfo, O1_scratch);
      Label zero_extend, done;
      __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
      __ br(Assembler::zero, false, Assembler::pn, zero_extend);
      __ delayed()->nop();

      // this path is taken for int->byte, int->short
      __ sra(O1_scratch, G5_vminfo, O1_scratch);
      __ ba(false, done);
      __ delayed()->nop();

      __ bind(zero_extend);
      // this is taken for int->char
      __ srl(O1_scratch, G5_vminfo, O1_scratch);

      __ bind(done);
      __ st(O1_scratch, vmarg);
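      // Net effect of the shifts above, as a worked example (assuming
      // vminfo encodes the shift distance, e.g. 24 for int->byte):
      //   value 0x000000AB:  sll 24 -> 0xAB000000,  sra 24 -> 0xFFFFFFAB
      // so the low byte is sign-extended in place; the srl path
      // zero-extends instead, as needed for char.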

      // Get the new MH:
      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place int-to-long or ref-to-long conversion.
      __ ldsw(G3_amh_vmargslot, O0_argslot);

      // On big-endian machine we duplicate the slot and store the MSW
      // in the first slot.
      __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);

      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);

      Address arg_lsw(O0_argslot, 0);
      Address arg_msw(O0_argslot, -Interpreter::stackElementSize());
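      // Slot picture after the insert, big-endian (an illustration):
      //   arg_msw = argslot - stackElementSize : will hold the MSW
      //   arg_lsw = argslot + 0                : the original slot, the LSW
      // The st_long below writes the 64-bit value starting at arg_msw
      // (using the O2/O3 pair on 32-bit builds).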

      switch (ek) {
      case _adapter_opt_i2l:
        {
          __ ldsw(arg_lsw, O2_scratch);      // Load LSW
#ifndef _LP64
          __ signx(O2_scratch, O3_scratch);  // Sign extend
#endif
          __ st_long(O2_scratch, arg_msw);   // Uses O2/O3 on !_LP64
        }
        break;
      case _adapter_opt_unboxl:
        {
          // Load the value up from the heap.
          __ ld_ptr(arg_lsw, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
          __ null_check(O1_scratch, value_offset);
          __ ld_long(Address(O1_scratch, value_offset), O2_scratch);  // Uses O2/O3 on !_LP64
          __ st_long(O2_scratch, arg_msw);
        }
        break;
      default:
        ShouldNotReachHere();
      }

      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
    {
      // perform an in-place floating primitive conversion
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_prim_to_ref:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_swap_args:
  case _adapter_rot_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_swap_1:
  case _adapter_opt_swap_2:
  case _adapter_opt_rot_1_up:
  case _adapter_opt_rot_1_down:
  case _adapter_opt_rot_2_up:
  case _adapter_opt_rot_2_down:
    {
      int swap_bytes = 0, rotate = 0;
      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);

      // 'argslot' is the position of the first argument to swap.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'vminfo' is the second.
      Register O1_destslot = O1_scratch;
      __ ldsw(G3_amh_conversion, O1_destslot);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
      __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
      __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);

      if (!rotate) {
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ ld_ptr(Address(O0_argslot,  i), O2_scratch);
          __ ld_ptr(Address(O1_destslot, i), O3_scratch);
          __ st_ptr(O3_scratch, Address(O0_argslot,  i));
          __ st_ptr(O2_scratch, Address(O1_destslot, i));
        }
      } else {
        // Save the first chunk, which is going to get overwritten.
        switch (swap_bytes) {
        case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
        case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru
        case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
        default: ShouldNotReachHere();
        }
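        // Note the deliberate fall-through above: for a 16-byte chunk the
        // second (higher-addressed) word is loaded into O3_scratch, then
        // control falls into the 8-byte case to load the first word into
        // O2_scratch.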

        if (rotate > 0) {
          // Rotate upward.
          __ sub(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot > destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be above destination (upward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot down to destslot, copying contiguous data upwards.
          // Pseudo-code:
          //   argslot  = src_addr - swap_bytes
          //   destslot = dest_addr
          //   while (argslot >= destslot) {
          //     *(argslot + swap_bytes) = *(argslot + 0);
          //     argslot--;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
          __ sub(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        } else {
          __ add(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot < destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be below destination (downward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot up to destslot, copying contiguous data downwards.
          // Pseudo-code:
          //   argslot  = src_addr + swap_bytes
          //   destslot = dest_addr
          //   while (argslot <= destslot) {
          //     *(argslot - swap_bytes) = *(argslot + 0);
          //     argslot++;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
          __ add(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        }

        // Store the original first chunk into the destination slot, now free.
        switch (swap_bytes) {
        case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
        case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
        case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
        default: ShouldNotReachHere();
        }
      }
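      // Worked example (hypothetical slots): rotating one word upward with
      // swap_bytes == wordSize over ascending addresses [A][B][C], where
      // destslot -> A and argslot -> C: C is saved, A and B are each copied
      // one word higher, and C is stored at destslot, yielding [C][A][B].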

      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_dup_args:
    {
      // 'argslot' is the position of the first argument to duplicate.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is negative number of words to duplicate.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
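      // (The conversion word packs stack_move into its topmost bits, per
      // CONV_STACK_MOVE_SHIFT; the arithmetic shift above extracts it as
      // a signed slot count, negative here since duplication grows the
      // argument list.)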

      // Remember the old Gargs (argslot[0]).
      Register O1_oldarg = O1_scratch;
      __ mov(Gargs, O1_oldarg);

      // Move Gargs down to make room for dups.
      __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
      __ add(Gargs, G5_stack_move, Gargs);

      // Compute the new Gargs (argslot[0]).
      Register O2_newarg = O2_scratch;
      __ mov(Gargs, O2_newarg);

      // Copy from oldarg[0...] down to newarg[0...]
      // Pseudo-code:
      //   O1_oldarg  = old-Gargs
      //   O2_newarg  = new-Gargs
      //   O0_argslot = argslot
      //   while (O2_newarg < O1_oldarg) *O2_newarg = *O0_argslot++
      Label loop;
      __ bind(loop);
      __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
      __ st_ptr(O3_scratch, Address(O2_newarg, 0));
      __ add(O0_argslot, wordSize, O0_argslot);
      __ add(O2_newarg,  wordSize, O2_newarg);
      __ cmp(O2_newarg, O1_oldarg);
      __ brx(Assembler::less, false, Assembler::pt, loop);
      __ delayed()->nop();  // FILLME

      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_drop_args:
    {
      // 'argslot' is the position of the first argument to nuke.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is number of words to drop.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);

      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_collect_args:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_spread_args:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_spread_0:
  case _adapter_opt_spread_1:
  case _adapter_opt_spread_more:
    {
      // spread an array out into a group of arguments
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_flyby:
  case _adapter_ricochet:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  default:
    ShouldNotReachHere();
  }

  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  __ unimplemented(entry_name(ek)); // %%% FIXME: NYI

  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}