/*
 * Copyright 2008-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_methodHandles_sparc.cpp.incl"

#define __ _masm->

address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                address interpreted_entry) {
  // Just before the actual machine code entry point, allocate space
  // for a MethodHandleEntry::Data record, so that we can manage everything
  // from one base pointer.
  __ align(wordSize);
  address target = __ pc() + sizeof(Data);
  while (__ pc() < target) {
    __ nop();
    __ align(wordSize);
  }

  MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
  me->set_end_address(__ pc());         // set a temporary end_address
  me->set_from_interpreted_entry(interpreted_entry);
  me->set_type_checking_entry(NULL);

  return (address) me;
}

MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
                                                            address start_addr) {
  MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
  assert(me->end_address() == start_addr, "valid ME");

  // Fill in the real end_address:
  __ align(wordSize);
  me->set_end_address(__ pc());

  return me;
}


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
  // I5_savedSP: sender SP (must preserve)
  // G4 (Gargs): incoming argument list (must preserve)
  // G5_method:  invoke methodOop; becomes method type.
  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
  // O0, O1: garbage temps, blown away
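  //
  // In rough pseudo-code (an illustrative sketch, not the generated code):
  //   MethodType mtype = G5_method.method_type;   // chased via the offset chain below
  //   MethodHandle mh  = sp[mtype.form.vmslots];  // the receiver method handle
  //   if (mh.type != mtype)  throw WrongMethodType;
  //   goto mh.vmentry;                            // tail-call into the handle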
  Register O0_argslot = O0;
  Register O1_scratch = O1;

  // emit WrongMethodType path first, to enable back-branch from main path
  Label wrong_method_type;
  __ bind(wrong_method_type);
  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
  __ delayed()->nop();

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  // fetch the MethodType from the method handle into G5_method_type
  {
    Register tem = G5_method;
    assert(tem == G5_method_type, "yes, it's the same register");
    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
      __ ld_ptr(Address(tem, *pchase), G5_method_type);
    }
  }

  // given the MethodType, find out where the MH argument is buried
  __ ld_ptr(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes,        O1_scratch)), O0_argslot);
  __ ldsw(  Address(O0_argslot,     __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
  __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);

  __ check_method_handle_type(G5_method_type, G3_method_handle, O1_scratch, wrong_method_type);
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  return entry_point;
}


#ifdef ASSERT
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
  // Verify that argslot lies within (Gargs, FP].
  Label L_ok, L_bad;
#ifdef _LP64
  __ add(FP, STACK_BIAS, temp_reg);
  __ cmp(argslot_reg, temp_reg);
#else
  __ cmp(argslot_reg, FP);
#endif
  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
  __ delayed()->nop();
  __ cmp(Gargs, argslot_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
  __ delayed()->nop();
  __ bind(L_bad);
  __ stop(error_message);
  __ bind(L_ok);
}
#endif


// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     int arg_mask,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

#ifdef ASSERT
  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::greater, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots <= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() <= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

#ifdef _LP64
  if (arg_slots.is_register()) {
    // Was arg_slots register loaded as signed int?
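    // (A 64-bit register holds a properly sign-extended int iff shifting it
    // left by 32 and then arithmetically right by 32 reproduces the value;
    // e.g. 0x00000001_87654321 comes back as 0xFFFFFFFF_87654321 and fails
    // the comparison below.)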
    Label L_ok;
    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
    __ sra(temp_reg, BitsPerInt, temp_reg);
    __ cmp(arg_slots.as_register(), temp_reg);
    __ br(Assembler::equal, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ stop("arg_slots register not loaded as signed int");
    __ bind(L_ok);
  }
#endif

  // Make space on the stack for the inserted argument(s).
  // Then pull down everything shallower than argslot_reg.
  // The stacked return address gets pulled down with everything else.
  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
  //   sp -= size;
  //   for (temp = sp + size; temp < argslot; temp++)
  //     temp[-size] = temp[0]
  //   argslot -= size;
  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);

  __ mov(Gargs, temp_reg);  // source pointer for copy
  __ add(Gargs, offset, Gargs);

  {
    Label loop;
    __ bind(loop);
    // pull one word down each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ add(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, argslot_reg);
    __ brx(Assembler::less, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot down, to point to the opened-up space.
  __ add(argslot_reg, offset, argslot_reg);
}


// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
  __ add(argslot_reg, offset, temp2_reg);
  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::less, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

  // Pull up everything shallower than argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
  //   for (temp = argslot-1; temp >= sp; --temp)
  //     temp[size] = temp[0]
  //   argslot += size;
  //   sp += size;
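  // (Concretely, for a one-word removal (offset == wordSize) the loop below
  // slides each word in [Gargs, argslot) up by one word, after which SP is
  // raised by a 2*wordSize-aligned amount.)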
  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
  {
    Label loop;
    __ bind(loop);
    // pull one word up each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ sub(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, Gargs);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot up, to point to the just-copied block.
  __ add(Gargs, offset, Gargs);
  // And adjust the argslot address to point at the deletion point.
  __ add(argslot_reg, offset, argslot_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);
}


#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
                              oop mh) {
#if 0
                              intptr_t* entry_sp,
                              intptr_t* saved_sp,
                              intptr_t* saved_bp) {
  // called as a leaf from native code: do not block the JVM!
  intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
  intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
  printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
         adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
  if (last_sp != saved_sp)
    printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
#endif

  printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
  print_method_handle(mh);
}
#endif // PRODUCT


//------------------------------------------------------------------------------
// MethodHandles::generate_method_handle_stub
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
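//
// In outline (an illustrative sketch, not literal generated code): each
// stub rearranges the interpreter's outgoing argument area as its
// EntryKind dictates, then either tail-calls a target methodOop through
// its from_interpreted entry or reloads G3_method_handle from vmtarget
// and jumps to that handle's own vmentry, unwinding one adapter layer
// per jump.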
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  // Here is the register state during an interpreted call,
  // as set up by generate_method_handle_interpreter_entry():
  // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
  // - G3: receiver method handle
  // - O5_savedSP: sender SP (must preserve)

  Register O0_argslot = O0;
  Register O1_scratch = O1;
  Register O2_scratch = O2;
  Register O3_scratch = O3;
  Register G5_index   = G5;

  guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

  // Some handy addresses:
  Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));

  Address G3_mh_vmtarget(   G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());

  Address G3_dmh_vmindex(   G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());

  Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
  Address G3_bmh_argument(  G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());

  Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
  Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
  Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());

  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

  if (have_entry(ek)) {
    __ nop();  // empty stubs make SG sick
    return;
  }

  address interp_entry = __ pc();
  if (UseCompressedOops)  __ unimplemented("UseCompressedOops");

#ifndef PRODUCT
  if (TraceMethodHandles) {
    // save: Gargs, O5_savedSP
    __ save(SP, -16*wordSize, SP);
    __ set((intptr_t) entry_name(ek), O0);
    __ mov(G3_method_handle, O1);
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
    __ restore(SP, 16*wordSize, SP);
  }
#endif // PRODUCT

  switch ((int) ek) {
  case _raise_exception:
    {
      // Not a real MH entry, but rather shared code for raising an
      // exception.  Extra local arguments are passed in scratch
      // registers, as required type in O3, failing object (or NULL)
      // in O2, failing bytecode type in O1.

      __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.

      // Push arguments as if coming from the interpreter.
      Register O0_scratch = O0_argslot;
      int stackElementSize = Interpreter::stackElementSize();

      // Make space on the stack for the arguments.
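      // (After the two adjustments below, the three outgoing arguments sit
      // at Gargs[2], Gargs[1] and Gargs[0]; SP drops one extra element,
      // presumably to preserve the 2*wordSize stack alignment.)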
      __ sub(SP,    4*stackElementSize, SP);
      __ sub(Gargs, 3*stackElementSize, Gargs);
      //__ sub(Lesp, 3*stackElementSize, Lesp);

      // void raiseException(int code, Object actual, Object required)
      __ st(    O1_scratch, Address(Gargs, 2*stackElementSize));  // code
      __ st_ptr(O2_scratch, Address(Gargs, 1*stackElementSize));  // actual
      __ st_ptr(O3_scratch, Address(Gargs, 0*stackElementSize));  // required

      Label no_method;
      // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
      __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
      __ ld_ptr(Address(G5_method, 0), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, no_method);
      __ delayed()->nop();

      int jobject_oop_offset = 0;
      __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, no_method);
      __ delayed()->nop();

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();

      // If we get here, the Java runtime did not do its job of creating the exception.
      // Do something that at least causes a valid throw from the interpreter.
      __ bind(no_method);
      __ unimplemented("_raise_exception no method");
    }
    break;

  case _invokestatic_mh:
  case _invokespecial_mh:
    {
      __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
      __ verify_oop(G5_method);
      // Same as TemplateTable::invokestatic or invokespecial,
      // minus the CP setup and profiling:
      if (ek == _invokespecial_mh) {
        // Must load & check the first argument before entering the target method.
        __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
        __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
        __ null_check(G3_method_handle);
        __ verify_oop(G3_method_handle);
      }
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokevirtual_mh:
    {
      // Same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      // Pick out the vtable index and receiver offset from the MH,
      // and then we can discard it:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      __ ldsw(G3_dmh_vmindex, G5_index);
      // Note: The verifier allows us to ignore G3_mh_vmtarget.
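      // (The receiver is the deepest stacked argument, one slot below the
      // vmslots count, hence the -1 in the argument address below.)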
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get target methodOop & entry point:
      const int base = instanceKlass::vtable_start_offset() * wordSize;
      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");

      __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
      __ add(O0_klass, G5_index, O0_klass);
      Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
      __ ld_ptr(vtable_entry_addr, G5_method);

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokeinterface_mh:
    {
      // Same as TemplateTable::invokeinterface,
      // minus the CP setup and profiling:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      Register O1_intf = O1_scratch;
      __ ld_ptr(G3_mh_vmtarget, O1_intf);
      __ ldsw(G3_dmh_vmindex, G5_index);
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get interface:
      Label no_such_interface;
      __ verify_oop(O1_intf);
      __ lookup_interface_method(O0_klass, O1_intf,
                                 // Note: next two args must be the same:
                                 G5_index, G5_method,
                                 O2_scratch,
                                 O3_scratch,
                                 no_such_interface);

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();

      __ bind(no_such_interface);
      // Throw an exception.
      // For historical reasons, it will be IncompatibleClassChangeError.
      __ unimplemented("not tested yet");
      __ ld_ptr(Address(O1_intf, java_mirror_offset), O3_scratch);  // required interface
      __ mov(O0_klass, O2_scratch);  // bad receiver
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
      __ delayed()->mov(Bytecodes::_invokeinterface, O1_scratch);  // who is complaining?
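      // (The mov above sits in the branch delay slot, so O1_scratch still
      // picks up the failing bytecode before control reaches the
      // _raise_exception stub.)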
488 } 489 break; 490 491 case _bound_ref_mh: 492 case _bound_int_mh: 493 case _bound_long_mh: 494 case _bound_ref_direct_mh: 495 case _bound_int_direct_mh: 496 case _bound_long_direct_mh: 497 { 498 const bool direct_to_method = (ek >= _bound_ref_direct_mh); 499 BasicType arg_type = T_ILLEGAL; 500 int arg_mask = _INSERT_NO_MASK; 501 int arg_slots = -1; 502 get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots); 503 504 // Make room for the new argument: 505 __ ldsw(G3_bmh_vmargslot, O0_argslot); 506 __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot); 507 508 insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index); 509 510 // Store bound argument into the new stack slot: 511 __ ld_ptr(G3_bmh_argument, O1_scratch); 512 if (arg_type == T_OBJECT) { 513 __ st_ptr(O1_scratch, Address(O0_argslot, 0)); 514 } else { 515 Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type)); 516 __ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type)); 517 if (arg_slots == 2) { 518 __ unimplemented("not yet tested"); 519 #ifndef _LP64 520 __ signx(O2_scratch, O3_scratch); // Sign extend 521 #endif 522 __ st_long(O2_scratch, Address(O0_argslot, 0)); // Uses O2/O3 on !_LP64 523 } else { 524 __ st_ptr( O2_scratch, Address(O0_argslot, 0)); 525 } 526 } 527 528 if (direct_to_method) { 529 __ ld_ptr(G3_mh_vmtarget, G5_method); // target is a methodOop 530 __ verify_oop(G5_method); 531 __ jump_indirect_to(G5_method_fie, O1_scratch); 532 __ delayed()->nop(); 533 } else { 534 __ ld_ptr(G3_mh_vmtarget, G3_method_handle); // target is a methodOop 535 __ verify_oop(G3_method_handle); 536 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); 537 } 538 } 539 break; 540 541 case _adapter_retype_only: 542 case _adapter_retype_raw: 543 // Immediately jump to the next MH layer: 544 __ ld_ptr(G3_mh_vmtarget, G3_method_handle); 545 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); 546 // This is OK when all parameter types widen. 547 // It is also OK when a return type narrows. 548 break; 549 550 case _adapter_check_cast: 551 { 552 // Temps: 553 Register G5_klass = G5_index; // Interesting AMH data. 554 555 // Check a reference argument before jumping to the next layer of MH: 556 __ ldsw(G3_amh_vmargslot, O0_argslot); 557 Address vmarg = __ argument_address(O0_argslot); 558 559 // What class are we casting to? 560 __ ld_ptr(G3_amh_argument, G5_klass); // This is a Class object! 561 __ ld_ptr(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass); 562 563 Label done; 564 __ ld_ptr(vmarg, O1_scratch); 565 __ tst(O1_scratch); 566 __ brx(Assembler::zero, false, Assembler::pn, done); // No cast if null. 567 __ delayed()->nop(); 568 __ load_klass(O1_scratch, O1_scratch); 569 570 // Live at this point: 571 // - G5_klass : klass required by the target method 572 // - O1_scratch : argument klass to test 573 // - G3_method_handle: adapter method handle 574 __ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done); 575 576 // If we get here, the type check failed! 577 __ ldsw(G3_amh_vmargslot, O0_argslot); // reload argslot field 578 __ ld_ptr(G3_amh_argument, O3_scratch); // required class 579 __ ld_ptr(vmarg, O2_scratch); // bad object 580 __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot); 581 __ delayed()->mov(Bytecodes::_checkcast, O1_scratch); // who is complaining? 

      __ bind(done);
      // Get the new MH:
      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_prim_to_prim:
  case _adapter_ref_to_prim:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_i2i:     // optimized subcase of adapt_prim_to_prim
//case _adapter_opt_f2i:     // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_l2i:     // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxi:  // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place conversion to int or an int subword.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address vmarg = __ argument_address(O0_argslot);
      Address value;
      bool value_left_justified = false;

      switch (ek) {
      case _adapter_opt_i2i:
      case _adapter_opt_l2i:
        __ unimplemented(entry_name(ek));
        value = vmarg;
        break;
      case _adapter_opt_unboxi:
        {
          // Load the value up from the heap.
          __ ld_ptr(vmarg, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
#ifdef ASSERT
          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
            if (is_subword_type(BasicType(bt)))
              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
          }
#endif
          __ null_check(O1_scratch, value_offset);
          value = Address(O1_scratch, value_offset);
#ifdef _BIG_ENDIAN
          // Values stored in objects are packed.
          value_left_justified = true;
#endif
        }
        break;
      default:
        ShouldNotReachHere();
      }

      // This check is required on _BIG_ENDIAN
      Register G5_vminfo = G5_index;
      __ ldsw(G3_amh_conversion, G5_vminfo);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");

      // Original 32-bit vmdata word must be of this form:
      //    | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
      __ lduw(value, O1_scratch);
      if (!value_left_justified)
        __ sll(O1_scratch, G5_vminfo, O1_scratch);
      Label zero_extend, done;
      __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
      __ br(Assembler::zero, false, Assembler::pn, zero_extend);
      __ delayed()->nop();

      // this path is taken for int->byte, int->short
      __ sra(O1_scratch, G5_vminfo, O1_scratch);
      __ ba(false, done);
      __ delayed()->nop();

      __ bind(zero_extend);
      // this is taken for int->char
      __ srl(O1_scratch, G5_vminfo, O1_scratch);

      __ bind(done);
      __ st(O1_scratch, vmarg);

      // Get the new MH:
      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_i2l:     // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxl:  // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place int-to-long or ref-to-long conversion.
      __ ldsw(G3_amh_vmargslot, O0_argslot);

      // On big-endian machine we duplicate the slot and store the MSW
      // in the first slot.
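      // (Illustrative layout: insert_arg_slots below opens one extra slot,
      // and a single st_long to arg_msw then fills both slots, placing the
      // MSW at the lower address as big-endian SPARC expects.)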
      __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);

      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);

      Address arg_lsw(O0_argslot, 0);
      Address arg_msw(O0_argslot, -Interpreter::stackElementSize());

      switch (ek) {
      case _adapter_opt_i2l:
        {
          __ ldsw(arg_lsw, O2_scratch);      // Load LSW
#ifndef _LP64
          __ signx(O2_scratch, O3_scratch);  // Sign extend
#endif
          __ st_long(O2_scratch, arg_msw);   // Uses O2/O3 on !_LP64
        }
        break;
      case _adapter_opt_unboxl:
        {
          // Load the value up from the heap.
          __ ld_ptr(arg_lsw, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
          __ null_check(O1_scratch, value_offset);
          __ ld_long(Address(O1_scratch, value_offset), O2_scratch);  // Uses O2/O3 on !_LP64
          __ st_long(O2_scratch, arg_msw);
        }
        break;
      default:
        ShouldNotReachHere();
      }

      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_f2d:  // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_d2f:  // optimized subcase of adapt_prim_to_prim
    {
      // perform an in-place floating primitive conversion
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_prim_to_ref:
    __ unimplemented(entry_name(ek));  // %%% FIXME: NYI
    break;

  case _adapter_swap_args:
  case _adapter_rot_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_swap_1:
  case _adapter_opt_swap_2:
  case _adapter_opt_rot_1_up:
  case _adapter_opt_rot_1_down:
  case _adapter_opt_rot_2_up:
  case _adapter_opt_rot_2_down:
    {
      int swap_bytes = 0, rotate = 0;
      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);

      // 'argslot' is the position of the first argument to swap.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'vminfo' is the second.
      Register O1_destslot = O1_scratch;
      __ ldsw(G3_amh_conversion, O1_destslot);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
      __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
      __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);

      if (!rotate) {
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ ld_ptr(Address(O0_argslot,  i), O2_scratch);
          __ ld_ptr(Address(O1_destslot, i), O3_scratch);
          __ st_ptr(O3_scratch, Address(O0_argslot,  i));
          __ st_ptr(O2_scratch, Address(O1_destslot, i));
        }
      } else {
        // Save the first chunk, which is going to get overwritten.
        switch (swap_bytes) {
        case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
        case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); // fall-thru
        case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
        default: ShouldNotReachHere();
        }

        if (rotate > 0) {
          // Rotate upward.
          __ sub(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot > destslot, by at least swap_bytes.
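            // (O0_argslot has already been lowered by swap_bytes, so the
            // unsigned compare below effectively checks that the original
            // argslot >= destslot + swap_bytes.)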
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be above destination (upward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot down to destslot, copying contiguous data upwards.
          // Pseudo-code:
          //   argslot  = src_addr - swap_bytes
          //   destslot = dest_addr
          //   while (argslot >= destslot) {
          //     *(argslot + swap_bytes) = *(argslot + 0);
          //     argslot--;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
          __ sub(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        } else {
          // Rotate downward.
          __ add(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot < destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be below destination (downward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot up to destslot, copying contiguous data downwards.
          // Pseudo-code:
          //   argslot  = src_addr + swap_bytes
          //   destslot = dest_addr
          //   while (argslot <= destslot) {
          //     *(argslot - swap_bytes) = *(argslot + 0);
          //     argslot++;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
          __ add(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        }

        // Store the original first chunk into the destination slot, now free.
        switch (swap_bytes) {
        case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
        case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
        case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
        default: ShouldNotReachHere();
        }
      }

      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_dup_args:
    {
      // 'argslot' is the position of the first argument to duplicate.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is negative number of words to duplicate.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      // Remember the old Gargs (argslot[0]).
      Register O1_oldarg = O1_scratch;
      __ mov(Gargs, O1_oldarg);

      // Move Gargs down to make room for dups.
      __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
      __ add(Gargs, G5_stack_move, Gargs);

      // Compute the new Gargs (argslot[0]).
      Register O2_newarg = O2_scratch;
      __ mov(Gargs, O2_newarg);

      // Copy from oldarg[0...] down to newarg[0...]
      // Pseudo-code:
      //   O1_oldarg  = old-Gargs
      //   O2_newarg  = new-Gargs
      //   O0_argslot = argslot
      //   while (O2_newarg < O1_oldarg) *O2_newarg = *O0_argslot++
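      // (E.g. with stack_move == -2, Gargs drops by two words and the loop
      // below fills the two freshly opened slots with copies of argslot[0]
      // and argslot[1].)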
      Label loop;
      __ bind(loop);
      __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
      __ st_ptr(O3_scratch, Address(O2_newarg, 0));
      __ add(O0_argslot, wordSize, O0_argslot);
      __ add(O2_newarg,  wordSize, O2_newarg);
      __ cmp(O2_newarg, O1_oldarg);
      __ brx(Assembler::less, false, Assembler::pt, loop);
      __ delayed()->nop();  // FILLME

      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_drop_args:
    {
      // 'argslot' is the position of the first argument to nuke.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is number of words to drop.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);

      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_collect_args:
    __ unimplemented(entry_name(ek));  // %%% FIXME: NYI
    break;

  case _adapter_spread_args:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_spread_0:
  case _adapter_opt_spread_1:
  case _adapter_opt_spread_more:
    {
      // spread an array out into a group of arguments
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_flyby:
  case _adapter_ricochet:
    __ unimplemented(entry_name(ek));  // %%% FIXME: NYI
    break;

  default:
    ShouldNotReachHere();
  }

  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  __ unimplemented(entry_name(ek));  // %%% FIXME: NYI

  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}