/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                address interpreted_entry) {
  // Just before the actual machine code entry point, allocate space
  // for a MethodHandleEntry::Data record, so that we can manage everything
  // from one base pointer.
  __ align(wordSize);
  address target = __ pc() + sizeof(Data);
  while (__ pc() < target) {
    __ nop();
    __ align(wordSize);
  }

  MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
  me->set_end_address(__ pc());         // set a temporary end_address
  me->set_from_interpreted_entry(interpreted_entry);
  me->set_type_checking_entry(NULL);

  return (address) me;
}

MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
                                                            address start_addr) {
  MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
  assert(me->end_address() == start_addr, "valid ME");

  // Fill in the real end_address:
  __ align(wordSize);
  me->set_end_address(__ pc());

  return me;
}


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
  // I5_savedSP/O5_savedSP: sender SP (must preserve)
  // G4 (Gargs): incoming argument list (must preserve)
  // G5_method:  invoke methodOop
  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
  // O0, O1, O2, O3, O4: garbage temps, blown away
  Register O0_mtype   = O0;
  Register O1_scratch = O1;
  Register O2_scratch = O2;
  Register O3_scratch = O3;
  Register O4_argslot = O4;
  Register O4_argbase = O4;

  // emit WrongMethodType path first, to enable back-branch from main path
  Label wrong_method_type;
  __ bind(wrong_method_type);
  Label invoke_generic_slow_path;
  assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");
  __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
  __ cmp(O1_scratch, (int) vmIntrinsics::_invokeExact);
  __ brx(Assembler::notEqual, false, Assembler::pt, invoke_generic_slow_path);
  __ delayed()->nop();
  __ mov(O0_mtype, G5_method_type);  // required by throw_WrongMethodType
  // mov(G3_method_handle, G3_method_handle);  // already in this register
  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
  __ delayed()->nop();

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  // fetch the MethodType from the method handle
  {
    Register tem = G5_method;
    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
      __ ld_ptr(Address(tem, *pchase), O0_mtype);
      tem = O0_mtype;  // in case there is another indirection
    }
  }

  // given the MethodType, find out where the MH argument is buried
  __ load_heap_oop(Address(O0_mtype,   __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes,        O1_scratch)), O4_argslot);
  __ ldsw(         Address(O4_argslot, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot);
  __ add(Gargs, __ argument_offset(O4_argslot, 1), O4_argbase);
  // Note: argument_address uses its input as a scratch register!
  __ ld_ptr(Address(O4_argbase, -Interpreter::stackElementSize), G3_method_handle);

  trace_method_handle(_masm, "invokeExact");

  __ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type);
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  // for invokeGeneric (only), apply argument and result conversions on the fly
  __ bind(invoke_generic_slow_path);
#ifdef ASSERT
  { Label L;
    __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
    __ cmp(O1_scratch, (int) vmIntrinsics::_invokeGeneric);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("bad methodOop::intrinsic_id");
    __ bind(L);
  }
#endif //ASSERT

  // make room on the stack for another pointer:
  insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK, O4_argbase, O1_scratch, O2_scratch, O3_scratch);
  // load up an adapter from the calling type (Java weaves this)
  Register O2_form    = O2_scratch;
  Register O3_adapter = O3_scratch;
  __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes,               O1_scratch)), O2_form);
  // load_heap_oop(Address(O2_form,  __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
  // deal with old JDK versions:
  __ add(          Address(O2_form,  __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
  __ cmp(O3_adapter, O2_form);
  Label sorry_no_invoke_generic;
  __ brx(Assembler::lessUnsigned, false, Assembler::pn, sorry_no_invoke_generic);
  __ delayed()->nop();

  __ load_heap_oop(Address(O3_adapter, 0), O3_adapter);
  __ tst(O3_adapter);
  __ brx(Assembler::zero, false, Assembler::pn, sorry_no_invoke_generic);
  __ delayed()->nop();
  __ st_ptr(O3_adapter, Address(O4_argbase, 1 * Interpreter::stackElementSize));
  // As a trusted first argument, pass the type being called, so the adapter knows
  // the actual types of the arguments and return values.
  // (Generic invokers are shared among form-families of method-type.)
  __ st_ptr(O0_mtype,   Address(O4_argbase, 0 * Interpreter::stackElementSize));
  // FIXME: assert that O3_adapter is of the right method-type.
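  // Dispatch to the generic-invoker adapter as if it were the original method handle: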
  __ mov(O3_adapter, G3_method_handle);
  trace_method_handle(_masm, "invokeGeneric");
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  __ bind(sorry_no_invoke_generic);  // no invokeGeneric implementation available!
  __ mov(O0_mtype, G5_method_type);  // required by throw_WrongMethodType
  // mov(G3_method_handle, G3_method_handle);  // already in this register
  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
  __ delayed()->nop();

  return entry_point;
}


#ifdef ASSERT
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
  // Verify that argslot lies within (Gargs, FP].
  Label L_ok, L_bad;
  BLOCK_COMMENT("{ verify_argslot");
#ifdef _LP64
  __ add(FP, STACK_BIAS, temp_reg);
  __ cmp(argslot_reg, temp_reg);
#else
  __ cmp(argslot_reg, FP);
#endif
  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
  __ delayed()->nop();
  __ cmp(Gargs, argslot_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
  __ delayed()->nop();
  __ bind(L_bad);
  __ stop(error_message);
  __ bind(L_ok);
  BLOCK_COMMENT("} verify_argslot");
}
#endif


// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     int arg_mask,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

#ifdef ASSERT
  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::greater, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots <= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() <= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

#ifdef _LP64
  if (arg_slots.is_register()) {
    // Was arg_slots register loaded as signed int?
    Label L_ok;
    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
    __ sra(temp_reg, BitsPerInt, temp_reg);
    __ cmp(arg_slots.as_register(), temp_reg);
    __ br(Assembler::equal, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ stop("arg_slots register not loaded as signed int");
    __ bind(L_ok);
  }
#endif

  // Make space on the stack for the inserted argument(s).
  // Then pull down everything shallower than argslot_reg.
  // The stacked return address gets pulled down with everything else.
  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
  //   sp -= size;
  //   for (temp = sp + size; temp < argslot; temp++)
  //     temp[-size] = temp[0]
  //   argslot -= size;
  BLOCK_COMMENT("insert_arg_slots {");
  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);

  __ mov(Gargs, temp_reg);  // source pointer for copy
  __ add(Gargs, offset, Gargs);

  {
    Label loop;
    __ BIND(loop);
    // pull one word down each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ add(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, argslot_reg);
    __ brx(Assembler::less, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot down, to point to the opened-up space.
  __ add(argslot_reg, offset, argslot_reg);
  BLOCK_COMMENT("} insert_arg_slots");
}


// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
  __ add(argslot_reg, offset, temp2_reg);
  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::less, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

  BLOCK_COMMENT("remove_arg_slots {");
  // Pull up everything shallower than argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
  //   for (temp = argslot-1; temp >= sp; --temp)
  //     temp[size] = temp[0]
  //   argslot += size;
  //   sp += size;
  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
  {
    Label loop;
    __ BIND(loop);
    // pull one word up each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ sub(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, Gargs);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot up, to point to the just-copied block.
  __ add(Gargs, offset, Gargs);
  // And adjust the argslot address to point at the deletion point.
  __ add(argslot_reg, offset, argslot_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);
  BLOCK_COMMENT("} remove_arg_slots");
}


#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
                              oopDesc* mh) {
  printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
  print_method_handle(mh);
}
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  // save: Gargs, O5_savedSP
  __ save_frame(16);
  __ set((intptr_t) adaptername, O0);
  __ mov(G3_method_handle, O1);
  __ mov(G3_method_handle, L3);
  __ mov(Gargs, L4);
  __ mov(G5_method_type, L5);
  __ call_VM_leaf(L7, CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  __ mov(L3, G3_method_handle);
  __ mov(L4, Gargs);
  __ mov(L5, G5_method_type);
  __ restore();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif // PRODUCT

// which conversion op types are implemented here?
int MethodHandles::adapter_conversion_ops_supported_mask() {
  return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
         //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
         );
  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}

//------------------------------------------------------------------------------
// MethodHandles::generate_method_handle_stub
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  // Here is the register state during an interpreted call,
  // as set up by generate_method_handle_interpreter_entry():
  // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
  // - G3: receiver method handle
  // - O5_savedSP: sender SP (must preserve)

  const Register O0_argslot = O0;
  const Register O1_scratch = O1;
  const Register O2_scratch = O2;
  const Register O3_scratch = O3;
  const Register G5_index   = G5;

  // Argument registers for _raise_exception.
  const Register O0_code     = O0;
  const Register O1_actual   = O1;
  const Register O2_required = O2;

  guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

  // Some handy addresses:
  Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));
  Address G5_method_fce(    G5_method,        in_bytes(methodOopDesc::from_compiled_offset()));

  Address G3_mh_vmtarget(   G3_method_handle, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes());

  Address G3_dmh_vmindex(   G3_method_handle, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes());

  Address G3_bmh_vmargslot( G3_method_handle, java_lang_invoke_BoundMethodHandle::vmargslot_offset_in_bytes());
  Address G3_bmh_argument(  G3_method_handle, java_lang_invoke_BoundMethodHandle::argument_offset_in_bytes());

  Address G3_amh_vmargslot( G3_method_handle, java_lang_invoke_AdapterMethodHandle::vmargslot_offset_in_bytes());
  Address G3_amh_argument ( G3_method_handle, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes());
  Address G3_amh_conversion(G3_method_handle, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());

  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

  if (have_entry(ek)) {
    __ nop();  // empty stubs make SG sick
    return;
  }

  address interp_entry = __ pc();

  trace_method_handle(_masm, entry_name(ek));

  switch ((int) ek) {
  case _raise_exception:
    {
      // Not a real MH entry, but rather shared code for raising an
      // exception.  Since we use the compiled entry, arguments are
      // expected in compiler argument registers.
      assert(raise_exception_method(), "must be set");
      assert(raise_exception_method()->from_compiled_entry(), "method must be linked");

      __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.

      Label L_no_method;
      // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
      __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
      __ ld_ptr(Address(G5_method, 0), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, L_no_method);
      __ delayed()->nop();

      const int jobject_oop_offset = 0;
      __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, L_no_method);
      __ delayed()->nop();

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fce, O3_scratch);  // jump to compiled entry
      __ delayed()->nop();

      // Do something that at least causes a valid throw from the interpreter.
      __ bind(L_no_method);
      __ unimplemented("call throw_WrongMethodType_entry");
    }
    break;

  case _invokestatic_mh:
  case _invokespecial_mh:
    {
      __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
      __ verify_oop(G5_method);
      // Same as TemplateTable::invokestatic or invokespecial,
      // minus the CP setup and profiling:
      if (ek == _invokespecial_mh) {
        // Must load & check the first argument before entering the target method.
        __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
        __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
        __ null_check(G3_method_handle);
        __ verify_oop(G3_method_handle);
      }
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokevirtual_mh:
    {
      // Same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      // Pick out the vtable index and receiver offset from the MH,
      // and then we can discard it:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      __ ldsw(G3_dmh_vmindex, G5_index);
      // Note:  The verifier allows us to ignore G3_mh_vmtarget.
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get target methodOop & entry point:
      const int base = instanceKlass::vtable_start_offset() * wordSize;
      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");

      __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
      __ add(O0_klass, G5_index, O0_klass);
      Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
      __ ld_ptr(vtable_entry_addr, G5_method);

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokeinterface_mh:
    {
      // Same as TemplateTable::invokeinterface,
      // minus the CP setup and profiling:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      Register O1_intf = O1_scratch;
      __ load_heap_oop(G3_mh_vmtarget, O1_intf);
      __ ldsw(G3_dmh_vmindex, G5_index);
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get interface:
      Label no_such_interface;
      __ verify_oop(O1_intf);
      __ lookup_interface_method(O0_klass, O1_intf,
                                 // Note: next two args must be the same:
                                 G5_index, G5_method,
                                 O2_scratch,
                                 O3_scratch,
                                 no_such_interface);

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();

      __ bind(no_such_interface);
      // Throw an exception.
      // For historical reasons, it will be IncompatibleClassChangeError.
      __ unimplemented("not tested yet");
      __ ld_ptr(Address(O1_intf, java_mirror_offset), O2_required);  // required interface
      __ mov(   O0_klass,                             O1_actual);    // bad receiver
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
      __ delayed()->mov(Bytecodes::_invokeinterface, O0_code);  // who is complaining?
    }
    break;

  case _bound_ref_mh:
  case _bound_int_mh:
  case _bound_long_mh:
  case _bound_ref_direct_mh:
  case _bound_int_direct_mh:
  case _bound_long_direct_mh:
    {
      const bool direct_to_method = (ek >= _bound_ref_direct_mh);
      BasicType arg_type  = T_ILLEGAL;
      int       arg_mask  = _INSERT_NO_MASK;
      int       arg_slots = -1;
      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);

      // Make room for the new argument:
      __ ldsw(G3_bmh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);

      // Store bound argument into the new stack slot:
      __ load_heap_oop(G3_bmh_argument, O1_scratch);
      if (arg_type == T_OBJECT) {
        __ st_ptr(O1_scratch, Address(O0_argslot, 0));
      } else {
        Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
        const int arg_size = type2aelembytes(arg_type);
        __ load_sized_value(prim_value_addr, O2_scratch, arg_size, is_signed_subword_type(arg_type));
        __ store_sized_value(O2_scratch, Address(O0_argslot, 0), arg_size);  // long store uses O2/O3 on !_LP64
      }

      if (direct_to_method) {
        __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
        __ verify_oop(G5_method);
        __ jump_indirect_to(G5_method_fie, O1_scratch);
        __ delayed()->nop();
      } else {
        __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);  // target is another MethodHandle
        __ verify_oop(G3_method_handle);
        __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
      }
    }
    break;

  case _adapter_retype_only:
  case _adapter_retype_raw:
    // Immediately jump to the next MH layer:
    __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
    __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    // This is OK when all parameter types widen.
    // It is also OK when a return type narrows.
    break;

  case _adapter_check_cast:
    {
      // Temps:
      Register G5_klass = G5_index;  // Interesting AMH data.

      // Check a reference argument before jumping to the next layer of MH:
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address vmarg = __ argument_address(O0_argslot);

      // What class are we casting to?
      __ load_heap_oop(G3_amh_argument, G5_klass);  // This is a Class object!
      __ load_heap_oop(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);

      Label done;
      __ ld_ptr(vmarg, O1_scratch);
      __ tst(O1_scratch);
      __ brx(Assembler::zero, false, Assembler::pn, done);  // No cast if null.
      __ delayed()->nop();
      __ load_klass(O1_scratch, O1_scratch);

      // Live at this point:
      // - G5_klass        :  klass required by the target method
      // - O1_scratch      :  argument klass to test
      // - G3_method_handle:  adapter method handle
      __ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);

      // If we get here, the type check failed!
      __ load_heap_oop(G3_amh_argument,        O2_required);  // required class
      __ ld_ptr(       vmarg,                  O1_actual);    // bad object
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
      __ delayed()->mov(Bytecodes::_checkcast, O0_code);      // who is complaining?
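
      // A successful check_klass_subtype (or a null argument) branches here: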
      __ bind(done);
      // Get the new MH:
      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_prim_to_prim:
  case _adapter_ref_to_prim:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_i2i:     // optimized subcase of adapt_prim_to_prim
//case _adapter_opt_f2i:     // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_l2i:     // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxi:  // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place conversion to int or an int subword.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address value;
      Address vmarg = __ argument_address(O0_argslot);
      bool value_left_justified = false;

      switch (ek) {
      case _adapter_opt_i2i:
        value = vmarg;
        break;
      case _adapter_opt_l2i:
        {
          // just delete the extra slot
#ifdef _LP64
          // In V9, longs are given 2 64-bit slots in the interpreter, but the
          // data is passed in only 1 slot.
          // Keep the second slot.
          __ add(Gargs, __ argument_offset(O0_argslot, -1), O0_argslot);
          remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
          value = Address(O0_argslot, 4);  // Get least-significant 32-bit of 64-bit value.
          vmarg = Address(O0_argslot, Interpreter::stackElementSize);
#else
          // Keep the first slot.
          __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
          remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
          value = Address(O0_argslot, 0);
          vmarg = value;
#endif
        }
        break;
      case _adapter_opt_unboxi:
        {
          // Load the value up from the heap.
          __ ld_ptr(vmarg, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
#ifdef ASSERT
          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
            if (is_subword_type(BasicType(bt)))
              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
          }
#endif
          __ null_check(O1_scratch, value_offset);
          value = Address(O1_scratch, value_offset);
#ifdef _BIG_ENDIAN
          // Values stored in objects are packed.
          value_left_justified = true;
#endif
        }
        break;
      default:
        ShouldNotReachHere();
      }

      // This check is required on _BIG_ENDIAN
      Register G5_vminfo = G5_index;
      __ ldsw(G3_amh_conversion, G5_vminfo);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");

      // Original 32-bit vmdata word must be of this form:
      // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
      __ lduw(value, O1_scratch);
      if (!value_left_justified)
        __ sll(O1_scratch, G5_vminfo, O1_scratch);
      Label zero_extend, done;
      __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
      __ br(Assembler::zero, false, Assembler::pn, zero_extend);
      __ delayed()->nop();

      // this path is taken for int->byte, int->short
      __ sra(O1_scratch, G5_vminfo, O1_scratch);
      __ ba(false, done);
      __ delayed()->nop();

      __ bind(zero_extend);
      // this is taken for int->char
      __ srl(O1_scratch, G5_vminfo, O1_scratch);

      __ bind(done);
      __ st(O1_scratch, vmarg);

      // Get the new MH:
      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_i2l:     // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxl:  // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place int-to-long or ref-to-long conversion.
      __ ldsw(G3_amh_vmargslot, O0_argslot);

      // On big-endian machine we duplicate the slot and store the MSW
      // in the first slot.
      __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);

      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);

      Address arg_lsw(O0_argslot, 0);
      Address arg_msw(O0_argslot, -Interpreter::stackElementSize);

      switch (ek) {
      case _adapter_opt_i2l:
        {
#ifdef _LP64
          __ ldsw(arg_lsw, O2_scratch);                 // Load LSW sign-extended
#else
          __ ldsw(arg_lsw, O3_scratch);                 // Load LSW sign-extended
          __ srlx(O3_scratch, BitsPerInt, O2_scratch);  // Move MSW value to lower 32-bits for std
#endif
          __ st_long(O2_scratch, arg_msw);              // Uses O2/O3 on !_LP64
        }
        break;
      case _adapter_opt_unboxl:
        {
          // Load the value up from the heap.
          __ ld_ptr(arg_lsw, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
          __ null_check(O1_scratch, value_offset);
          __ ld_long(Address(O1_scratch, value_offset), O2_scratch);  // Uses O2/O3 on !_LP64
          __ st_long(O2_scratch, arg_msw);
        }
        break;
      default:
        ShouldNotReachHere();
      }

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_f2d:  // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_d2f:  // optimized subcase of adapt_prim_to_prim
    {
      // perform an in-place floating primitive conversion
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_prim_to_ref:
    __ unimplemented(entry_name(ek));  // %%% FIXME: NYI
    break;

  case _adapter_swap_args:
  case _adapter_rot_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_swap_1:
  case _adapter_opt_swap_2:
  case _adapter_opt_rot_1_up:
  case _adapter_opt_rot_1_down:
  case _adapter_opt_rot_2_up:
  case _adapter_opt_rot_2_down:
    {
      int swap_bytes = 0, rotate = 0;
      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);

      // 'argslot' is the position of the first argument to swap.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'vminfo' is the second.
      Register O1_destslot = O1_scratch;
      __ ldsw(G3_amh_conversion, O1_destslot);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
      __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
      __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);

      if (!rotate) {
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ ld_ptr(Address(O0_argslot,  i), O2_scratch);
          __ ld_ptr(Address(O1_destslot, i), O3_scratch);
          __ st_ptr(O3_scratch, Address(O0_argslot,  i));
          __ st_ptr(O2_scratch, Address(O1_destslot, i));
        }
      } else {
        // Save the first chunk, which is going to get overwritten.
        switch (swap_bytes) {
        case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
        case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); // fall-thru
        case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
        default: ShouldNotReachHere();
        }

        if (rotate > 0) {
          // Rotate upward.
          __ sub(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot > destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be above destination (upward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot down to destslot, copying contiguous data upwards.
          // Pseudo-code:
          //   argslot  = src_addr - swap_bytes
          //   destslot = dest_addr
          //   while (argslot >= destslot) {
          //     *(argslot + swap_bytes) = *(argslot + 0);
          //     argslot--;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
          __ sub(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        } else {
          __ add(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot < destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be below destination (downward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot up to destslot, copying contiguous data downwards.
          // Pseudo-code:
          //   argslot  = src_addr + swap_bytes
          //   destslot = dest_addr
          //   while (argslot <= destslot) {
          //     *(argslot - swap_bytes) = *(argslot + 0);
          //     argslot++;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
          __ add(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        }

        // Store the original first chunk into the destination slot, now free.
        switch (swap_bytes) {
        case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
        case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
        case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
        default: ShouldNotReachHere();
        }
      }

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_dup_args:
    {
      // 'argslot' is the position of the first argument to duplicate.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is negative number of words to duplicate.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      // Remember the old Gargs (argslot[0]).
      Register O1_oldarg = O1_scratch;
      __ mov(Gargs, O1_oldarg);

      // Move Gargs down to make room for dups.
      __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
      __ add(Gargs, G5_stack_move, Gargs);

      // Compute the new Gargs (argslot[0]).
      Register O2_newarg = O2_scratch;
      __ mov(Gargs, O2_newarg);

      // Copy from oldarg[0...] down to newarg[0...]
      // Pseudo-code:
      //   O1_oldarg  = old-Gargs
      //   O2_newarg  = new-Gargs
      //   O0_argslot = argslot
      //   while (O2_newarg < O1_oldarg) *O2_newarg = *O0_argslot++
      Label loop;
      __ bind(loop);
      __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
      __ st_ptr(O3_scratch, Address(O2_newarg, 0));
      __ add(O0_argslot, wordSize, O0_argslot);
      __ add(O2_newarg,  wordSize, O2_newarg);
      __ cmp(O2_newarg, O1_oldarg);
      __ brx(Assembler::less, false, Assembler::pt, loop);
      __ delayed()->nop();  // FILLME

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_drop_args:
    {
      // 'argslot' is the position of the first argument to nuke.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is number of words to drop.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_collect_args:
    __ unimplemented(entry_name(ek));  // %%% FIXME: NYI
    break;

  case _adapter_spread_args:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_spread_0:
  case _adapter_opt_spread_1:
  case _adapter_opt_spread_more:
    {
      // spread an array out into a group of arguments
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_flyby:
  case _adapter_ricochet:
    __ unimplemented(entry_name(ek));  // %%% FIXME: NYI
    break;

  default:
    ShouldNotReachHere();
  }

  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  __ unimplemented(entry_name(ek));  // %%% FIXME: NYI

  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}