/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define STOP(error) block_comment(error); __ stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
static RegisterOrConstant constant(int value) {
  return RegisterOrConstant(value);
}

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
  if (VerifyMethodHandles)
    verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
                 "MH argument is a Class");
  __ movptr(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
}

#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
  assert(x != 0, err_msg("%s should be nonzero", xname));
  return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else //ASSERT
#define NONZERO(x) (x)
#endif //ASSERT

#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj, SystemDictionary::WKID klass_id,
                                 const char* error_message) {
  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Register temp = rdi;
  Register temp2 = noreg;
  LP64_ONLY(temp2 = rscratch1);  // used by MacroAssembler::cmpptr
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj);
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, L_bad);
  __ push(temp); if (temp2 != noreg)  __ push(temp2);
#define UNPUSH { if (temp2 != noreg)  __ pop(temp2);  __ pop(temp); }
  __ load_klass(temp, obj);
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
  intptr_t super_check_offset = klass->super_check_offset();
  __ movptr(temp, Address(temp, super_check_offset));
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
  UNPUSH;
  __ bind(L_bad);
  __ STOP(error_message);
  __ BIND(L_ok);
  UNPUSH;
  BLOCK_COMMENT("} verify_klass");
}
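// A note on the check above: rather than a full subtype walk, verify_klass
// compares the object's klass directly against the well-known klass, then
// probes the single supertype slot at the expected klass's
// super_check_offset().  In C++ terms the generated code corresponds
// roughly to this sketch (not the authoritative Klass::is_subtype_of
// logic):
//
//   Klass* k = obj->klass();
//   bool ok = (k == expected)
//          || (*(Klass**)((address)k + expected->super_check_offset()) == expected);
//
// One load and two compares suffice for the well-known klasses verified
// here, which is all this debug-only check needs.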
verify_klass"); 95 } 96 97 void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) { 98 Label L; 99 BLOCK_COMMENT("verify_ref_kind {"); 100 __ movl(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes()))); 101 __ shrl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT); 102 __ andl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK); 103 __ cmpl(temp, ref_kind); 104 __ jcc(Assembler::equal, L); 105 { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal); 106 jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind); 107 if (ref_kind == JVM_REF_invokeVirtual || 108 ref_kind == JVM_REF_invokeSpecial) 109 // could do this for all ref_kinds, but would explode assembly code size 110 trace_method_handle(_masm, buf); 111 __ STOP(buf); 112 } 113 BLOCK_COMMENT("} verify_ref_kind"); 114 __ bind(L); 115 } 116 117 #endif //ASSERT 118 119 void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, 120 bool for_compiler_entry) { 121 assert(method == rbx, "interpreter calling convention"); 122 __ verify_method_ptr(method); 123 124 if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) { 125 Label run_compiled_code; 126 // JVMTI events, such as single-stepping, are implemented partly by avoiding running 127 // compiled code in threads for which the event is enabled. Check here for 128 // interp_only_mode if these events CAN be enabled. 129 #ifdef _LP64 130 Register rthread = r15_thread; 131 #else 132 Register rthread = temp; 133 __ get_thread(rthread); 134 #endif 135 // interp_only is an int, on little endian it is sufficient to test the byte only 136 // Is a cmpl faster? 137 __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0); 138 __ jccb(Assembler::zero, run_compiled_code); 139 __ jmp(Address(method, Method::interpreter_entry_offset())); 140 __ BIND(run_compiled_code); 141 } 142 143 const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() : 144 Method::from_interpreted_offset(); 145 __ jmp(Address(method, entry_offset)); 146 } 147 148 void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, 149 Register recv, Register method_temp, 150 Register temp2, 151 bool for_compiler_entry) { 152 BLOCK_COMMENT("jump_to_lambda_form {"); 153 // This is the initial entry point of a lazy method handle. 154 // After type checking, it picks up the invoker from the LambdaForm. 
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2);
  assert(recv != noreg, "required register");
  assert(method_temp == rbx, "required register for loading method");

  //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
  __ verify_oop(method_temp);
  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
  __ verify_oop(method_temp);
  // the following assumes that a Method* is normally compressed in the vmtarget field:
  __ movptr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    __ movptr(temp2, Address(method_temp, Method::const_offset()));
    __ load_sized_value(temp2,
                        Address(temp2, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    Label L;
    __ cmpptr(recv, __ argument_address(temp2, -1));
    __ jcc(Assembler::equal, L);
    __ movptr(rax, __ argument_address(temp2, -1));
    __ STOP("receiver not on stack");
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
    // They all allow an appendix argument.
    __ hlt();           // empty stubs make SG sick
    return NULL;
  }

  // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // rbx: Method*
  // rdx: argument locator (parameter slot count, added to rsp)
  // rcx: used as temp to hold mh or receiver
  // rax, rdi: garbage temps, blown away
  Register rdx_argp   = rdx;   // argument list ptr, live on error paths
  Register rax_temp   = rax;
  Register rcx_mh     = rcx;   // MH receiver; dies quickly and is recycled
  Register rbx_method = rbx;   // eventual target of this invocation

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  if (VerifyMethodHandles) {
    Label L;
    BLOCK_COMMENT("verify_intrinsic_id {");
    __ cmpb(Address(rbx_method, Method::intrinsic_id_offset_in_bytes()), (int) iid);
    __ jcc(Assembler::equal, L);
    if (iid == vmIntrinsics::_linkToVirtual ||
        iid == vmIntrinsics::_linkToSpecial) {
      // could do this for all kinds, but would explode assembly code size
      trace_method_handle(_masm, "bad Method*::intrinsic_id");
    }
    __ STOP("bad Method*::intrinsic_id");
    __ bind(L);
    BLOCK_COMMENT("} verify_intrinsic_id");
  }
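  // In the interpreted calling convention assumed below, the incoming
  // arguments sit on the expression stack above the return address, with
  // argument 0 in the slot farthest from rsp.  Given the parameter slot
  // count N loaded from the ConstMethod, the leading MH/receiver argument
  // is therefore found at __ argument_address(N, -1) (a sketch of the
  // layout; MacroAssembler::argument_address encapsulates the exact
  // element scaling).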
  // First task:  Find out how big the argument list is.
  Address rdx_first_arg_addr;
  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
    __ movptr(rdx_argp, Address(rbx_method, Method::const_offset()));
    __ load_sized_value(rdx_argp,
                        Address(rdx_argp, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    rdx_first_arg_addr = __ argument_address(rdx_argp, -1);
  } else {
    DEBUG_ONLY(rdx_argp = noreg);
  }

  if (!is_signature_polymorphic_static(iid)) {
    __ movptr(rcx_mh, rdx_first_arg_addr);
    DEBUG_ONLY(rdx_argp = noreg);
  }

  // rdx_first_arg_addr is live!

  trace_method_handle_interpreter_entry(_masm, iid);

  if (iid == vmIntrinsics::_invokeBasic) {
    generate_method_handle_dispatch(_masm, iid, rcx_mh, noreg, not_for_compiler_entry);

  } else {
    // Adjust argument list by popping the trailing MemberName argument.
    Register rcx_recv = noreg;
    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
      __ movptr(rcx_recv = rcx, rdx_first_arg_addr);
    }
    DEBUG_ONLY(rdx_argp = noreg);
    Register rbx_member = rbx_method;  // MemberName ptr; incoming method ptr is dead now
    __ pop(rax_temp);           // return address
    __ pop(rbx_member);         // extract last argument
    __ push(rax_temp);          // re-push return address
    generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry);
  }

  return entry_point;
}
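// Stack motion performed just above for the linkTo* entries (a sketch; the
// stack grows toward lower addresses and the return address is on top):
//
//   before:  rsp -> [ret. addr][MemberName][argN-1]...[arg0]
//   after:   rsp -> [ret. addr][argN-1]...[arg0]      (MemberName now in rbx)
//
// The trailing MemberName is not part of the target method's own signature,
// so it must be popped before dispatch.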
void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  Register rbx_method = rbx;   // eventual target of this invocation
  // temps used in this code are not used in *either* compiled or interpreted calling sequences
#ifdef _LP64
  Register temp1 = rscratch1;
  Register temp2 = rscratch2;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
    assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
  }
#else
  Register temp1 = (for_compiler_entry ? rsi : rdx);
  Register temp2 = rdi;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : rcx), "only valid assignment");
    assert_different_registers(temp1, rcx, rdx);
    assert_different_registers(temp2, rcx, rdx);
    assert_different_registers(temp3, rcx, rdx);
  }
#endif
  else {
    assert_different_registers(temp1, temp2, temp3, saved_last_sp_register());  // don't trash lastSP
  }
  assert_different_registers(temp1, temp2, temp3, receiver_reg);
  assert_different_registers(temp1, temp2, temp3, member_reg);

  if (iid == vmIntrinsics::_invokeBasic) {
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry);

  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName),
                   "MemberName required for invokeVirtual etc.");
    }

    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check(receiver_reg);
      } else {
        // load receiver klass itself
        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop(temp2_defc, member_clazz);
        load_klass_from_Class(_masm, temp2_defc);
        __ verify_klass_ptr(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);
        // If we get here, the type check failed!
        __ STOP("receiver class disagrees with MemberName.clazz");
        __ bind(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
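    // In effect, the check_receiver block above asserts (a C++-level sketch,
    // not the literal runtime code):
    //
    //   Klass* defc = java_lang_Class::as_Klass(member.clazz());
    //   guarantee(receiver->klass()->is_subtype_of(defc),
    //             "receiver class disagrees with MemberName.clazz");
    //
    // MacroAssembler::check_klass_subtype branches to L_ok on success and
    // falls through into the STOP when the subtype relation cannot be
    // established.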
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  rsi/r13 - interpreter linkage (if interpreted)
    //  rcx, rdx, rsi, rdi, r8, r9 - compiler arguments (if compiled)

    Label L_incompatible_class_change_error;
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
      }
      __ movptr(rbx_method, member_vmtarget);
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
      }
      __ movptr(rbx_method, member_vmtarget);
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ movptr(temp2_index, member_vmindex);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ cmpl(temp2_index, 0);
        __ jcc(Assembler::greaterEqual, L_index_ok);
        __ STOP("no virtual index");
        __ BIND(L_index_ok);
      }

      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
      // at this point.  And VerifyMethodHandles has already checked clazz, if needed.

      // get target Method* & entry point
      __ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method);
      break;
    }

    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
      }

      Register temp3_intf = temp3;
      __ load_heap_oop(temp3_intf, member_clazz);
      load_klass_from_Class(_masm, temp3_intf);
      __ verify_klass_ptr(temp3_intf);

      Register rbx_index = rbx_method;
      __ movptr(rbx_index, member_vmindex);
      if (VerifyMethodHandles) {
        Label L;
        __ cmpl(rbx_index, 0);
        __ jcc(Assembler::greaterEqual, L);
        __ STOP("invalid vtable index for MH.invokeInterface");
        __ bind(L);
      }

      // given intf, index, and recv klass, dispatch to the implementation method
      __ lookup_interface_method(temp1_recv_klass, temp3_intf,
                                 // note: next two args must be the same:
                                 rbx_index, rbx_method,
                                 temp2,
                                 L_incompatible_class_change_error);
      break;
    }

    default:
      fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
      break;
    }
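    // The _linkToVirtual case above reduces to a single indexed load;
    // roughly (a sketch, with the exact base computation captured by
    // MacroAssembler::lookup_virtual_method):
    //
    //   rbx_method = *(Method**)((address)recv_klass
    //                            + InstanceKlass::vtable_start_offset() * wordSize
    //                            + vmindex * vtableEntry::size() * wordSize
    //                            + vtableEntry::method_offset_in_bytes());
    //
    // The _linkToInterface case cannot be a simple load:
    // lookup_interface_method scans the receiver klass's itable for the
    // declaring interface and jumps to L_incompatible_class_change_error
    // if the receiver does not implement it.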
    // Live at this point:
    //   rbx_method
    //   rsi/r13 (if interpreted)

    // After figuring out which concrete method to call, jump into it.
    // Note that this works in the interpreter with no data motion.
    // But the compiled version will require that rcx_recv be shifted out.
    __ verify_method_ptr(rbx_method);
    jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry);

    if (iid == vmIntrinsics::_linkToInterface) {
      __ bind(L_incompatible_class_change_error);
      __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
    }
  }
}

#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
                              oop mh,
                              intptr_t* saved_regs,
                              intptr_t* entry_sp) {
  // called as a leaf from native code: do not block the JVM!
  bool has_mh = (strstr(adaptername, "/static") == NULL &&
                 strstr(adaptername, "linkTo") == NULL);  // static linkers don't have MH
  const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
  tty->print_cr("MH %s %s=" PTR_FORMAT " sp=" PTR_FORMAT,
                adaptername, mh_reg_name,
                mh, entry_sp);

  if (Verbose) {
    tty->print_cr("Registers:");
    const int saved_regs_count = RegisterImpl::number_of_registers;
    for (int i = 0; i < saved_regs_count; i++) {
      Register r = as_Register(i);
      // The registers are stored in reverse order on the stack (by pusha).
      tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]);
      if ((i + 1) % 4 == 0) {
        tty->cr();
      } else {
        tty->print(", ");
      }
    }
    tty->cr();

    {
      // dumping last frame with frame::describe

      JavaThread* p = JavaThread::active();

      ResourceMark rm;
      PRESERVE_EXCEPTION_MARK; // may not be needed, but safer and inexpensive here
      FrameValues values;

      // Note: We want to allow trace_method_handle from any call site.
      // While trace_method_handle creates a frame, it may be entered
      // without a PC on the stack top (e.g. not just after a call).
      // Walking that frame could lead to failures due to that invalid PC.
      // => carefully detect that frame when doing the stack walking

      // Current C frame
      frame cur_frame = os::current_frame();

      // Robust search of trace_calling_frame (independent of inlining).
      // Assumes saved_regs comes from a pusha in the trace_calling_frame.
      assert(cur_frame.sp() < saved_regs, "registers not saved on stack?");
      frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
      while (trace_calling_frame.fp() < saved_regs) {
        trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
      }

      // safely create a frame and call frame::describe
      intptr_t *dump_sp = trace_calling_frame.sender_sp();
      intptr_t *dump_fp = trace_calling_frame.link();

      bool walkable = has_mh;  // whether the traced frame should be walkable

      if (walkable) {
        // The previous definition of walkable may have to be refined
        // if new call sites cause the next frame constructor to start
        // failing. Alternatively, frame constructors could be
        // modified to support the current or future non walkable
        // frames (but this is more intrusive and is not considered as
        // part of this RFE, which will instead use a simpler output).
        frame dump_frame = frame(dump_sp, dump_fp);
        dump_frame.describe(values, 1);
      } else {
        // Stack may not be walkable (invalid PC above FP):
        // Add descriptions without building a Java frame to avoid issues
        values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
        values.describe(-1, dump_sp, "sp for #1");
      }
      values.describe(-1, entry_sp, "raw top of stack");

      tty->print_cr("Stack layout:");
      values.print(p);
    }
    if (has_mh && mh->is_oop()) {
      mh->print();
      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
          java_lang_invoke_MethodHandle::form(mh)->print();
      }
    }
  }
}
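// Layout of the on-stack argument record built by
// MethodHandles::trace_method_handle below, matching the field order of
// MethodHandleStubArguments (a sketch; the stack grows toward lower
// addresses, so the fields are pushed in reverse declaration order):
//
//   rsp -> [ adaptername ][ mh (rcx) ][ saved_regs (rbx) ][ entry_sp (rbp) ]
//
// Passing rsp as the single C argument then lets the wrapper read the
// record as a MethodHandleStubArguments*.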
// The stub wraps the arguments in a struct on the stack to avoid
// dealing with the different calling conventions for passing 6
// arguments.
struct MethodHandleStubArguments {
  const char* adaptername;
  oopDesc* mh;
  intptr_t* saved_regs;
  intptr_t* entry_sp;
};
void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
  trace_method_handle_stub(args->adaptername,
                           args->mh,
                           args->saved_regs,
                           args->entry_sp);
}

void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  __ enter();
  __ andptr(rsp, -16);  // align stack if needed for FPU state
  __ pusha();
  __ mov(rbx, rsp);  // for retrieving saved_regs
  // Note: saved_regs must be in the entered frame for the
  // robust stack walking implemented in trace_method_handle_stub.

  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
  __ increment(rsp, -2 * wordSize);
  if (UseSSE >= 2) {
    __ movdbl(Address(rsp, 0), xmm0);
  } else if (UseSSE == 1) {
    __ movflt(Address(rsp, 0), xmm0);
  } else {
    __ fst_d(Address(rsp, 0));
  }

  // Incoming state:
  // rcx: method handle
  //
  // To avoid calling convention issues, build a record on the stack
  // and pass the pointer to that instead.
  __ push(rbp);               // entry_sp (with extra align space)
  __ push(rbx);               // pusha saved_regs
  __ push(rcx);               // mh
  __ push(rcx);               // slot for adaptername
  __ movptr(Address(rsp, 0), (intptr_t) adaptername);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
  __ increment(rsp, sizeof(MethodHandleStubArguments));

  if (UseSSE >= 2) {
    __ movdbl(xmm0, Address(rsp, 0));
  } else if (UseSSE == 1) {
    __ movflt(xmm0, Address(rsp, 0));
  } else {
    __ fld_d(Address(rsp, 0));
  }
  __ increment(rsp, 2 * wordSize);

  __ popa();
  __ leave();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif //PRODUCT