/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define STOP(error) block_comment(error); __ stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
  if (VerifyMethodHandles)
    verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
                 "MH argument is a Class");
  __ movptr(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
}

#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
  assert(x != 0, err_msg("%s should be nonzero", xname));
  return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else //ASSERT
#define NONZERO(x) (x)
#endif //ASSERT

#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj, SystemDictionary::WKID klass_id,
                                 const char* error_message) {
  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Register temp = rdi;
  Register temp2 = noreg;
  LP64_ONLY(temp2 = rscratch1);  // used by MacroAssembler::cmpptr
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj);
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, L_bad);
  __ push(temp); if (temp2 != noreg)  __ push(temp2);
#define UNPUSH { if (temp2 != noreg)  __ pop(temp2);  __ pop(temp); }
  __ load_klass(temp, obj);
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
  intptr_t super_check_offset = klass->super_check_offset();
  __ movptr(temp, Address(temp, super_check_offset));
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
  UNPUSH;
  __ bind(L_bad);
  __ STOP(error_message);
  __ BIND(L_ok);
  UNPUSH;
  BLOCK_COMMENT("} verify_klass");
}

void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
  Label L;
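  // Note: the MemberName.flags word packs the JVM_REF_* reference kind into a
  // bitfield; the MN_REFERENCE_KIND_SHIFT/MASK constants mirror the Java-side
  // encoding in java.lang.invoke.MemberName, so extracting the kind below is
  // just a shift followed by a mask.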
BLOCK_COMMENT("verify_ref_kind {"); 95 __ movl(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes()))); 96 __ shrl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT); 97 __ andl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK); 98 __ cmpl(temp, ref_kind); 99 __ jcc(Assembler::equal, L); 100 { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal); 101 jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind); 102 if (ref_kind == JVM_REF_invokeVirtual || 103 ref_kind == JVM_REF_invokeSpecial) 104 // could do this for all ref_kinds, but would explode assembly code size 105 trace_method_handle(_masm, buf); 106 __ STOP(buf); 107 } 108 BLOCK_COMMENT("} verify_ref_kind"); 109 __ bind(L); 110 } 111 112 #endif //ASSERT 113 114 void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, 115 bool for_compiler_entry) { 116 assert(method == rbx, "interpreter calling convention"); 117 118 // Label no_such_method; 119 // __ testptr(rbx, rbx); 120 // __ jcc(Assembler::zero, no_such_method); 121 122 __ verify_method_ptr(method); 123 124 if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) { 125 Label run_compiled_code; 126 // JVMTI events, such as single-stepping, are implemented partly by avoiding running 127 // compiled code in threads for which the event is enabled. Check here for 128 // interp_only_mode if these events CAN be enabled. 129 #ifdef _LP64 130 Register rthread = r15_thread; 131 #else 132 Register rthread = temp; 133 __ get_thread(rthread); 134 #endif 135 136 // interp_only is an int, on little endian it is sufficient to test the byte only 137 // Is a cmpl faster? 138 __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0); 139 __ jccb(Assembler::zero, run_compiled_code); 140 141 __ jmp(Address(method, Method::interpreter_entry_offset())); 142 __ BIND(run_compiled_code); 143 } 144 145 const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() : 146 Method::from_interpreted_offset(); 147 __ jmp(Address(method, entry_offset)); 148 149 // __ bind(no_such_method); 150 // __ jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry())); 151 } 152 153 void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, 154 Register recv, Register method_temp, 155 Register temp2, 156 bool for_compiler_entry) { 157 BLOCK_COMMENT("jump_to_lambda_form {"); 158 // This is the initial entry point of a lazy method handle. 159 // After type checking, it picks up the invoker from the LambdaForm. 
  assert_different_registers(recv, method_temp, temp2);
  assert(recv != noreg, "required register");
  assert(method_temp == rbx, "required register for loading method");

  //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
  __ verify_oop(method_temp);
  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
  __ verify_oop(method_temp);
  // the following assumes that a Method* is normally compressed in the vmtarget field:
  __ movptr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    __ movptr(temp2, Address(method_temp, Method::const_offset()));
    __ load_sized_value(temp2,
                        Address(temp2, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    Label L;
    __ cmpptr(recv, __ argument_address(temp2, -1));
    __ jcc(Assembler::equal, L);
    __ movptr(rax, __ argument_address(temp2, -1));
    __ STOP("receiver not on stack");
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
    // They all allow an appendix argument.
    __ hlt();           // empty stubs make SG sick
    return NULL;
  }

  // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // rbx: Method*
  // rdx: argument locator (parameter slot count, added to rsp)
  // rcx: used as temp to hold mh or receiver
  // rax, rdi: garbage temps, blown away
  Register rdx_argp   = rdx;   // argument list ptr, live on error paths
  Register rax_temp   = rax;
  Register rcx_mh     = rcx;   // MH receiver; dies quickly and is recycled
  Register rbx_method = rbx;   // eventual target of this invocation

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  if (VerifyMethodHandles) {
    Label L;
    BLOCK_COMMENT("verify_intrinsic_id {");
    __ cmpb(Address(rbx_method, Method::intrinsic_id_offset_in_bytes()), (int) iid);
    __ jcc(Assembler::equal, L);
    if (iid == vmIntrinsics::_linkToVirtual ||
        iid == vmIntrinsics::_linkToSpecial) {
      // could do this for all kinds, but would explode assembly code size
      trace_method_handle(_masm, "bad Method*::intrinsic_id");
    }
    __ STOP("bad Method*::intrinsic_id");
    __ bind(L);
    BLOCK_COMMENT("} verify_intrinsic_id");
  }

  // First task:  Find out how big the argument list is.
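  // (The count is read from ConstMethod::_size_of_parameters, an unsigned u2;
  // argument_address(count, -1) then names the interpreter stack slot where the
  // leading MH/receiver argument should sit.)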
  Address rdx_first_arg_addr;
  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
    __ movptr(rdx_argp, Address(rbx_method, Method::const_offset()));
    __ load_sized_value(rdx_argp,
                        Address(rdx_argp, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    rdx_first_arg_addr = __ argument_address(rdx_argp, -1);
  } else {
    DEBUG_ONLY(rdx_argp = noreg);
  }

  if (!is_signature_polymorphic_static(iid)) {
    __ movptr(rcx_mh, rdx_first_arg_addr);
    DEBUG_ONLY(rdx_argp = noreg);
  }

  // rdx_first_arg_addr is live!

  trace_method_handle_interpreter_entry(_masm, iid);

  if (iid == vmIntrinsics::_invokeBasic) {
    generate_method_handle_dispatch(_masm, iid, rcx_mh, noreg, not_for_compiler_entry);

  } else {
    // Adjust argument list by popping the trailing MemberName argument.
    Register rcx_recv = noreg;
    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
      __ movptr(rcx_recv = rcx, rdx_first_arg_addr);
    }
    DEBUG_ONLY(rdx_argp = noreg);
    Register rbx_member = rbx_method;  // MemberName ptr; incoming method ptr is dead now
    __ pop(rax_temp);           // return address
    __ pop(rbx_member);         // extract last argument
    __ push(rax_temp);          // re-push return address
    generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry);
  }

  return entry_point;
}

void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  Register rbx_method = rbx;   // eventual target of this invocation
  // temps used in this code are not used in *either* compiled or interpreted calling sequences
#ifdef _LP64
  Register temp1 = rscratch1;
  Register temp2 = rscratch2;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
    assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
  }
#else
  Register temp1 = (for_compiler_entry ? rsi : rdx);
  Register temp2 = rdi;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : rcx), "only valid assignment");
    assert_different_registers(temp1, rcx, rdx);
    assert_different_registers(temp2, rcx, rdx);
    assert_different_registers(temp3, rcx, rdx);
  }
#endif
  else {
    assert_different_registers(temp1, temp2, temp3, saved_last_sp_register());  // don't trash lastSP
  }
  assert_different_registers(temp1, temp2, temp3, receiver_reg);
  assert_different_registers(temp1, temp2, temp3, member_reg);

  if (iid == vmIntrinsics::_invokeBasic) {
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry);

  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName),
                   "MemberName required for invokeVirtual etc.");
    }

    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check(receiver_reg);
      } else {
        // load receiver klass itself
        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop(temp2_defc, member_clazz);
        load_klass_from_Class(_masm, temp2_defc);
        __ verify_klass_ptr(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);
        // If we get here, the type check failed!
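        // (check_klass_subtype branches to L_ok on success and falls through on
        // failure, so reaching the STOP below means the receiver's klass is not
        // a subtype of the MemberName's defining class.)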
        __ STOP("receiver class disagrees with MemberName.clazz");
        __ bind(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  rsi/r13 - interpreter linkage (if interpreted)
    //  rcx, rdx, rsi, rdi, r8, r9 - compiler arguments (if compiled)

    Label L_incompatible_class_change_error;
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
      }
      __ movptr(rbx_method, member_vmtarget);
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
      }
      __ movptr(rbx_method, member_vmtarget);
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ movptr(temp2_index, member_vmindex);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ cmpl(temp2_index, 0);
        __ jcc(Assembler::greaterEqual, L_index_ok);
        __ STOP("no virtual index");
        __ BIND(L_index_ok);
      }

      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
      // at this point.  And VerifyMethodHandles has already checked clazz, if needed.

      // get target Method* & entry point
      __ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method);
      break;
    }

    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
      }

      Register temp3_intf = temp3;
      __ load_heap_oop(temp3_intf, member_clazz);
      load_klass_from_Class(_masm, temp3_intf);
      __ verify_klass_ptr(temp3_intf);

      Register rbx_index = rbx_method;
      __ movptr(rbx_index, member_vmindex);
      if (VerifyMethodHandles) {
        Label L;
        __ cmpl(rbx_index, 0);
        __ jcc(Assembler::greaterEqual, L);
        __ STOP("invalid itable index for MH.invokeInterface");
        __ bind(L);
      }

      // given intf, index, and recv klass, dispatch to the implementation method
      __ lookup_interface_method(temp1_recv_klass, temp3_intf,
                                 // note: next two args must be the same:
                                 rbx_index, rbx_method,
                                 temp2,
                                 L_incompatible_class_change_error);
      break;
    }

    default:
      fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
      break;
    }

    // Live at this point:
    //   rbx_method
    //   rsi/r13 (if interpreted)

    // After figuring out which concrete method to call, jump into it.
    // Note that this works in the interpreter with no data motion.
    // But the compiled version will require that rcx_recv be shifted out.
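    // (Each linkTo case above reduces to loading a Method* into rbx_method:
    // directly from MemberName.vmtarget for special/static, via a vtable row
    // for virtual, or via an itable scan for interface; the jump below is the
    // common tail for all of them.)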
    __ verify_method_ptr(rbx_method);
    jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry);

    if (iid == vmIntrinsics::_linkToInterface) {
      __ bind(L_incompatible_class_change_error);
      __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
    }
  }
}

#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
                              oop mh,
                              intptr_t* saved_regs,
                              intptr_t* entry_sp) {
  // called as a leaf from native code: do not block the JVM!
  bool has_mh = (strstr(adaptername, "/static") == NULL &&
                 strstr(adaptername, "linkTo") == NULL);  // static linkers don't have MH
  const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
  tty->print_cr("MH %s %s=" PTR_FORMAT " sp=" PTR_FORMAT,
                adaptername, mh_reg_name,
                mh, entry_sp);

  if (Verbose) {
    tty->print_cr("Registers:");
    const int saved_regs_count = RegisterImpl::number_of_registers;
    for (int i = 0; i < saved_regs_count; i++) {
      Register r = as_Register(i);
      // The registers are stored in reverse order on the stack (by pusha).
      tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]);
      if ((i + 1) % 4 == 0) {
        tty->cr();
      } else {
        tty->print(", ");
      }
    }
    tty->cr();

    {
      // dumping last frame with frame::describe

      JavaThread* p = JavaThread::active();

      ResourceMark rm;
      PRESERVE_EXCEPTION_MARK; // may not be needed, but safer and inexpensive here
      FrameValues values;

      // Note: We want to allow trace_method_handle from any call site.
      // While trace_method_handle creates a frame, it may be entered
      // without a PC on the stack top (e.g. not just after a call).
      // Walking that frame could lead to failures due to that invalid PC.
      // => carefully detect that frame when doing the stack walking

      // Current C frame
      frame cur_frame = os::current_frame();

      // Robust search of trace_calling_frame (independent of inlining).
      // Assumes saved_regs comes from a pusha in the trace_calling_frame.
      assert(cur_frame.sp() < saved_regs, "registers not saved on stack?");
      frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
      while (trace_calling_frame.fp() < saved_regs) {
        trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
      }

      // safely create a frame and call frame::describe
      intptr_t *dump_sp = trace_calling_frame.sender_sp();
      intptr_t *dump_fp = trace_calling_frame.link();

      bool walkable = has_mh; // whether the traced frame should be walkable

      if (walkable) {
        // The previous definition of walkable may have to be refined
        // if new call sites cause the next frame constructor to start
        // failing.  Alternatively, frame constructors could be
        // modified to support the current or future non walkable
        // frames (but this is more intrusive and is not considered as
        // part of this RFE, which will instead use a simpler output).
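        // (In the walkable case frame::describe gets an (sp, fp) pair taken
        // from the caller's frame; the non-walkable branch below avoids
        // constructing a frame at all, since an invalid pc above fp would
        // make it unsafe to parse.)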
        frame dump_frame = frame(dump_sp, dump_fp);
        dump_frame.describe(values, 1);
      } else {
        // Stack may not be walkable (invalid PC above FP):
        // Add descriptions without building a Java frame to avoid issues
        values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
        values.describe(-1, dump_sp, "sp for #1");
      }
      values.describe(-1, entry_sp, "raw top of stack");

      tty->print_cr("Stack layout:");
      values.print(p);
    }
    if (has_mh && mh->is_oop()) {
      mh->print();
      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
          java_lang_invoke_MethodHandle::form(mh)->print();
      }
    }
  }
}

// The stub wraps the arguments in a struct on the stack to avoid
// dealing with the different calling conventions for passing 6
// arguments.
struct MethodHandleStubArguments {
  const char* adaptername;
  oopDesc* mh;
  intptr_t* saved_regs;
  intptr_t* entry_sp;
};
void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
  trace_method_handle_stub(args->adaptername,
                           args->mh,
                           args->saved_regs,
                           args->entry_sp);
}

void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  __ enter();
  __ andptr(rsp, -16); // align stack if needed for FPU state
  __ pusha();
  __ mov(rbx, rsp); // for retrieving saved_regs
  // Note: saved_regs must be in the entered frame for the
  // robust stack walking implemented in trace_method_handle_stub.

  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
  __ increment(rsp, -2 * wordSize);
  if (UseSSE >= 2) {
    __ movdbl(Address(rsp, 0), xmm0);
  } else if (UseSSE == 1) {
    __ movflt(Address(rsp, 0), xmm0);
  } else {
    __ fst_d(Address(rsp, 0));
  }

  // Incoming state:
  // rcx: method handle
  //
  // To avoid calling convention issues, build a record on the stack
  // and pass the pointer to that instead.
  // The pushes below fill in MethodHandleStubArguments in reverse field
  // order, so memory ascending from rsp reads adaptername, mh, saved_regs,
  // entry_sp, matching the struct layout.
  __ push(rbp);               // entry_sp (with extra align space)
  __ push(rbx);               // pusha saved_regs
  __ push(rcx);               // mh
  __ push(rcx);               // slot for adaptername
  __ movptr(Address(rsp, 0), (intptr_t) adaptername);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
  __ increment(rsp, sizeof(MethodHandleStubArguments));

  if (UseSSE >= 2) {
    __ movdbl(xmm0, Address(rsp, 0));
  } else if (UseSSE == 1) {
    __ movflt(xmm0, Address(rsp, 0));
  } else {
    __ fld_d(Address(rsp, 0));
  }
  __ increment(rsp, 2 * wordSize);

  __ popa();
  __ leave();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif //PRODUCT