/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define STOP(error) block_comment(error); __ stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
  if (VerifyMethodHandles)
    verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
                 "MH argument is a Class");
  __ movptr(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
}

#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
  assert(x != 0, err_msg("%s should be nonzero", xname));
  return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else //ASSERT
#define NONZERO(x) (x)
#endif //ASSERT

#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj, SystemDictionary::WKID klass_id,
                                 const char* error_message) {
  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Register temp = rdi;
  Register temp2 = noreg;
  LP64_ONLY(temp2 = rscratch1);  // used by MacroAssembler::cmpptr
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj);
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, L_bad);
  __ push(temp); if (temp2 != noreg)  __ push(temp2);
#define UNPUSH { if (temp2 != noreg)  __ pop(temp2);  __ pop(temp); }
  __ load_klass(temp, obj);
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
  intptr_t super_check_offset = klass->super_check_offset();
  __ movptr(temp, Address(temp, super_check_offset));
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
  UNPUSH;
  __ bind(L_bad);
  __ STOP(error_message);
  __ BIND(L_ok);
  UNPUSH;
  BLOCK_COMMENT("} verify_klass");
}

void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
  Label L;
  BLOCK_COMMENT("verify_ref_kind {");
  __ movl(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())));
  __ shrl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
  __ andl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
  __ cmpl(temp, ref_kind);
  __ jcc(Assembler::equal, L);
  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
    if (ref_kind == JVM_REF_invokeVirtual ||
        ref_kind == JVM_REF_invokeSpecial)
      // could do this for all ref_kinds, but would explode assembly code size
      trace_method_handle(_masm, buf);
    __ STOP(buf);
  }
  BLOCK_COMMENT("} verify_ref_kind");
  __ bind(L);
}

#endif //ASSERT

void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
                                            bool for_compiler_entry) {
  assert(method == rbx, "interpreter calling convention");

  Label L_no_such_method;
  __ testptr(rbx, rbx);
  __ jcc(Assembler::zero, L_no_such_method);

  __ verify_method_ptr(method);

  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
#ifdef _LP64
    Register rthread = r15_thread;
#else
    Register rthread = temp;
    __ get_thread(rthread);
#endif
    // interp_only is an int, on little endian it is sufficient to test the byte only
    // Is a cmpl faster?
    __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0);
    __ jccb(Assembler::zero, run_compiled_code);
    __ jmp(Address(method, Method::interpreter_entry_offset()));
    __ BIND(run_compiled_code);
  }

  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();
  __ jmp(Address(method, entry_offset));

  __ bind(L_no_such_method);
  __ jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
}

void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2);
  assert(recv != noreg, "required register");
  assert(method_temp == rbx, "required register for loading method");

  //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
  __ verify_oop(method_temp);
  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
  __ verify_oop(method_temp);
  // the following assumes that a Method* is normally compressed in the vmtarget field:
  __ movptr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    __ movptr(temp2, Address(method_temp, Method::const_offset()));
    __ load_sized_value(temp2,
                        Address(temp2, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    Label L;
    __ cmpptr(recv, __ argument_address(temp2, -1));
    __ jcc(Assembler::equal, L);
    __ movptr(rax, __ argument_address(temp2, -1));
    __ STOP("receiver not on stack");
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
    // They all allow an appendix argument.
    __ hlt();           // empty stubs make SG sick
    return NULL;
  }

  // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // rbx: Method*
  // rdx: argument locator (parameter slot count, added to rsp)
  // rcx: used as temp to hold mh or receiver
  // rax, rdi: garbage temps, blown away
  Register rdx_argp   = rdx;   // argument list ptr, live on error paths
  Register rax_temp   = rax;
  Register rcx_mh     = rcx;   // MH receiver; dies quickly and is recycled
  Register rbx_method = rbx;   // eventual target of this invocation

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  if (VerifyMethodHandles) {
    Label L;
    BLOCK_COMMENT("verify_intrinsic_id {");
    __ cmpb(Address(rbx_method, Method::intrinsic_id_offset_in_bytes()), (int) iid);
    __ jcc(Assembler::equal, L);
    if (iid == vmIntrinsics::_linkToVirtual ||
        iid == vmIntrinsics::_linkToSpecial) {
      // could do this for all kinds, but would explode assembly code size
      trace_method_handle(_masm, "bad Method*::intrinsic_id");
    }
    __ STOP("bad Method*::intrinsic_id");
    __ bind(L);
    BLOCK_COMMENT("} verify_intrinsic_id");
  }

  // First task:  Find out how big the argument list is.
  Address rdx_first_arg_addr;
  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
    __ movptr(rdx_argp, Address(rbx_method, Method::const_offset()));
    __ load_sized_value(rdx_argp,
                        Address(rdx_argp, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    rdx_first_arg_addr = __ argument_address(rdx_argp, -1);
  } else {
    DEBUG_ONLY(rdx_argp = noreg);
  }

  if (!is_signature_polymorphic_static(iid)) {
    __ movptr(rcx_mh, rdx_first_arg_addr);
    DEBUG_ONLY(rdx_argp = noreg);
  }

  // rdx_first_arg_addr is live!

  trace_method_handle_interpreter_entry(_masm, iid);

  if (iid == vmIntrinsics::_invokeBasic) {
    generate_method_handle_dispatch(_masm, iid, rcx_mh, noreg, not_for_compiler_entry);

  } else {
    // Adjust argument list by popping the trailing MemberName argument.
    Register rcx_recv = noreg;
    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
      __ movptr(rcx_recv = rcx, rdx_first_arg_addr);
    }
    DEBUG_ONLY(rdx_argp = noreg);
    Register rbx_member = rbx_method;  // MemberName ptr; incoming method ptr is dead now
    __ pop(rax_temp);           // return address
    __ pop(rbx_member);         // extract last argument
    __ push(rax_temp);          // re-push return address
    generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry);
  }

  return entry_point;
}

void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  Register rbx_method = rbx;   // eventual target of this invocation
  // temps used in this code are not used in *either* compiled or interpreted calling sequences
#ifdef _LP64
  Register temp1 = rscratch1;
  Register temp2 = rscratch2;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
    assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
  }
#else
  Register temp1 = (for_compiler_entry ? rsi : rdx);
  Register temp2 = rdi;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : rcx), "only valid assignment");
    assert_different_registers(temp1, rcx, rdx);
    assert_different_registers(temp2, rcx, rdx);
    assert_different_registers(temp3, rcx, rdx);
  }
#endif
  else {
    assert_different_registers(temp1, temp2, temp3, saved_last_sp_register());  // don't trash lastSP
  }
  assert_different_registers(temp1, temp2, temp3, receiver_reg);
  assert_different_registers(temp1, temp2, temp3, member_reg);

  if (iid == vmIntrinsics::_invokeBasic) {
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry);

  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName),
                   "MemberName required for invokeVirtual etc.");
    }

    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check(receiver_reg);
      } else {
        // load receiver klass itself
        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop(temp2_defc, member_clazz);
        load_klass_from_Class(_masm, temp2_defc);
        __ verify_klass_ptr(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);
        // If we get here, the type check failed!
        __ STOP("receiver class disagrees with MemberName.clazz");
        __ bind(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  rsi/r13 - interpreter linkage (if interpreted)
    //  rcx, rdx, rsi, rdi, r8, r9 - compiler arguments (if compiled)

    Label L_incompatible_class_change_error;
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
      }
      __ movptr(rbx_method, member_vmtarget);
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
      }
      __ movptr(rbx_method, member_vmtarget);
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ movptr(temp2_index, member_vmindex);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ cmpl(temp2_index, 0);
        __ jcc(Assembler::greaterEqual, L_index_ok);
        __ STOP("no virtual index");
        __ BIND(L_index_ok);
      }

      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
      // at this point.  And VerifyMethodHandles has already checked clazz, if needed.

      // get target Method* & entry point
      __ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method);
      break;
    }

    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
      }

      Register temp3_intf = temp3;
      __ load_heap_oop(temp3_intf, member_clazz);
      load_klass_from_Class(_masm, temp3_intf);
      __ verify_klass_ptr(temp3_intf);

      Register rbx_index = rbx_method;
      __ movptr(rbx_index, member_vmindex);
      if (VerifyMethodHandles) {
        Label L;
        __ cmpl(rbx_index, 0);
        __ jcc(Assembler::greaterEqual, L);
        __ STOP("invalid vtable index for MH.invokeInterface");
        __ bind(L);
      }

      // given intf, index, and recv klass, dispatch to the implementation method
      __ lookup_interface_method(temp1_recv_klass, temp3_intf,
                                 // note: next two args must be the same:
                                 rbx_index, rbx_method,
                                 temp2,
                                 L_incompatible_class_change_error);
      break;
    }

    default:
      fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
      break;
    }

    // Live at this point:
    //   rbx_method
    //   rsi/r13 (if interpreted)

    // After figuring out which concrete method to call, jump into it.
    // Note that this works in the interpreter with no data motion.
    // But the compiled version will require that rcx_recv be shifted out.
    __ verify_method_ptr(rbx_method);
    jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry);

    if (iid == vmIntrinsics::_linkToInterface) {
      __ bind(L_incompatible_class_change_error);
      __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
    }
  }
}

#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
                              oop mh,
                              intptr_t* saved_regs,
                              intptr_t* entry_sp) {
  // called as a leaf from native code: do not block the JVM!
  bool has_mh = (strstr(adaptername, "/static") == NULL &&
                 strstr(adaptername, "linkTo") == NULL);  // static linkers don't have MH
  const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
  tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT,
                adaptername, mh_reg_name,
                (void *)mh, entry_sp);

  if (Verbose) {
    tty->print_cr("Registers:");
    const int saved_regs_count = RegisterImpl::number_of_registers;
    for (int i = 0; i < saved_regs_count; i++) {
      Register r = as_Register(i);
      // The registers are stored in reverse order on the stack (by pusha).
      tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]);
      if ((i + 1) % 4 == 0) {
        tty->cr();
      } else {
        tty->print(", ");
      }
    }
    tty->cr();

    {
      // dumping last frame with frame::describe

      JavaThread* p = JavaThread::active();

      ResourceMark rm;
      PRESERVE_EXCEPTION_MARK; // may not be needed, but safer and inexpensive here
      FrameValues values;

      // Note: We want to allow trace_method_handle from any call site.
      // While trace_method_handle creates a frame, it may be entered
      // without a PC on the stack top (e.g. not just after a call).
      // Walking that frame could lead to failures due to that invalid PC.
      // => carefully detect that frame when doing the stack walking

      // Current C frame
      frame cur_frame = os::current_frame();

      // Robust search of trace_calling_frame (independent of inlining).
      // Assumes saved_regs comes from a pusha in the trace_calling_frame.
      assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
      frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
      while (trace_calling_frame.fp() < saved_regs) {
        trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
      }

      // safely create a frame and call frame::describe
      intptr_t *dump_sp = trace_calling_frame.sender_sp();
      intptr_t *dump_fp = trace_calling_frame.link();

      bool walkable = has_mh; // whether the traced frame should be walkable

      if (walkable) {
        // The previous definition of walkable may have to be refined
        // if new call sites cause the next frame constructor to start
        // failing. Alternatively, frame constructors could be
        // modified to support the current or future non walkable
        // frames (but this is more intrusive and is not considered as
        // part of this RFE, which will instead use a simpler output).
        frame dump_frame = frame(dump_sp, dump_fp);
        dump_frame.describe(values, 1);
      } else {
        // Stack may not be walkable (invalid PC above FP):
        // Add descriptions without building a Java frame to avoid issues
        values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
        values.describe(-1, dump_sp, "sp for #1");
      }
      values.describe(-1, entry_sp, "raw top of stack");

      tty->print_cr("Stack layout:");
      values.print(p);
    }
    if (has_mh && mh->is_oop()) {
      mh->print();
      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
          java_lang_invoke_MethodHandle::form(mh)->print();
      }
    }
  }
}

// The stub wraps the arguments in a struct on the stack to avoid
// dealing with the different calling conventions for passing 6
// arguments.
struct MethodHandleStubArguments {
  const char* adaptername;
  oopDesc* mh;
  intptr_t* saved_regs;
  intptr_t* entry_sp;
};
void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
  trace_method_handle_stub(args->adaptername,
                           args->mh,
                           args->saved_regs,
                           args->entry_sp);
}

void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  __ enter();
  __ andptr(rsp, -16); // align stack if needed for FPU state
  __ pusha();
  __ mov(rbx, rsp); // for retrieving saved_regs
  // Note: saved_regs must be in the entered frame for the
  // robust stack walking implemented in trace_method_handle_stub.

  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
  __ increment(rsp, -2 * wordSize);
  if (UseSSE >= 2) {
    __ movdbl(Address(rsp, 0), xmm0);
  } else if (UseSSE == 1) {
    __ movflt(Address(rsp, 0), xmm0);
  } else {
    __ fst_d(Address(rsp, 0));
  }

  // Incoming state:
  // rcx: method handle
  //
  // To avoid calling convention issues, build a record on the stack
  // and pass the pointer to that instead.
  __ push(rbp);               // entry_sp (with extra align space)
  __ push(rbx);               // pusha saved_regs
  __ push(rcx);               // mh
  __ push(rcx);               // slot for adaptername
  __ movptr(Address(rsp, 0), (intptr_t) adaptername);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
  __ increment(rsp, sizeof(MethodHandleStubArguments));

  if (UseSSE >= 2) {
    __ movdbl(xmm0, Address(rsp, 0));
  } else if (UseSSE == 1) {
    __ movflt(xmm0, Address(rsp, 0));
  } else {
    __ fld_d(Address(rsp, 0));
  }
  __ increment(rsp, 2 * wordSize);

  __ popa();
  __ leave();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif //PRODUCT