src/cpu/x86/vm/sharedRuntime_x86_64.cpp

rev 6361 : [mq]: 8041934-method_exit

Old version:

2467     __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2468     // get old displaced header
2469     __ movptr(old_hdr, Address(rax, 0));
2470 
2471     // Atomically swap old header if oop still contains the stack lock
2472     if (os::is_MP()) {
2473       __ lock();
2474     }
2475     __ cmpxchgptr(old_hdr, Address(obj_reg, 0));
2476     __ jcc(Assembler::notEqual, slow_path_unlock);
2477 
2478     // slow path re-enters here
2479     __ bind(unlock_done);
2480     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2481       restore_native_result(masm, ret_type, stack_slots);
2482     }
2483 
2484     __ bind(done);
2485 
2486   }
2487   {
2488     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2489     save_native_result(masm, ret_type, stack_slots);
2490     __ mov_metadata(c_rarg1, method());
2491     __ call_VM_leaf(
2492          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2493          r15_thread, c_rarg1);
2494     restore_native_result(masm, ret_type, stack_slots);
2495   }
2496 
2497   __ reset_last_Java_frame(false, true);
2498 
2499   // Unpack oop result
2500   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2501       Label L;
2502       __ testptr(rax, rax);
2503       __ jcc(Assembler::zero, L);
2504       __ movptr(rax, Address(rax, 0));
2505       __ bind(L);
2506       __ verify_oop(rax);

New version (rev 6361):

2467     __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2468     // get old displaced header
2469     __ movptr(old_hdr, Address(rax, 0));
2470 
2471     // Atomically swap old header if oop still contains the stack lock
2472     if (os::is_MP()) {
2473       __ lock();
2474     }
2475     __ cmpxchgptr(old_hdr, Address(obj_reg, 0));
2476     __ jcc(Assembler::notEqual, slow_path_unlock);
2477 
2478     // slow path re-enters here
2479     __ bind(unlock_done);
2480     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2481       restore_native_result(masm, ret_type, stack_slots);
2482     }
2483 
2484     __ bind(done);
2485 
2486   }
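
The fast path above releases a stack lock by publishing the saved displaced
header back into the object header with a locked cmpxchg. A minimal C++ sketch
of the semantics, using a GCC builtin in place of the emitted lock/cmpxchgptr
pair (illustrative pseudocode, not HotSpot source; fast_unlock is a
hypothetical name):

    #include <stdint.h>

    // 'stack_lock' is the address of the BasicLock slot (rax after the lea);
    // 'displaced' is the saved header loaded from that slot (old_hdr).
    // The CAS succeeds only while the object header still points at our
    // stack lock; on failure the lock was inflated and we must take
    // slow_path_unlock.
    static bool fast_unlock(volatile intptr_t* obj_header,
                            intptr_t stack_lock, intptr_t displaced) {
      return __sync_bool_compare_and_swap(obj_header, stack_lock, displaced);
    }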
2487 
2488   {
2489     // Normally we do not post method_entry and method_exit events from
2490     // compiled code, only from the interpreter. If method_entry/exit
2491     // events are switched on at runtime, we will deoptimize everything
2492     // on the stack (see VM_EnterInterpOnlyMode) and call method_entry/exit
2493     // from the interpreter. But when we do that, we will not deoptimize
2494     // this native wrapper frame. Thus we have an extra check here to see
2495     // if we are now in interp_only_mode and, if so, post the jvmti
2496     // method_exit callback ourselves.
2497     Label skip_jvmti_method_exit;
2498     __ cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
2499     __ jccb(Assembler::zero, skip_jvmti_method_exit);
2500 
2501     save_native_result(masm, ret_type, stack_slots);
2502     __ mov_metadata(c_rarg1, method());
2503     __ call_VM_leaf(
2504          CAST_FROM_FN_PTR(address, SharedRuntime::jvmti_method_exit),
2505          r15_thread, c_rarg1);
2506     restore_native_result(masm, ret_type, stack_slots);
2507     __ bind(skip_jvmti_method_exit);
2508   }
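
The guarded block added in this revision boils down to the following runtime
logic. This is a sketch only: is_interp_only_mode() mirrors the JavaThread
accessor whose flag the cmpb above reads, and save/restore_native_result stand
in for the macro-assembler helpers:

    // Post the method-exit event from the native wrapper only when the
    // thread runs in interp-only mode; VM_EnterInterpOnlyMode never
    // deoptimizes this wrapper frame, so the interpreter cannot post it.
    if (thread->is_interp_only_mode()) {    // cmpb(..., 0); jccb(zero, skip)
      save_native_result();                 // the leaf call clobbers rax/xmm0
      SharedRuntime::jvmti_method_exit(thread, method);
      restore_native_result();
    }                                       // bind(skip_jvmti_method_exit)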
2509 
2510   {
2511     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2512     save_native_result(masm, ret_type, stack_slots);
2513     __ mov_metadata(c_rarg1, method());
2514     __ call_VM_leaf(
2515          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2516          r15_thread, c_rarg1);
2517     restore_native_result(masm, ret_type, stack_slots);
2518   }
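
SkipIfEqual gives the DTrace block the same shape: its constructor emits a
compare of the DTraceMethodProbes flag byte plus a conditional jump over the
scoped code, and its destructor binds the skip label. The net effect, sketched:

    // DTrace probe code is always emitted but jumped over at runtime
    // unless the DTraceMethodProbes flag is set.
    if (DTraceMethodProbes) {
      save_native_result();
      SharedRuntime::dtrace_method_exit(thread, method);
      restore_native_result();
    }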
2519 
2520   __ reset_last_Java_frame(false, true);
2521 
2522   // Unpack oop result
2523   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2524       Label L;
2525       __ testptr(rax, rax);
2526       __ jcc(Assembler::zero, L);
2527       __ movptr(rax, Address(rax, 0));
2528       __ bind(L);
2529       __ verify_oop(rax);
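
The unpack step converts the JNI handle the native method returned into a raw
oop: a non-null handle is dereferenced once, a null handle maps to a null oop.
Roughly, with unpack_oop_result as a hypothetical helper:

    // rax holds the jobject (an oop*) on return from the native call.
    oop unpack_oop_result(jobject handle) {
      if (handle == NULL) return NULL;  // testptr(rax, rax); jcc(zero, L)
      return *(oop*)handle;             // movptr(rax, Address(rax, 0))
    }                                   // verify_oop(rax) checks the result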