src/cpu/x86/vm/c1_Runtime1_x86.cpp
*** old/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Mon Feb 21 05:03:53 2011
--- new/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Mon Feb 21 05:03:53 2011

*** 246,260 **** --- 246,263 ----
    // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
    // happen and will assert if the stack size we create is misaligned
  #ifdef _LP64
    align_dummy_0, align_dummy_1,
  #endif // _LP64
!   dummy1, SLOT2(dummy1H)                                                  // 0, 4
!   dummy2, SLOT2(dummy2H)                                                  // 8, 12
!   // Two temps to be used as needed by users of save/restore callee registers
!   temp_2_off, SLOT2(temp_2H_off)                                          // 16, 20
!   temp_1_off, SLOT2(temp_1H_off)                                          // 24, 28
! #ifdef _WIN64
+   // Windows always allocates space for its argument registers (see
+   // frame::arg_reg_save_area_bytes).
+   arg_reg_save_1, arg_reg_save_1H,                                        // 0, 4
+   arg_reg_save_2, arg_reg_save_2H,                                        // 8, 12
+   arg_reg_save_3, arg_reg_save_3H,                                        // 16, 20
+   arg_reg_save_4, arg_reg_save_4H,                                        // 24, 28
+ #endif // _WIN64
    xmm_regs_as_doubles_off,                                                // 32
    float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160
    fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,         // 224
    // fpu_state_end_off is exclusive
    fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),               // 352
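The enum above describes the register-save area in 4-byte stack slots; on Win64 the first four slot pairs now shadow the argument-register spill area the ABI requires. As a rough standalone illustration (hypothetical names and a miniature layout, not HotSpot code), a slot index converts to a byte offset by multiplying with the slot size, exactly as the stub code does with VMRegImpl::stack_slot_size:

    #include <cstdio>

    // Hypothetical stand-in for VMRegImpl::stack_slot_size (4 bytes per slot).
    static const int stack_slot_size = 4;

    // Miniature layout enum in the same style: each enumerator is a slot index.
    enum demo_save_layout {
      arg_reg_save_1, arg_reg_save_1H,   // bytes 0, 4
      arg_reg_save_2, arg_reg_save_2H,   // bytes 8, 12
      arg_reg_save_3, arg_reg_save_3H,   // bytes 16, 20
      arg_reg_save_4, arg_reg_save_4H,   // bytes 24, 28
      xmm_regs_as_doubles_off            // byte 32, matching the comment above
    };

    int main() {
      // A slot index becomes a byte offset: slot * stack_slot_size.
      printf("xmm save area starts at byte %d\n",
             (int)xmm_regs_as_doubles_off * stack_slot_size);
      return 0;
    }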
*** 280,307 **** --- 283,293 ----
    rdx_off, SLOT2(rdxH_off)                                                // 464, 468
    rcx_off, SLOT2(rcxH_off)                                                // 472, 476
    rax_off, SLOT2(raxH_off)                                                // 480, 484
    saved_rbp_off, SLOT2(saved_rbpH_off)                                    // 488, 492
    return_off, SLOT2(returnH_off)                                          // 496, 500
!   reg_save_frame_size,  // As noted: neglects any parameters to runtime   // 504
!
! #ifdef _WIN64
!   c_rarg0_off = rcx_off,
! #else
!   c_rarg0_off = rdi_off,
! #endif // WIN64
!
!   // equates
!
!   // illegal instruction handler
!   continue_dest_off = temp_1_off,
!
!   // deoptimization equates
!   fp0_off = float_regs_as_doubles_off, // slot for java float/double return value
!   xmm0_off = xmm_regs_as_doubles_off,  // slot for java float/double return value
!   deopt_type = temp_2_off,             // slot for type of deopt in progress
!   ret_type = temp_1_off                // slot for return type
+   reg_save_frame_size   // As noted: neglects any parameters to runtime   // 504
  };

  // Save off registers which might be killed by calls into the runtime.
*** 403,417 **** --- 389,398 ----
  static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                     bool save_fpu_registers = true) {
    __ block_comment("save_live_registers");

    // 64bit passes the args in regs to the c++ runtime
    int frame_size_in_slots = reg_save_frame_size NOT_LP64(+ num_rt_args); // args + thread
    // frame_size = round_to(frame_size, 4);
    sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word );

    __ pusha();         // integer registers

    // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
    // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
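The NOT_LP64(+ num_rt_args) above encodes the calling-convention split: on 32-bit the runtime-call arguments (args + thread) occupy stack slots and enlarge the frame, while on 64-bit they travel in registers and add nothing. A minimal sketch of that computation with the macro expanded into an explicit flag (names are illustrative, not the real declarations):

    // Illustrative only: models frame_size_in_slots = reg_save_frame_size
    // NOT_LP64(+ num_rt_args) with the LP64 choice made an explicit parameter.
    inline int frame_size_in_slots(int reg_save_frame_size,
                                   int num_rt_args,   // args + thread
                                   bool lp64) {
      // 64-bit passes the args in registers, so they add no stack slots.
      return reg_save_frame_size + (lp64 ? 0 : num_rt_args);
    }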
*** 640,662 **** --- 621,682 ----
    return oop_maps;
  }

! void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps,
!                                          OopMap* oop_map, bool save_fpu_registers) {
! OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
+   __ block_comment("generate_handle_exception");
+
    // incoming parameters
    const Register exception_oop = rax;
    const Register exception_pc = rdx;
    // other registers used in this stub
-   const Register real_return_addr = rbx;
    const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
-
-   __ block_comment("generate_handle_exception");
+   // Save registers, if required.
+   OopMapSet* oop_maps = new OopMapSet();
+   OopMap* oop_map = NULL;
+   switch (id) {
+   case forward_exception_id:
+     // We're handling an exception in the context of a compiled frame.
+     // The registers have been saved in the standard places. Perform
+     // an exception lookup in the caller and dispatch to the handler
+     // if found. Otherwise unwind and dispatch to the caller's
+     // exception handler.
+     oop_map = generate_oop_map(sasm, 1 /*thread*/);
+
+     // load and clear pending exception oop into RAX
+     __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
+     __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+
+     // load issuing PC (the return address for this stub) into rdx
+     __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));
+
+     // make sure that the vm_results are cleared (may be unnecessary)
+     __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
+     __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
+     break;
+   case handle_exception_nofpu_id:
+   case handle_exception_id:
+     // At this point all registers MAY be live.
+     oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
+     break;
+   case handle_exception_from_callee_id: {
+     // At this point all registers except exception oop (RAX) and
+     // exception pc (RDX) are dead.
+     const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
+     oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
+     sasm->set_frame_size(frame_size);
+     WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
+     break;
+   }
+   default:  ShouldNotReachHere();
+   }

  #ifdef TIERED
    // C2 can leave the fpu stack dirty
-   if (UseSSE < 2 ) {
+   if (UseSSE < 2) {
      __ empty_FPU_stack();
    }
  #endif // TIERED

    // verify that only rax, and rdx is valid at this time
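The new entry point folds three formerly separate save strategies into one switch on the StubID. A standalone model of that dispatch shape follows; the types and helpers below are invented stand-ins (the real code emits assembly through the StubAssembler, and the nofpu variant simply skips the FPU state):

    enum StubID {
      forward_exception_id,
      handle_exception_nofpu_id,
      handle_exception_id,
      handle_exception_from_callee_id
    };

    struct OopMap {};  // stand-in for the real OopMap

    // Invented helpers standing in for generate_oop_map / save_live_registers /
    // the minimal-frame setup in the real stub.
    OopMap* describe_already_saved_registers();
    OopMap* save_all_registers(bool save_fpu);
    OopMap* describe_minimal_frame();

    OopMap* save_phase(StubID id) {
      switch (id) {
      case forward_exception_id:
        // Caller saved everything in the standard places; only describe it.
        return describe_already_saved_registers();
      case handle_exception_nofpu_id:
      case handle_exception_id:
        // All registers may be live; the nofpu variant skips the FPU state.
        return save_all_registers(/*save_fpu=*/ id != handle_exception_nofpu_id);
      case handle_exception_from_callee_id:
        // Only RAX (exception oop) and RDX (exception pc) are live.
        return describe_minimal_frame();
      default:
        return nullptr;  // ShouldNotReachHere() in the real code
      }
    }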
*** 686,734 **** --- 706,758 ----
    // save exception oop and issuing pc into JavaThread
    // (exception handler will load it from here)
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc);

-   // save real return address (pc that called this stub)
-   __ movptr(real_return_addr, Address(rbp, 1*BytesPerWord));
-   __ movptr(Address(rsp, temp_1_off * VMRegImpl::stack_slot_size), real_return_addr);
-
    // patch throwing pc into return address (has bci & oop map)
    __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

    // compute the exception handler.
    // the exception oop and the throwing pc are read from the fields in JavaThread
    int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
    oop_maps->add_gc_map(call_offset, oop_map);

-   // rax,: handler address
+   // rax: handler address
    //      will be the deopt blob if nmethod was deoptimized while we looked up
    //      handler regardless of whether handler existed in the nmethod.

    // only rax, is valid at this time, all other registers have been destroyed by the runtime call
    __ invalidate_registers(false, true, true, true, true, true);

  #ifdef ASSERT
    // Do we have an exception handler in the nmethod?
    Label done;
    __ testptr(rax, rax);
    __ jcc(Assembler::notZero, done);
    __ stop("no handler found");
    __ bind(done);
  #endif

    // exception handler found
!   // patch the return address -> the stub will directly return to the exception handler
+   // patch the return address, this stub will directly return to the exception handler
    __ movptr(Address(rbp, 1*BytesPerWord), rax);

!   // restore registers
!   restore_live_registers(sasm, save_fpu_registers);
+   switch (id) {
+   case forward_exception_id:
+   case handle_exception_nofpu_id:
+   case handle_exception_id:
+     // Restore the registers that were saved at the beginning.
+     restore_live_registers(sasm, id == handle_exception_nofpu_id);
+     break;
+   case handle_exception_from_callee_id:
+     // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
+     // since we do a leave anyway.

-   // return to exception handler
+     // Pop the return address since we are possibly changing SP (restoring from BP).
      __ leave();
!     __ ret(0);
!     __ pop(rcx);
+
+     // Restore SP from BP if the exception PC is a method handle call site.
+     NOT_LP64(__ get_thread(thread);)
+     __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
+     __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
+     __ jmp(rcx);  // jump to exception handler
+     break;
+   default:  ShouldNotReachHere();
+   }
+
+   return oop_maps;
  }

  void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
    // incoming parameters
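The from-callee epilogue cannot simply ret: once the return address has been popped, the cmovptr may replace RSP with the value saved at the method handle call site, after which a plain ret would read the wrong slot. Hence pop first, jump last. A standalone model of the stack-pointer selection the cmpl/cmovptr pair performs (invented names, not HotSpot code):

    #include <cstdint>

    // Stand-in for the JavaThread flag the stub tests with cmpl.
    struct ThreadModel {
      int32_t is_method_handle_return;
    };

    // Models: __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save).
    // Returns the stack pointer the handler should run with: the saved
    // method-handle SP if the flag is set, the current SP otherwise.
    inline uintptr_t select_handler_sp(const ThreadModel& t,
                                       uintptr_t current_sp,
                                       uintptr_t rbp_mh_SP_save) {
      return (t.is_method_handle_return != 0) ? rbp_mh_SP_save : current_sp;
    }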
*** 789,799 **** --- 813,823 ----
    // get throwing pc (= return address).
    // rdx has been destroyed by the call, so it must be set again
    // the pop is also necessary to simulate the effect of a ret(0)
    __ pop(exception_pc);

!   // Restore SP from BP if the exception PC is a MethodHandle call site.
!   // Restore SP from BP if the exception PC is a method handle call site.
    NOT_LP64(__ get_thread(thread);)
    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
    __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);

    // continue at exception handler (return address removed)
*** 932,942 **** --- 956,965 ----
    restore_live_registers(sasm);
    __ leave();
    __ ret(0);

    return oop_maps;
  }

  OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
*** 950,988 **** --- 973,985 ----
    // stub code & info for the different stubs
    OopMapSet* oop_maps = NULL;
    switch (id) {
      case forward_exception_id:
        {
!         // we're handling an exception in the context of a compiled
!         // frame. The registers have been saved in the standard
!         // places. Perform an exception lookup in the caller and
!         // dispatch to the handler if found. Otherwise unwind and
!         // dispatch to the callers exception handler.
!
!         const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
!         const Register exception_oop = rax;
!         const Register exception_pc = rdx;
!
!         // load pending exception oop into rax,
!         __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
!         // clear pending exception
!         __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
!
!         // load issuing PC (the return address for this stub) into rdx
!         __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));
!
!         // make sure that the vm_results are cleared (may be unnecessary)
!         __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
!         __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
!
!         // verify that that there is really a valid exception in rax,
!         __ verify_not_null_oop(exception_oop);
!
!         oop_maps = new OopMapSet();
!         OopMap* oop_map = generate_oop_map(sasm, 1);
!         generate_handle_exception(sasm, oop_maps, oop_map);
!         __ stop("should not reach here");
+         oop_maps = generate_handle_exception(id, sasm);
+         __ leave();
+         __ ret(0);
        }
        break;

      case new_instance_id:
      case fast_new_instance_id:
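After generate_handle_exception returns, the handler address already sits in the stub's own return-address slot (the movptr to Address(rbp, 1*BytesPerWord) in the earlier hunk), so the plain leave/ret added here dispatches straight to the handler instead of stopping. A small standalone model of that slot patching (illustrative only, not HotSpot code):

    #include <cstdint>

    // Models __ movptr(Address(rbp, 1*BytesPerWord), rax): with a frame-pointer
    // chain, the word just above the saved RBP is the return address, so
    // overwriting it makes the subsequent 'leave; ret' land in the handler.
    inline void patch_return_slot(uintptr_t* rbp, uintptr_t handler_entry) {
      rbp[1] = handler_entry;  // slot at rbp + 1*BytesPerWord
    }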
*** 1313,1329 **** --- 1310,1328 ----
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
!     save_fpu_registers = false;
!     // fall through
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
!       oop_maps = new OopMapSet();
!       OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers);
!       generate_handle_exception(sasm, oop_maps, oop_map, save_fpu_registers);
+       oop_maps = generate_handle_exception(id, sasm);
      }
      break;
+
+   case handle_exception_from_callee_id:
+     { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
+       oop_maps = generate_handle_exception(id, sasm);
+     }
+     break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
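Both cases now follow one pattern: open a StubFrame and delegate to generate_handle_exception, which emits the code and hands back the oop maps, with the id selecting the save/restore strategy. A rough model of that caller side (StubFrame and friends below are invented stand-ins, not the real declarations):

    // Invented stand-ins for the real StubAssembler/StubFrame machinery.
    struct StubAssembler {};
    struct OopMapSet {};

    struct StubFrame {
      // RAII: emits the frame prologue here and the epilogue in the destructor,
      // mirroring how the real StubFrame brackets each stub's code.
      StubFrame(StubAssembler*, const char* /*name*/, bool /*dont_gc_arguments*/) {}
      ~StubFrame() {}
    };

    OopMapSet* generate_handle_exception_model(int id, StubAssembler* sasm);

    OopMapSet* emit_handle_exception_stub(int id, StubAssembler* sasm,
                                          bool from_callee) {
      StubFrame f(sasm, from_callee ? "handle_exception_from_callee"
                                    : "handle_exception",
                  /*dont_gc_arguments*/ true);
      // One shared generator covers both stubs; the id picks save/restore.
      return generate_handle_exception_model(id, sasm);
    }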
