hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp

rev 611 : Merge

*** 1,10 ****
- #ifdef USE_PRAGMA_IDENT_SRC
- #pragma ident "@(#)c1_Runtime1_x86.cpp 1.197 07/09/17 09:25:58 JVM"
- #endif
  /*
!  * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
!  * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 31,86 ****
  // Implementation of StubAssembler
  
  int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) {
    // setup registers
!   const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
    assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different");
    assert(oop_result1 != thread && oop_result2 != thread, "registers must be different");
    assert(args_size >= 0, "illegal args_size");
  
    set_num_rt_args(1 + args_size);
  
    // push java thread (becomes first argument of C function)
    get_thread(thread);
!   pushl(thread);
  
    set_last_Java_frame(thread, noreg, rbp, NULL);
    // do the call
    call(RuntimeAddress(entry));
    int call_offset = offset();
    // verify callee-saved register
  #ifdef ASSERT
    guarantee(thread != rax, "change this code");
!   pushl(rax);
    { Label L;
      get_thread(rax);
!     cmpl(thread, rax);
      jcc(Assembler::equal, L);
      int3();
      stop("StubAssembler::call_RT: rdi not callee saved?");
      bind(L);
    }
!   popl(rax);
  #endif
    reset_last_Java_frame(thread, true, false);
  
    // discard thread and arguments
!   addl(rsp, (1 + args_size)*BytesPerWord);
  
    // check for pending exceptions
    { Label L;
!     cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
      jcc(Assembler::equal, L);
      // exception pending => remove activation and forward to exception handler
!     movl(rax, Address(thread, Thread::pending_exception_offset()));
      // make sure that the vm_results are cleared
      if (oop_result1->is_valid()) {
!       movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
      }
      if (oop_result2->is_valid()) {
!       movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
      }
      if (frame_size() == no_frame_size) {
        leave();
        jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
      } else if (_stub_id == Runtime1::forward_exception_id) {
--- 28,89 ----
  // Implementation of StubAssembler
  
  int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) {
    // setup registers
!   const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
    assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different");
    assert(oop_result1 != thread && oop_result2 != thread, "registers must be different");
    assert(args_size >= 0, "illegal args_size");
  
+ #ifdef _LP64
+   mov(c_rarg0, thread);
+   set_num_rt_args(0); // Nothing on stack
+ #else
    set_num_rt_args(1 + args_size);
  
    // push java thread (becomes first argument of C function)
    get_thread(thread);
!   push(thread);
! #endif // _LP64
  
    set_last_Java_frame(thread, noreg, rbp, NULL);
+ 
    // do the call
    call(RuntimeAddress(entry));
    int call_offset = offset();
    // verify callee-saved register
  #ifdef ASSERT
    guarantee(thread != rax, "change this code");
!   push(rax);
    { Label L;
      get_thread(rax);
!     cmpptr(thread, rax);
      jcc(Assembler::equal, L);
      int3();
      stop("StubAssembler::call_RT: rdi not callee saved?");
      bind(L);
    }
!   pop(rax);
  #endif
    reset_last_Java_frame(thread, true, false);
  
    // discard thread and arguments
!   NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));
  
    // check for pending exceptions
    { Label L;
!     cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      jcc(Assembler::equal, L);
      // exception pending => remove activation and forward to exception handler
!     movptr(rax, Address(thread, Thread::pending_exception_offset()));
      // make sure that the vm_results are cleared
      if (oop_result1->is_valid()) {
!       movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
      }
      if (oop_result2->is_valid()) {
!       movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD);
      }
      if (frame_size() == no_frame_size) {
        leave();
        jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
      } else if (_stub_id == Runtime1::forward_exception_id) {
*** 90,129 ****
      }
      bind(L);
    }
  
    // get oop results if there are any and reset the values in the thread
    if (oop_result1->is_valid()) {
!     movl(oop_result1, Address(thread, JavaThread::vm_result_offset()));
!     movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
      verify_oop(oop_result1);
    }
    if (oop_result2->is_valid()) {
!     movl(oop_result2, Address(thread, JavaThread::vm_result_2_offset()));
!     movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
      verify_oop(oop_result2);
    }
    return call_offset;
  }
  
  
  int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
!   pushl(arg1);
    return call_RT(oop_result1, oop_result2, entry, 1);
  }
  
  
  int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
!   pushl(arg2);
!   pushl(arg1);
    return call_RT(oop_result1, oop_result2, entry, 2);
  }
  
  
  int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
!   pushl(arg3);
!   pushl(arg2);
!   pushl(arg1);
    return call_RT(oop_result1, oop_result2, entry, 3);
  }
  
  
  // Implementation of StubFrame
--- 93,168 ----
      }
      bind(L);
    }
  
    // get oop results if there are any and reset the values in the thread
    if (oop_result1->is_valid()) {
!     movptr(oop_result1, Address(thread, JavaThread::vm_result_offset()));
!     movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
      verify_oop(oop_result1);
    }
    if (oop_result2->is_valid()) {
!     movptr(oop_result2, Address(thread, JavaThread::vm_result_2_offset()));
!     movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD);
      verify_oop(oop_result2);
    }
    return call_offset;
  }
  
  
  int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
! #ifdef _LP64
!   mov(c_rarg1, arg1);
! #else
!   push(arg1);
! #endif // _LP64
    return call_RT(oop_result1, oop_result2, entry, 1);
  }
  
  
  int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
! #ifdef _LP64
!   if (c_rarg1 == arg2) {
!     if (c_rarg2 == arg1) {
!       xchgq(arg1, arg2);
!     } else {
!       mov(c_rarg2, arg2);
!       mov(c_rarg1, arg1);
!     }
!   } else {
!     mov(c_rarg1, arg1);
!     mov(c_rarg2, arg2);
!   }
! #else
!   push(arg2);
!   push(arg1);
! #endif // _LP64
    return call_RT(oop_result1, oop_result2, entry, 2);
  }
  
  
  int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
! #ifdef _LP64
!   // if there is any conflict use the stack
!   if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
!       arg2 == c_rarg1 || arg2 == c_rarg3 ||
!       arg3 == c_rarg1 || arg3 == c_rarg2) {
!     push(arg3);
!     push(arg2);
!     push(arg1);
!     pop(c_rarg1);
!     pop(c_rarg2);
!     pop(c_rarg3);
!   } else {
!     mov(c_rarg1, arg1);
!     mov(c_rarg2, arg2);
!     mov(c_rarg3, arg3);
!   }
! #else
!   push(arg3);
!   push(arg2);
!   push(arg1);
! #endif // _LP64
    return call_RT(oop_result1, oop_result2, entry, 3);
  }
  
  
  // Implementation of StubFrame
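The LP64 three-argument case above guards against the incoming registers already overlapping the C argument registers. Here is a standalone C++ sketch of that conflict rule (illustrative only: registers are modeled as plain ints, and needs_stack_shuffle is a hypothetical helper, not HotSpot code):

    // sketch.cpp -- not HotSpot code; "registers" are arbitrary int ids.
    #include <array>
    #include <cstdio>

    using Reg = int;

    // Conservatively report a conflict whenever some destination register is
    // also one of the other sources, mirroring the stub's "if there is any
    // conflict use the stack" test. Spilling through the stack (push all
    // sources, pop into destinations) makes move ordering irrelevant.
    bool needs_stack_shuffle(std::array<Reg, 3> src, std::array<Reg, 3> dst) {
      for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++)
          if (i != j && src[i] == dst[j]) return true;
      return false;
    }

    int main() {
      // arg1 already lives in c_rarg2 (ids arbitrary): plain movs would clobber it
      std::printf("%d\n", needs_stack_shuffle({2, 5, 6}, {1, 2, 3}));  // 1 -> spill via stack
      std::printf("%d\n", needs_stack_shuffle({7, 8, 9}, {1, 2, 3}));  // 0 -> three plain movs
      return 0;
    }

The push/pop fallback trades three memory round-trips for not having to compute a safe mov order, which keeps the stub simple.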
*** 155,165 ****
  //   + 1: return address
  //   + 2: argument with offset 0
  //   + 3: argument with offset 1
  //   + 4: ...
  
!   __ movl(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
  }
  
  
  StubFrame::~StubFrame() {
    __ leave();
--- 194,204 ----
  //   + 1: return address
  //   + 2: argument with offset 0
  //   + 3: argument with offset 1
  //   + 4: ...
  
!   __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
  }
  
  
  StubFrame::~StubFrame() {
    __ leave();
*** 171,213 ****
  // Implementation of Runtime1
  
  #define __ sasm->
  
! const int float_regs_as_doubles_size_in_words = 16;
! const int xmm_regs_as_doubles_size_in_words = 16;
  
  // Stack layout for saving/restoring all the registers needed during a runtime
  // call (this includes deoptimization)
  // Note: note that users of this frame may well have arguments to some runtime
  // while these values are on the stack. These positions neglect those arguments
  // but the code in save_live_registers will take the argument count into
  // account.
  //
  enum reg_save_layout {
!   dummy1,
!   dummy2,
    // Two temps to be used as needed by users of save/restore callee registers
!   temp_2_off,
!   temp_1_off,
!   xmm_regs_as_doubles_off,
!   float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_words,
!   fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_words,
!   fpu_state_end_off = fpu_state_off + FPUStateSizeInWords,
!   marker = fpu_state_end_off,
!   extra_space_offset,
    rdi_off = extra_space_offset,
!   rsi_off,
!   rbp_off,
!   rsp_off,
!   rbx_off,
!   rdx_off,
!   rcx_off,
!   rax_off,
!   saved_rbp_off,
!   return_off,
!   reg_save_frame_size,  // As noted: neglects any parameters to runtime
  
    // equates
  
    // illegal instruction handler
    continue_dest_off = temp_1_off,
--- 210,284 ----
  // Implementation of Runtime1
  
  #define __ sasm->
  
! const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
! const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;
  
  // Stack layout for saving/restoring all the registers needed during a runtime
  // call (this includes deoptimization)
  // Note: note that users of this frame may well have arguments to some runtime
  // while these values are on the stack. These positions neglect those arguments
  // but the code in save_live_registers will take the argument count into
  // account.
  //
+ #ifdef _LP64
+   #define SLOT2(x) x,
+   #define SLOT_PER_WORD 2
+ #else
+   #define SLOT2(x)
+   #define SLOT_PER_WORD 1
+ #endif // _LP64
+ 
  enum reg_save_layout {
!   // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
!   // happen and will assert if the stack size we create is misaligned
! #ifdef _LP64
!   align_dummy_0, align_dummy_1,
! #endif // _LP64
!   dummy1, SLOT2(dummy1H)                                                          // 0, 4
!   dummy2, SLOT2(dummy2H)                                                          // 8, 12
    // Two temps to be used as needed by users of save/restore callee registers
!   temp_2_off, SLOT2(temp_2H_off)                                                  // 16, 20
!   temp_1_off, SLOT2(temp_1H_off)                                                  // 24, 28
!   xmm_regs_as_doubles_off,                                                        // 32
!   float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160
!   fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224
!   // fpu_state_end_off is exclusive
!   fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),      // 352
!   marker = fpu_state_end_off, SLOT2(markerH)                                      // 352, 356
!   extra_space_offset,                                                             // 360
! #ifdef _LP64
!   r15_off = extra_space_offset, r15H_off,                                         // 360, 364
!   r14_off, r14H_off,                                                              // 368, 372
!   r13_off, r13H_off,                                                              // 376, 380
!   r12_off, r12H_off,                                                              // 384, 388
!   r11_off, r11H_off,                                                              // 392, 396
!   r10_off, r10H_off,                                                              // 400, 404
!   r9_off, r9H_off,                                                                // 408, 412
!   r8_off, r8H_off,                                                                // 416, 420
!   rdi_off, rdiH_off,                                                              // 424, 428
! #else
    rdi_off = extra_space_offset,
! #endif // _LP64
!   rsi_off, SLOT2(rsiH_off)                                                        // 432, 436
!   rbp_off, SLOT2(rbpH_off)                                                        // 440, 444
!   rsp_off, SLOT2(rspH_off)                                                        // 448, 452
!   rbx_off, SLOT2(rbxH_off)                                                        // 456, 460
!   rdx_off, SLOT2(rdxH_off)                                                        // 464, 468
!   rcx_off, SLOT2(rcxH_off)                                                        // 472, 476
!   rax_off, SLOT2(raxH_off)                                                        // 480, 484
!   saved_rbp_off, SLOT2(saved_rbpH_off)                                            // 488, 492
!   return_off, SLOT2(returnH_off)                                                  // 496, 500
!   reg_save_frame_size,  // As noted: neglects any parameters to runtime           // 504
! 
! #ifdef _WIN64
!   c_rarg0_off = rcx_off,
! #else
!   c_rarg0_off = rdi_off,
! #endif // WIN64
  
    // equates
  
    // illegal instruction handler
    continue_dest_off = temp_1_off,
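The SLOT2/SLOT_PER_WORD macros above let one enum serve both word sizes: on 64-bit every saved machine word covers two 4-byte VM stack slots, so each entry grows a high-half twin. A compile-only C++ sketch of the same trick (hypothetical names, not the HotSpot enum; stack_slot_size mirrors VMRegImpl::stack_slot_size, which is 4 bytes):

    // sketch.cpp -- demonstrates the SLOT2 enum-doubling idiom.
    #ifdef _LP64
    #define SLOT2(x) x,
    #else
    #define SLOT2(x)
    #endif

    enum demo_layout {
      a_off, SLOT2(aH_off)   // slot 0 (and slot 1 on 64-bit)
      b_off, SLOT2(bH_off)   // slot 1 on 32-bit; slots 2, 3 on 64-bit
      demo_frame_size
    };

    const int stack_slot_size = 4;  // bytes per VM stack slot on both word sizes

    // consecutive saved registers end up exactly one machine word apart
    static_assert(b_off * stack_slot_size == sizeof(void*),
                  "slot arithmetic matches the machine word size");

Because the offsets are in 4-byte slots on both 32- and 64-bit, the OopMap code below can use the same `*_off` names unconditionally.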
*** 230,251 ****
  // describe FPU registers. In all other cases it should be sufficient
  // to simply save their current value.
  
  static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args, bool save_fpu_registers = true) {
!   int frame_size = reg_save_frame_size + num_rt_args; // args + thread
!   sasm->set_frame_size(frame_size);
  
    // record saved value locations in an OopMap
    // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
!   OopMap* map = new OopMap(frame_size, 0);
    map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
  
    if (save_fpu_registers) {
      if (UseSSE < 2) {
        int fpu_off = float_regs_as_doubles_off;
        for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
--- 301,353 ----
  // describe FPU registers. In all other cases it should be sufficient
  // to simply save their current value.
  
  static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args, bool save_fpu_registers = true) {
! 
!   // In 64bit all the args are in regs so there are no additional stack slots
!   LP64_ONLY(num_rt_args = 0);
!   LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
!   int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
!   sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word );
  
    // record saved value locations in an OopMap
    // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
!   OopMap* map = new OopMap(frame_size_in_slots, 0);
    map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
+ #ifdef _LP64
+   map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
+   map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
+   map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
+   map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
+   map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
+   map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
+   map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
+   map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());
+ 
+   // This is stupid but needed.
+   map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
+   map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
+   map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
+   map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
+   map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
+   map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());
+ 
+   map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
+   map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
+   map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
+   map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
+   map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
+   map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
+   map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
+   map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
+ #endif // _LP64
  
    if (save_fpu_registers) {
      if (UseSSE < 2) {
        int fpu_off = float_regs_as_doubles_off;
        for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
*** 289,369 ****
  static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args, bool save_fpu_registers = true) {
    __ block_comment("save_live_registers");
  
!   int frame_size = reg_save_frame_size + num_rt_args; // args + thread
    // frame_size = round_to(frame_size, 4);
!   sasm->set_frame_size(frame_size);
  
!   __ pushad();         // integer registers
  
    // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
    // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
  
!   __ subl(rsp, extra_space_offset * wordSize);
  
  #ifdef ASSERT
!   __ movl(Address(rsp, marker * wordSize), 0xfeedbeef);
  #endif
  
    if (save_fpu_registers) {
      if (UseSSE < 2) {
        // save FPU stack
!       __ fnsave(Address(rsp, fpu_state_off * wordSize));
        __ fwait();
  
  #ifdef ASSERT
        Label ok;
!       __ cmpw(Address(rsp, fpu_state_off * wordSize), StubRoutines::fpu_cntrl_wrd_std());
        __ jccb(Assembler::equal, ok);
        __ stop("corrupted control word detected");
        __ bind(ok);
  #endif
  
        // Reset the control word to guard against exceptions being unmasked
        // since fstp_d can cause FPU stack underflow exceptions. Write it
        // into the on stack copy and then reload that to make sure that the
        // current and future values are correct.
!       __ movw(Address(rsp, fpu_state_off * wordSize), StubRoutines::fpu_cntrl_wrd_std());
!       __ frstor(Address(rsp, fpu_state_off * wordSize));
  
        // Save the FPU registers in de-opt-able form
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord +  0));
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord +  8));
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 16));
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 24));
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 32));
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 40));
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 48));
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 56));
      }
  
      if (UseSSE >= 2) {
        // save XMM registers
        // XMM registers can contain float or double values, but this is not known here,
        // so always save them as doubles.
        // note that float values are _not_ converted automatically, so for float values
        // the second word contains only garbage data.
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize +  0), xmm0);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize +  8), xmm1);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 16), xmm2);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 24), xmm3);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 32), xmm4);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 40), xmm5);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 48), xmm6);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 56), xmm7);
      } else if (UseSSE == 1) {
        // save XMM registers as float because double not supported without SSE2
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize +  0), xmm0);
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize +  8), xmm1);
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 16), xmm2);
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 24), xmm3);
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 32), xmm4);
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 40), xmm5);
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 48), xmm6);
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 56), xmm7);
      }
    }
  
    // FPU stack must be empty now
    __ verify_FPU(0, "save_live_registers");
--- 391,482 ----
  static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args, bool save_fpu_registers = true) {
    __ block_comment("save_live_registers");
  
!   // 64bit passes the args in regs to the c++ runtime
!   int frame_size_in_slots = reg_save_frame_size NOT_LP64(+ num_rt_args); // args + thread
    // frame_size = round_to(frame_size, 4);
!   sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word );
  
!   __ pusha();          // integer registers
  
    // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
    // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
  
!   __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
  
  #ifdef ASSERT
!   __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
  #endif
  
    if (save_fpu_registers) {
      if (UseSSE < 2) {
        // save FPU stack
!       __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
        __ fwait();
  
  #ifdef ASSERT
        Label ok;
!       __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
        __ jccb(Assembler::equal, ok);
        __ stop("corrupted control word detected");
        __ bind(ok);
  #endif
  
        // Reset the control word to guard against exceptions being unmasked
        // since fstp_d can cause FPU stack underflow exceptions. Write it
        // into the on stack copy and then reload that to make sure that the
        // current and future values are correct.
!       __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
!       __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
  
        // Save the FPU registers in de-opt-able form
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
!       __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
      }
  
      if (UseSSE >= 2) {
        // save XMM registers
        // XMM registers can contain float or double values, but this is not known here,
        // so always save them as doubles.
        // note that float values are _not_ converted automatically, so for float values
        // the second word contains only garbage data.
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
! #ifdef _LP64
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  64), xmm8);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  72), xmm9);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  80), xmm10);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  88), xmm11);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  96), xmm12);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
!       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
! #endif // _LP64
      } else if (UseSSE == 1) {
        // save XMM registers as float because double not supported without SSE2
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
!       __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
      }
    }
  
    // FPU stack must be empty now
    __ verify_FPU(0, "save_live_registers");
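The comment about "garbage data" in the second word can be made concrete: a 4-byte float store into an 8-byte save slot leaves the upper half of the slot untouched. A small plain-C++ illustration (not HotSpot code; assumes little-endian layout, as on x86):

    // sketch.cpp -- why a float saved with a 4-byte store must not be read as a double.
    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    int main() {
      uint64_t slot = 0xDEADBEEFDEADBEEFull;  // stale stack contents in the save slot
      float f = 1.5f;
      std::memcpy(&slot, &f, sizeof f);       // movflt-style 4-byte store
      // the high word still holds the old 0xDEADBEEF bits
      std::printf("%016llx\n", (unsigned long long)slot);
      return 0;
    }

This is why the deopt machinery saves the FPU stack with fstp_d (a real double conversion) while XMM registers are dumped raw and reinterpreted by type later.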
*** 374,405 ****
  static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
    if (restore_fpu_registers) {
      if (UseSSE >= 2) {
        // restore XMM registers
!       __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * wordSize +  0));
!       __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * wordSize +  8));
!       __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * wordSize + 16));
!       __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * wordSize + 24));
!       __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * wordSize + 32));
!       __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * wordSize + 40));
!       __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * wordSize + 48));
!       __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * wordSize + 56));
      } else if (UseSSE == 1) {
        // restore XMM registers
!       __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * wordSize +  0));
!       __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * wordSize +  8));
!       __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * wordSize + 16));
!       __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * wordSize + 24));
!       __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * wordSize + 32));
!       __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * wordSize + 40));
!       __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * wordSize + 48));
!       __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * wordSize + 56));
      }
  
      if (UseSSE < 2) {
!       __ frstor(Address(rsp, fpu_state_off * wordSize));
      } else {
        // check that FPU stack is really empty
        __ verify_FPU(0, "restore_live_registers");
      }
--- 487,528 ----
  static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
    if (restore_fpu_registers) {
      if (UseSSE >= 2) {
        // restore XMM registers
!       __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
!       __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
!       __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
!       __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
!       __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
!       __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
!       __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
!       __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
! #ifdef _LP64
!       __ movdbl(xmm8,  Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  64));
!       __ movdbl(xmm9,  Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  72));
!       __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  80));
!       __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  88));
!       __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  96));
!       __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
!       __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
!       __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
! #endif // _LP64
      } else if (UseSSE == 1) {
        // restore XMM registers
!       __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
!       __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
!       __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
!       __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
!       __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
!       __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
!       __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
!       __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
      }
  
      if (UseSSE < 2) {
!       __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      } else {
        // check that FPU stack is really empty
        __ verify_FPU(0, "restore_live_registers");
      }
*** 409,450 ****
    }
  
  #ifdef ASSERT
    {
      Label ok;
!     __ cmpl(Address(rsp, marker * wordSize), 0xfeedbeef);
      __ jcc(Assembler::equal, ok);
      __ stop("bad offsets in frame");
      __ bind(ok);
    }
! #endif
  
!   __ addl(rsp, extra_space_offset * wordSize);
  }
  
  
  static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
    __ block_comment("restore_live_registers");
  
    restore_fpu(sasm, restore_fpu_registers);
!   __ popad();
  }
  
  
  static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
    __ block_comment("restore_live_registers_except_rax");
  
    restore_fpu(sasm, restore_fpu_registers);
  
!   __ popl(rdi);
!   __ popl(rsi);
!   __ popl(rbp);
!   __ popl(rbx); // skip this value
!   __ popl(rbx);
!   __ popl(rdx);
!   __ popl(rcx);
!   __ addl(rsp, 4);
  }
  
  
  void Runtime1::initialize_pd() {
    // nothing to do
--- 532,594 ----
    }
  
  #ifdef ASSERT
    {
      Label ok;
!     __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
      __ jcc(Assembler::equal, ok);
      __ stop("bad offsets in frame");
      __ bind(ok);
    }
! #endif // ASSERT
  
!   __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
  }
  
  
  static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
    __ block_comment("restore_live_registers");
  
    restore_fpu(sasm, restore_fpu_registers);
!   __ popa();
  }
  
  
  static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
    __ block_comment("restore_live_registers_except_rax");
  
    restore_fpu(sasm, restore_fpu_registers);
  
! #ifdef _LP64
!   __ movptr(r15, Address(rsp, 0));
!   __ movptr(r14, Address(rsp, wordSize));
!   __ movptr(r13, Address(rsp, 2 * wordSize));
!   __ movptr(r12, Address(rsp, 3 * wordSize));
!   __ movptr(r11, Address(rsp, 4 * wordSize));
!   __ movptr(r10, Address(rsp, 5 * wordSize));
!   __ movptr(r9,  Address(rsp, 6 * wordSize));
!   __ movptr(r8,  Address(rsp, 7 * wordSize));
!   __ movptr(rdi, Address(rsp, 8 * wordSize));
!   __ movptr(rsi, Address(rsp, 9 * wordSize));
!   __ movptr(rbp, Address(rsp, 10 * wordSize));
!   // skip rsp
!   __ movptr(rbx, Address(rsp, 12 * wordSize));
!   __ movptr(rdx, Address(rsp, 13 * wordSize));
!   __ movptr(rcx, Address(rsp, 14 * wordSize));
! 
!   __ addptr(rsp, 16 * wordSize);
! #else
! 
!   __ pop(rdi);
!   __ pop(rsi);
!   __ pop(rbp);
!   __ pop(rbx); // skip this value
!   __ pop(rbx);
!   __ pop(rdx);
!   __ pop(rcx);
!   __ addptr(rsp, BytesPerWord);
! #endif // _LP64
  }
  
  
  void Runtime1::initialize_pd() {
    // nothing to do
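The 64-bit restore path above hard-codes the frame laid down by the earlier pusha(): r15 was stored last, so it sits at offset 0 and rax occupies the topmost word, which is why the code skips the rsp slot, leaves rax alone, and discards all 16 words at once. A compile-only C++ sketch of that layout (illustrative names, derived from the offsets used in the diff):

    // sketch.cpp -- word offsets from rsp after the integer-register save.
    enum pusha_frame {          // lowest address (pushed last) first
      r15_slot = 0, r14_slot, r13_slot, r12_slot,
      r11_slot,     r10_slot, r9_slot,  r8_slot,
      rdi_slot,     rsi_slot, rbp_slot, rsp_slot,  // rsp slot is never reloaded
      rbx_slot,     rdx_slot, rcx_slot, rax_slot,  // rax carries the stub result
      pusha_word_count
    };
    // 15 explicit reloads plus the skipped rsp and preserved rax = 16 words popped
    static_assert(pusha_word_count == 16, "matches addptr(rsp, 16 * wordSize)");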
*** 466,479 ****
    // registers used by this stub
    const Register temp_reg = rbx;
  
    // load argument for exception that is passed as an argument into the stub
    if (has_argument) {
!     __ movl(temp_reg, Address(rbp, 2*BytesPerWord));
!     __ pushl(temp_reg);
    }
- 
    int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);
  
    OopMapSet* oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, oop_map);
--- 610,626 ----
    // registers used by this stub
    const Register temp_reg = rbx;
  
    // load argument for exception that is passed as an argument into the stub
    if (has_argument) {
! #ifdef _LP64
!     __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
! #else
!     __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
!     __ push(temp_reg);
! #endif // _LP64
    }
  
    int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);
  
    OopMapSet* oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, oop_map);
*** 487,497 ****
    // incoming parameters
    const Register exception_oop = rax;
    const Register exception_pc = rdx;
    // other registers used in this stub
    const Register real_return_addr = rbx;
!   const Register thread = rdi;
  
    __ block_comment("generate_handle_exception");
  
  #ifdef TIERED
    // C2 can leave the fpu stack dirty
--- 634,644 ----
    // incoming parameters
    const Register exception_oop = rax;
    const Register exception_pc = rdx;
    // other registers used in this stub
    const Register real_return_addr = rbx;
!   const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  
    __ block_comment("generate_handle_exception");
  
  #ifdef TIERED
    // C2 can leave the fpu stack dirty
*** 504,542 ****
    __ invalidate_registers(false, true, true, false, true, true);
    // verify that rax, contains a valid exception
    __ verify_not_null_oop(exception_oop);
  
    // load address of JavaThread object for thread-local data
!   __ get_thread(thread);
  
  #ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are
    // empty before writing to them
    Label oop_empty;
!   __ cmpl(Address(thread, JavaThread::exception_oop_offset()), 0);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop already set");
    __ bind(oop_empty);
  
    Label pc_empty;
!   __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc already set");
    __ bind(pc_empty);
  #endif
  
    // save exception oop and issuing pc into JavaThread
    // (exception handler will load it from here)
!   __ movl(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
!   __ movl(Address(thread, JavaThread::exception_pc_offset()), exception_pc);
  
    // save real return address (pc that called this stub)
!   __ movl(real_return_addr, Address(rbp, 1*BytesPerWord));
!   __ movl(Address(rsp, temp_1_off * BytesPerWord), real_return_addr);
  
    // patch throwing pc into return address (has bci & oop map)
!   __ movl(Address(rbp, 1*BytesPerWord), exception_pc);
  
    // compute the exception handler.
    // the exception oop and the throwing pc are read from the fields in JavaThread
    int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
    oop_maps->add_gc_map(call_offset, oop_map);
--- 651,689 ----
    __ invalidate_registers(false, true, true, false, true, true);
    // verify that rax, contains a valid exception
    __ verify_not_null_oop(exception_oop);
  
    // load address of JavaThread object for thread-local data
!   NOT_LP64(__ get_thread(thread);)
  
  #ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are
    // empty before writing to them
    Label oop_empty;
!   __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop already set");
    __ bind(oop_empty);
  
    Label pc_empty;
!   __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc already set");
    __ bind(pc_empty);
  #endif
  
    // save exception oop and issuing pc into JavaThread
    // (exception handler will load it from here)
!   __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
!   __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc);
  
    // save real return address (pc that called this stub)
!   __ movptr(real_return_addr, Address(rbp, 1*BytesPerWord));
!   __ movptr(Address(rsp, temp_1_off * VMRegImpl::stack_slot_size), real_return_addr);
  
    // patch throwing pc into return address (has bci & oop map)
!   __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);
  
    // compute the exception handler.
    // the exception oop and the throwing pc are read from the fields in JavaThread
    int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
    oop_maps->add_gc_map(call_offset, oop_map);
*** 549,564 ****
    __ invalidate_registers(false, true, true, true, true, true);
  
    // Do we have an exception handler in the nmethod?
    Label no_handler;
    Label done;
!   __ testl(rax, rax);
    __ jcc(Assembler::zero, no_handler);
  
    // exception handler found
    // patch the return address -> the stub will directly return to the exception handler
!   __ movl(Address(rbp, 1*BytesPerWord), rax);
  
    // restore registers
    restore_live_registers(sasm, save_fpu_registers);
  
    // return to exception handler
--- 696,711 ----
    __ invalidate_registers(false, true, true, true, true, true);
  
    // Do we have an exception handler in the nmethod?
    Label no_handler;
    Label done;
!   __ testptr(rax, rax);
    __ jcc(Assembler::zero, no_handler);
  
    // exception handler found
    // patch the return address -> the stub will directly return to the exception handler
!   __ movptr(Address(rbp, 1*BytesPerWord), rax);
  
    // restore registers
    restore_live_registers(sasm, save_fpu_registers);
  
    // return to exception handler
*** 569,590 ****
    // no exception handler found in this method, so the exception is
    // forwarded to the caller (using the unwind code of the nmethod)
    // there is no need to restore the registers
  
    // restore the real return address that was saved before the RT-call
!   __ movl(real_return_addr, Address(rsp, temp_1_off * BytesPerWord));
!   __ movl(Address(rbp, 1*BytesPerWord), real_return_addr);
  
    // load address of JavaThread object for thread-local data
!   __ get_thread(thread);
    // restore exception oop into rax, (convention for unwind code)
!   __ movl(exception_oop, Address(thread, JavaThread::exception_oop_offset()));
    // clear exception fields in JavaThread because they are no longer needed
    // (fields must be cleared because they are processed by GC otherwise)
!   __ movl(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
!   __ movl(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);
  
    // pop the stub frame off
    __ leave();
  
    generate_unwind_exception(sasm);
--- 716,737 ----
    // no exception handler found in this method, so the exception is
    // forwarded to the caller (using the unwind code of the nmethod)
    // there is no need to restore the registers
  
    // restore the real return address that was saved before the RT-call
!   __ movptr(real_return_addr, Address(rsp, temp_1_off * VMRegImpl::stack_slot_size));
!   __ movptr(Address(rbp, 1*BytesPerWord), real_return_addr);
  
    // load address of JavaThread object for thread-local data
!   NOT_LP64(__ get_thread(thread);)
    // restore exception oop into rax, (convention for unwind code)
!   __ movptr(exception_oop, Address(thread, JavaThread::exception_oop_offset()));
    // clear exception fields in JavaThread because they are no longer needed
    // (fields must be cleared because they are processed by GC otherwise)
!   __ movptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
!   __ movptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
  
    // pop the stub frame off
    __ leave();
  
    generate_unwind_exception(sasm);
*** 596,621 ****
    // incoming parameters
    const Register exception_oop = rax;
    // other registers used in this stub
    const Register exception_pc = rdx;
    const Register handler_addr = rbx;
!   const Register thread = rdi;
  
    // verify that only rax, is valid at this time
    __ invalidate_registers(false, true, true, true, true, true);
  
  #ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
!   __ get_thread(thread);
    Label oop_empty;
!   __ cmpl(Address(thread, JavaThread::exception_oop_offset()), 0);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);
  
    Label pc_empty;
!   __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
  #endif
--- 743,768 ----
    // incoming parameters
    const Register exception_oop = rax;
    // other registers used in this stub
    const Register exception_pc = rdx;
    const Register handler_addr = rbx;
!   const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  
    // verify that only rax, is valid at this time
    __ invalidate_registers(false, true, true, true, true, true);
  
  #ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
!   NOT_LP64(__ get_thread(thread);)
    Label oop_empty;
!   __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);
  
    Label pc_empty;
!   __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
  #endif
*** 623,658 ****
    __ empty_FPU_stack();
  
    // leave activation of nmethod
    __ leave();
    // store return address (is on top of stack after leave)
!   __ movl(exception_pc, Address(rsp, 0));
  
    __ verify_oop(exception_oop);
  
    // save exception oop from rax, to stack before call
!   __ pushl(exception_oop);
  
    // search the exception handler address of the caller (using the return address)
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), exception_pc);
    // rax,: exception handler address of the caller
  
    // only rax, is valid at this time, all other registers have been destroyed by the call
    __ invalidate_registers(false, true, true, true, true, true);
  
    // move result of call into correct register
!   __ movl(handler_addr, rax);
  
    // restore exception oop in rax, (required convention of exception handler)
!   __ popl(exception_oop);
  
    __ verify_oop(exception_oop);
  
    // get throwing pc (= return address).
    // rdx has been destroyed by the call, so it must be set again
    // the pop is also necessary to simulate the effect of a ret(0)
!   __ popl(exception_pc);
  
    // verify that there is really a valid exception in rax,
    __ verify_not_null_oop(exception_oop);
  
    // continue at exception handler (return address removed)
--- 770,805 ----
    __ empty_FPU_stack();
  
    // leave activation of nmethod
    __ leave();
    // store return address (is on top of stack after leave)
!   __ movptr(exception_pc, Address(rsp, 0));
  
    __ verify_oop(exception_oop);
  
    // save exception oop from rax, to stack before call
!   __ push(exception_oop);
  
    // search the exception handler address of the caller (using the return address)
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), exception_pc);
    // rax,: exception handler address of the caller
  
    // only rax, is valid at this time, all other registers have been destroyed by the call
    __ invalidate_registers(false, true, true, true, true, true);
  
    // move result of call into correct register
!   __ movptr(handler_addr, rax);
  
    // restore exception oop in rax, (required convention of exception handler)
!   __ pop(exception_oop);
  
    __ verify_oop(exception_oop);
  
    // get throwing pc (= return address).
    // rdx has been destroyed by the call, so it must be set again
    // the pop is also necessary to simulate the effect of a ret(0)
!   __ pop(exception_pc);
  
    // verify that there is really a valid exception in rax,
    __ verify_not_null_oop(exception_oop);
  
    // continue at exception handler (return address removed)
*** 678,761 ****
    DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
    assert(deopt_blob != NULL, "deoptimization blob must have been created");
  
    OopMap* oop_map = save_live_registers(sasm, num_rt_args);
  
!   __ pushl(rax); // push dummy
  
    const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
    // push java thread (becomes first argument of C function)
    __ get_thread(thread);
!   __ pushl(thread);
  
    __ set_last_Java_frame(thread, noreg, rbp, NULL);
  
    // do the call
    __ call(RuntimeAddress(target));
    OopMapSet* oop_maps = new OopMapSet();
    oop_maps->add_gc_map(__ offset(), oop_map);
  
    // verify callee-saved register
  #ifdef ASSERT
    guarantee(thread != rax, "change this code");
!   __ pushl(rax);
    { Label L;
      __ get_thread(rax);
!     __ cmpl(thread, rax);
      __ jcc(Assembler::equal, L);
!     __ stop("StubAssembler::call_RT: rdi not callee saved?");
      __ bind(L);
    }
!   __ popl(rax);
  #endif
  
    __ reset_last_Java_frame(thread, true, false);
!   __ popl(rcx); // discard thread arg
!   __ popl(rcx); // discard dummy
  
    // check for pending exceptions
    { Label L;
!     __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
      __ jcc(Assembler::equal, L);
      // exception pending => remove activation and forward to exception handler
  
!     __ testl(rax, rax); // have we deoptimized?
      __ jump_cc(Assembler::equal, RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
  
      // the deopt blob expects exceptions in the special fields of
      // JavaThread, so copy and clear pending exception.
  
      // load and clear pending exception
!     __ movl(rax, Address(thread, Thread::pending_exception_offset()));
!     __ movl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
  
      // check that there is really a valid exception
      __ verify_not_null_oop(rax);
  
      // load throwing pc: this is the return address of the stub
!     __ movl(rdx, Address(rsp, return_off * BytesPerWord));
  
  #ifdef ASSERT
      // check that fields in JavaThread for exception oop and issuing pc are empty
      Label oop_empty;
!     __ cmpoop(Address(thread, JavaThread::exception_oop_offset()), 0);
      __ jcc(Assembler::equal, oop_empty);
      __ stop("exception oop must be empty");
      __ bind(oop_empty);
  
      Label pc_empty;
!     __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0);
      __ jcc(Assembler::equal, pc_empty);
      __ stop("exception pc must be empty");
      __ bind(pc_empty);
  #endif
  
      // store exception oop and throwing pc to JavaThread
!     __ movl(Address(thread, JavaThread::exception_oop_offset()), rax);
!     __ movl(Address(thread, JavaThread::exception_pc_offset()), rdx);
  
      restore_live_registers(sasm);
  
      __ leave();
!     __ addl(rsp, 4); // remove return address from stack
  
      // Forward the exception directly to deopt blob. We can blow no
      // registers and must leave throwing pc on the stack. A patch may
      // have values live in registers so the entry point with the
      // exception in tls.
--- 825,916 ----
    DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
    assert(deopt_blob != NULL, "deoptimization blob must have been created");
  
    OopMap* oop_map = save_live_registers(sasm, num_rt_args);
  
! #ifdef _LP64
!   const Register thread = r15_thread;
!   // No need to worry about dummy
!   __ mov(c_rarg0, thread);
! #else
!   __ push(rax); // push dummy
  
    const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
    // push java thread (becomes first argument of C function)
    __ get_thread(thread);
!   __ push(thread);
! #endif // _LP64
  
    __ set_last_Java_frame(thread, noreg, rbp, NULL);
  
    // do the call
    __ call(RuntimeAddress(target));
    OopMapSet* oop_maps = new OopMapSet();
    oop_maps->add_gc_map(__ offset(), oop_map);
  
    // verify callee-saved register
  #ifdef ASSERT
    guarantee(thread != rax, "change this code");
!   __ push(rax);
    { Label L;
      __ get_thread(rax);
!     __ cmpptr(thread, rax);
      __ jcc(Assembler::equal, L);
!     __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
      __ bind(L);
    }
!   __ pop(rax);
  #endif
  
    __ reset_last_Java_frame(thread, true, false);
! #ifndef _LP64
!   __ pop(rcx); // discard thread arg
!   __ pop(rcx); // discard dummy
! #endif // _LP64
  
    // check for pending exceptions
    { Label L;
!     __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      // exception pending => remove activation and forward to exception handler
  
!     __ testptr(rax, rax); // have we deoptimized?
      __ jump_cc(Assembler::equal, RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
  
      // the deopt blob expects exceptions in the special fields of
      // JavaThread, so copy and clear pending exception.
  
      // load and clear pending exception
!     __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
!     __ movptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  
      // check that there is really a valid exception
      __ verify_not_null_oop(rax);
  
      // load throwing pc: this is the return address of the stub
!     __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));
  
  #ifdef ASSERT
      // check that fields in JavaThread for exception oop and issuing pc are empty
      Label oop_empty;
!     __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, oop_empty);
      __ stop("exception oop must be empty");
      __ bind(oop_empty);
  
      Label pc_empty;
!     __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, pc_empty);
      __ stop("exception pc must be empty");
      __ bind(pc_empty);
  #endif
  
      // store exception oop and throwing pc to JavaThread
!     __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
!     __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);
  
      restore_live_registers(sasm);
  
      __ leave();
!     __ addptr(rsp, BytesPerWord); // remove return address from stack
  
      // Forward the exception directly to deopt blob. We can blow no
      // registers and must leave throwing pc on the stack. A patch may
      // have values live in registers so the entry point with the
      // exception in tls.
*** 768,778 ****
    // Runtime will return true if the nmethod has been deoptimized during
    // the patching process. In that case we must do a deopt reexecute instead.
  
    Label reexecuteEntry, cont;
  
!   __ testl(rax, rax); // have we deoptimized?
    __ jcc(Assembler::equal, cont); // no
  
    // Will reexecute. Proper return address is already on the stack we just restore
    // registers, pop all of our frame but the return address and jump to the deopt blob
    restore_live_registers(sasm);
--- 923,933 ----
    // Runtime will return true if the nmethod has been deoptimized during
    // the patching process. In that case we must do a deopt reexecute instead.
  
    Label reexecuteEntry, cont;
  
!   __ testptr(rax, rax); // have we deoptimized?
    __ jcc(Assembler::equal, cont); // no
  
    // Will reexecute. Proper return address is already on the stack we just restore
    // registers, pop all of our frame but the return address and jump to the deopt blob
    restore_live_registers(sasm);
*** 807,831 ****
      // frame. The registers have been saved in the standard
      // places. Perform an exception lookup in the caller and
      // dispatch to the handler if found. Otherwise unwind and
      // dispatch to the callers exception handler.
  
!     const Register thread = rdi;
      const Register exception_oop = rax;
      const Register exception_pc = rdx;
  
      // load pending exception oop into rax,
!     __ movl(exception_oop, Address(thread, Thread::pending_exception_offset()));
      // clear pending exception
!     __ movl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
  
      // load issuing PC (the return address for this stub) into rdx
!     __ movl(exception_pc, Address(rbp, 1*BytesPerWord));
  
      // make sure that the vm_results are cleared (may be unnecessary)
!     __ movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
!     __ movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
  
      // verify that there is really a valid exception in rax,
      __ verify_not_null_oop(exception_oop);
--- 962,986 ----
      // frame. The registers have been saved in the standard
      // places. Perform an exception lookup in the caller and
      // dispatch to the handler if found. Otherwise unwind and
      // dispatch to the callers exception handler.
  
!     const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
      const Register exception_oop = rax;
      const Register exception_pc = rdx;
  
      // load pending exception oop into rax,
!     __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
      // clear pending exception
!     __ movptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  
      // load issuing PC (the return address for this stub) into rdx
!     __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));
  
      // make sure that the vm_results are cleared (may be unnecessary)
!     __ movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
!     __ movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD);
  
      // verify that there is really a valid exception in rax,
      __ verify_not_null_oop(exception_oop);
*** 858,869 ****
        Register obj_size = rcx;
        Register t1 = rbx;
        Register t2 = rsi;
        assert_different_registers(klass, obj, obj_size, t1, t2);
  
!       __ pushl(rdi);
!       __ pushl(rbx);
  
        if (id == fast_new_instance_init_check_id) {
          // make sure the klass is initialized
          __ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
          __ jcc(Assembler::notEqual, slow_path);
--- 1013,1024 ----
        Register obj_size = rcx;
        Register t1 = rbx;
        Register t2 = rsi;
        assert_different_registers(klass, obj, obj_size, t1, t2);
  
!       __ push(rdi);
!       __ push(rbx);
  
        if (id == fast_new_instance_init_check_id) {
          // make sure the klass is initialized
          __ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
          __ jcc(Assembler::notEqual, slow_path);
*** 890,921 ****
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass)
  
          __ bind(retry_tlab);
  
!         // get the instance size
          __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
!         __ popl(rbx);
!         __ popl(rdi);
          __ ret(0);
  
          __ bind(try_eden);
!         // get the instance size
          __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
!         __ popl(rbx);
!         __ popl(rdi);
          __ ret(0);
  
          __ bind(slow_path);
!         __ popl(rbx);
!         __ popl(rdi);
        }
  
        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
--- 1045,1076 ----
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass)
  
          __ bind(retry_tlab);
  
!         // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
!         __ pop(rbx);
!         __ pop(rdi);
          __ ret(0);
  
          __ bind(try_eden);
!         // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
!         __ pop(rbx);
!         __ pop(rdi);
          __ ret(0);
  
          __ bind(slow_path);
!         __ pop(rbx);
!         __ pop(rdi);
        }
  
        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
*** 997,1050 **** __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx, & rdx __ bind(retry_tlab); // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); __ movl(arr_size, length); assert(t1 == rcx, "fixed register usage"); ! __ shll(arr_size /* by t1=rcx, mod 32 */); ! __ shrl(t1, Klass::_lh_header_size_shift); ! __ andl(t1, Klass::_lh_header_size_mask); ! __ addl(arr_size, t1); ! __ addl(arr_size, MinObjAlignmentInBytesMask); // align up ! __ andl(arr_size, ~MinObjAlignmentInBytesMask); __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size __ initialize_header(obj, klass, length, t1, t2); __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte))); assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); ! __ andl(t1, Klass::_lh_header_size_mask); ! __ subl(arr_size, t1); // body length ! __ addl(t1, obj); // body start __ initialize_body(t1, arr_size, 0, t2); __ verify_oop(obj); __ ret(0); __ bind(try_eden); // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); __ movl(arr_size, length); assert(t1 == rcx, "fixed register usage"); ! __ shll(arr_size /* by t1=rcx, mod 32 */); ! __ shrl(t1, Klass::_lh_header_size_shift); ! __ andl(t1, Klass::_lh_header_size_mask); ! __ addl(arr_size, t1); ! __ addl(arr_size, MinObjAlignmentInBytesMask); // align up ! __ andl(arr_size, ~MinObjAlignmentInBytesMask); __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size __ initialize_header(obj, klass, length, t1, t2); __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte))); assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); ! __ andl(t1, Klass::_lh_header_size_mask); ! __ subl(arr_size, t1); // body length ! __ addl(t1, obj); // body start __ initialize_body(t1, arr_size, 0, t2); __ verify_oop(obj); __ ret(0); __ bind(slow_path); --- 1152,1209 ---- __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx, & rdx __ bind(retry_tlab); // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) + // since size is postive movl does right thing on 64bit __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); + // since size is postive movl does right thing on 64bit __ movl(arr_size, length); assert(t1 == rcx, "fixed register usage"); ! __ shlptr(arr_size /* by t1=rcx, mod 32 */); ! __ shrptr(t1, Klass::_lh_header_size_shift); ! __ andptr(t1, Klass::_lh_header_size_mask); ! __ addptr(arr_size, t1); ! __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up ! 
  
        __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size
        __ initialize_header(obj, klass, length, t1, t2);
        __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
        assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
        assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
!       __ andptr(t1, Klass::_lh_header_size_mask);
!       __ subptr(arr_size, t1); // body length
!       __ addptr(t1, obj); // body start
        __ initialize_body(t1, arr_size, 0, t2);
        __ verify_oop(obj);
        __ ret(0);
  
        __ bind(try_eden);
        // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
+       // since size is positive movl does right thing on 64bit
        __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
+       // since size is positive movl does right thing on 64bit
        __ movl(arr_size, length);
        assert(t1 == rcx, "fixed register usage");
!       __ shlptr(arr_size /* by t1=rcx, mod 32 */);
!       __ shrptr(t1, Klass::_lh_header_size_shift);
!       __ andptr(t1, Klass::_lh_header_size_mask);
!       __ addptr(arr_size, t1);
!       __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
!       __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
  
        __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
        __ initialize_header(obj, klass, length, t1, t2);
        __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
        assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
        assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
!       __ andptr(t1, Klass::_lh_header_size_mask);
!       __ subptr(arr_size, t1); // body length
!       __ addptr(t1, obj); // body start
        __ initialize_body(t1, arr_size, 0, t2);
        __ verify_oop(obj);
        __ ret(0);
  
        __ bind(slow_path);
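Both paths compute round_up(header_size + (length << log2_element_size)), with the shift count and header size packed into the layout helper. A hedged C++ sketch of that arithmetic (the field positions mirror Klass::_lh_*, but the shift/mask values 16 and 0xFF used here are assumptions, not quotes of the real constants):

    #include <cstdint>

    // Hedged sketch of the array-size computation performed above.
    static int32_t array_size_sketch(int32_t layout_helper, int32_t length,
                                     int32_t align_mask /* MinObjAlignmentInBytesMask */) {
      int32_t log2_esize = layout_helper & 0x1F;          // element size shift, low 5 bits
      int32_t hdr_size   = (layout_helper >> 16) & 0xFF;  // header size in bytes (assumed field position)
      int32_t size       = (length << log2_esize) + hdr_size;
      return (size + align_mask) & ~align_mask;           // round up to object alignment
    }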
*** 1090,1108 ****
  
    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);
  
        // The object is passed on the stack and we haven't pushed a
        // frame yet so it's one word away from top of stack.
!       __ movl(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
  
        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = rsi;
!       __ movl(t, Address(rax, oopDesc::klass_offset_in_bytes()));
        __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
  
        __ ret(0);
--- 1249,1275 ----
  
    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);
  
+       // This is called via call_runtime so the arguments
+       // will be placed in C ABI locations
+ 
+ #ifdef _LP64
+       __ verify_oop(c_rarg0);
+       __ mov(rax, c_rarg0);
+ #else
        // The object is passed on the stack and we haven't pushed a
        // frame yet so it's one word away from top of stack.
!       __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
+ #endif // _LP64
  
        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = rsi;
!       __ movptr(t, Address(rax, oopDesc::klass_offset_in_bytes()));
        __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
  
        __ ret(0);
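The stub is only a filter: it returns immediately unless the access flags of the object's class carry JVM_ACC_HAS_FINALIZER, and only then falls through to the runtime call. Roughly, in C++ (the flag value shown is an assumption for illustration):

    #include <cstdint>

    static const int32_t HAS_FINALIZER_FLAG = 0x40000000; // assumed value of JVM_ACC_HAS_FINALIZER

    // Hedged sketch: register with the finalizer machinery only when the
    // klass declares a non-trivial finalize().
    static bool needs_finalizer_registration(int32_t klass_access_flags) {
      return (klass_access_flags & HAS_FINALIZER_FLAG) != 0;
    }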
*** 1186,1235 ****
        break;
  
    case slow_subtype_check_id:
      {
        enum layout {
!         rax_off,
!         rcx_off,
!         rsi_off,
!         rdi_off,
!         saved_rbp_off,
!         return_off,
!         sub_off,
!         super_off,
          framesize
        };
  
        __ set_info("slow_subtype_check", dont_gc_arguments);
!       __ pushl(rdi);
!       __ pushl(rsi);
!       __ pushl(rcx);
!       __ pushl(rax);
!       __ movl(rsi, Address(rsp, (super_off - 1) * BytesPerWord)); // super
!       __ movl(rax, Address(rsp, (sub_off - 1) * BytesPerWord)); // sub
!
!       __ movl(rdi,Address(rsi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()));
!       __ movl(rcx,Address(rdi,arrayOopDesc::length_offset_in_bytes()));
!       __ addl(rdi,arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  
        Label miss;
        __ repne_scan();
        __ jcc(Assembler::notEqual, miss);
!       __ movl(Address(rsi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax);
!       __ movl(Address(rsp, (super_off - 1) * BytesPerWord), 1); // result
!       __ popl(rax);
!       __ popl(rcx);
!       __ popl(rsi);
!       __ popl(rdi);
        __ ret(0);
  
        __ bind(miss);
!       __ movl(Address(rsp, (super_off - 1) * BytesPerWord), 0); // result
!       __ popl(rax);
!       __ popl(rcx);
!       __ popl(rsi);
!       __ popl(rdi);
        __ ret(0);
      }
      break;
  
    case monitorenter_nofpu_id:
--- 1353,1405 ----
        break;
  
    case slow_subtype_check_id:
      {
        enum layout {
!         rax_off, SLOT2(raxH_off)
!         rcx_off, SLOT2(rcxH_off)
!         rsi_off, SLOT2(rsiH_off)
!         rdi_off, SLOT2(rdiH_off)
!         // saved_rbp_off, SLOT2(saved_rbpH_off)
!         return_off, SLOT2(returnH_off)
!         sub_off, SLOT2(subH_off)
!         super_off, SLOT2(superH_off)
          framesize
        };
  
        __ set_info("slow_subtype_check", dont_gc_arguments);
!       __ push(rdi);
!       __ push(rsi);
!       __ push(rcx);
!       __ push(rax);
!
!       // This is called by pushing args and not with C ABI
!       __ movptr(rsi, Address(rsp, (super_off) * VMRegImpl::stack_slot_size)); // super
!       __ movptr(rax, Address(rsp, (sub_off ) * VMRegImpl::stack_slot_size)); // sub
!
!       __ movptr(rdi,Address(rsi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()));
!       // since size is positive movl does right thing on 64bit
!       __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
!       __ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  
        Label miss;
        __ repne_scan();
        __ jcc(Assembler::notEqual, miss);
!       __ movptr(Address(rsi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax);
!       __ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), 1); // result
!       __ pop(rax);
!       __ pop(rcx);
!       __ pop(rsi);
!       __ pop(rdi);
        __ ret(0);
  
        __ bind(miss);
!       __ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), 0); // result
!       __ pop(rax);
!       __ pop(rcx);
!       __ pop(rsi);
!       __ pop(rdi);
        __ ret(0);
      }
      break;
  
    case monitorenter_nofpu_id:
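What repne_scan implements here is a linear search of a secondary-supers array, with a one-entry cache updated on a hit so the compiled fast path can short-circuit the next lookup; the result is returned by overwriting a stack slot. A hedged C++ rendering with stand-in types (the real scan is a single repne scas over the array):

    // Hedged sketch; FakeKlass is an illustrative stand-in for the
    // real klass structures.
    struct FakeKlass {
      FakeKlass** secondary_supers;        // supertypes not on the primary chain
      int         secondary_supers_length;
      FakeKlass*  secondary_super_cache;   // last successful lookup
    };

    static bool slow_subtype_check_sketch(FakeKlass* sub, FakeKlass* super) {
      for (int i = 0; i < sub->secondary_supers_length; i++) {
        if (sub->secondary_supers[i] == super) {
          sub->secondary_super_cache = super;  // cache the hit for the fast path
          return true;                         // stub writes 1 into the result slot
        }
      }
      return false;                            // stub writes 0 into the result slot
    }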
*** 1238,1247 ****
--- 1408,1419 ----
  
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);
  
+       // Called with store_parameter and not C ABI
+ 
        f.load_argument(1, rax); // rax,: object
        f.load_argument(0, rbx); // rbx,: lock address
  
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);
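Because these stubs are entered with arguments spilled by store_parameter rather than passed via the C ABI, load_argument reads them back relative to rbp. A hedged sketch of the offset arithmetic (the "+ 2" skipping the saved rbp and return address is an assumption about the frame layout, not a quote of the real StubFrame implementation):

    #include <cstdint>

    // Hedged sketch: argument n is assumed to live just above the saved
    // rbp and the return address in the caller's frame.
    static intptr_t stub_argument_sketch(const intptr_t* rbp_value, int n) {
      return rbp_value[n + 2];
    }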
*** 1257,1266 ****
--- 1429,1440 ----
  
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
  
+       // Called with store_parameter and not C ABI
+ 
        f.load_argument(0, rax); // rax,: lock address
  
        // note: really a leaf routine but must set up last java sp
        // => use call_RT for now (speed can be improved by
        // doing last java sp setup manually)
*** 1305,1338 ****
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // we can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);
  
!       __ pushl(rax);
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
!       __ popl(rax);
  
        restore_live_registers(sasm);
      }
      break;
  
    case fpu2long_stub_id:
      {
        // rax, and rdx are destroyed, but should be free since the result is returned there
        // preserve rsi, rcx
!       __ pushl(rsi);
!       __ pushl(rcx);
  
        // check for NaN
        Label return0, do_return, return_min_jlong, do_convert;
  
!       Address value_high_word(rsp, 8);
!       Address value_low_word(rsp, 4);
!       Address result_high_word(rsp, 16);
!       Address result_low_word(rsp, 12);
  
!       __ subl(rsp, 20);
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
--- 1479,1513 ----
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // we can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);
  
!       __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
!       NOT_LP64(__ pop(rax));
  
        restore_live_registers(sasm);
      }
      break;
  
    case fpu2long_stub_id:
      {
        // rax, and rdx are destroyed, but should be free since the result is returned there
        // preserve rsi, rcx
!       __ push(rsi);
!       __ push(rcx);
!       LP64_ONLY(__ push(rdx);)
  
        // check for NaN
        Label return0, do_return, return_min_jlong, do_convert;
  
!       Address value_high_word(rsp, wordSize + 4);
!       Address value_low_word(rsp, wordSize);
!       Address result_high_word(rsp, 3*wordSize + 4);
!       Address result_low_word(rsp, 3*wordSize);
  
!       __ subptr(rsp, 32); // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
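The exponent test above (high word & 0x7ff00000 == 0x7ff00000) filters out NaN and infinity before the x87 conversion: Java's d2l must map NaN to 0 and clamp out-of-range values to min/max jlong. A hedged C++ sketch of the required semantics (not of the x87 mechanism the stub actually uses):

    #include <cstdint>
    #include <cstring>

    // Hedged sketch of Java d2l edge cases: NaN -> 0, overflow clamps.
    static int64_t d2l_sketch(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof(bits));
      uint32_t hi = (uint32_t)(bits >> 32);
      if ((hi & 0x7ff00000u) == 0x7ff00000u &&        // exponent all ones
          (bits & 0x000fffffffffffffull) != 0) {      // nonzero mantissa => NaN
        return 0;
      }
      if (v >= 9223372036854775808.0) return INT64_MAX;  // >= 2^63 clamps high
      if (v < -9223372036854775808.0) return INT64_MIN;  // < -2^63 clamps low
      return (int64_t)v;                                 // in range: plain convert
    }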
*** 1341,1403 ****
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);
  
        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
!       __ movzxw(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
!       __ movl(rax, result_low_word);
        __ movl(rdx, result_high_word);
!       __ movl(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
        __ sahf();
        __ jcc(Assembler::above, return_min_jlong);
        // return max_jlong
        __ movl(rdx, 0x7fffffff);
        __ movl(rax, 0xffffffff);
        __ jmp(do_return);
  
        __ bind(return_min_jlong);
        __ movl(rdx, 0x80000000);
        __ xorl(rax, rax);
        __ jmp(do_return);
  
        __ bind(return0);
        __ fpop();
!       __ xorl(rdx,rdx);
!       __ xorl(rax,rax);
  
        __ bind(do_return);
!       __ addl(rsp, 20);
!       __ popl(rcx);
!       __ popl(rsi);
        __ ret(0);
      }
      break;
  
    default:
      {
        StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
!       __ movl(rax, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
        __ should_not_reach_here();
      }
      break;
    }
    return oop_maps;
  }
  
  #undef __
- 
--- 1516,1757 ----
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);
  
        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
!       __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
!       // This gets the entire long in rax on 64bit
!       __ movptr(rax, result_low_word);
!       // test the high bits
        __ movl(rdx, result_high_word);
!       __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
+ #ifdef _LP64
+       __ testl(rax, 0x4100);  // ZF & CF == 0
+       __ jcc(Assembler::equal, return_min_jlong);
+ #else
        __ sahf();
        __ jcc(Assembler::above, return_min_jlong);
+ #endif // _LP64
        // return max_jlong
+ #ifndef _LP64
        __ movl(rdx, 0x7fffffff);
        __ movl(rax, 0xffffffff);
+ #else
+       __ mov64(rax, CONST64(0x7fffffffffffffff));
+ #endif // _LP64
        __ jmp(do_return);
  
        __ bind(return_min_jlong);
+ #ifndef _LP64
        __ movl(rdx, 0x80000000);
        __ xorl(rax, rax);
+ #else
+       __ mov64(rax, CONST64(0x8000000000000000));
+ #endif // _LP64
        __ jmp(do_return);
  
        __ bind(return0);
        __ fpop();
! #ifndef _LP64
!       __ xorptr(rdx,rdx);
!       __ xorptr(rax,rax);
! #else
!       __ xorptr(rax, rax);
! #endif // _LP64
  
        __ bind(do_return);
!       __ addptr(rsp, 32);
!       LP64_ONLY(__ pop(rdx);)
!       __ pop(rcx);
!       __ pop(rsi);
        __ ret(0);
      }
      break;
  
+ #ifndef SERIALGC
+   case g1_pre_barrier_slow_id:
+     {
+       StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
+       // arg0 : previous value of memory
+ 
+       BarrierSet* bs = Universe::heap()->barrier_set();
+       if (bs->kind() != BarrierSet::G1SATBCTLogging) {
+         __ movptr(rax, (int)id);
+         __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
+         __ should_not_reach_here();
+         break;
+       }
+ 
+       __ push(rax);
+       __ push(rdx);
+ 
+       const Register pre_val = rax;
+       const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
+       const Register tmp = rdx;
+ 
+       NOT_LP64(__ get_thread(thread);)
+ 
+       Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()));
+ 
+       Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()));
+       Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_buf()));
+ 
+       Label done;
+       Label runtime;
+ 
+       // Can we store original value in the thread's buffer?
+ 
+       LP64_ONLY(__ movslq(tmp, queue_index);)
+ #ifdef _LP64
+       __ cmpq(tmp, 0);
+ #else
+       __ cmpl(queue_index, 0);
+ #endif
+       __ jcc(Assembler::equal, runtime);
+ #ifdef _LP64
+       __ subq(tmp, wordSize);
+       __ movl(queue_index, tmp);
+       __ addq(tmp, buffer);
+ #else
+       __ subl(queue_index, wordSize);
+       __ movl(tmp, buffer);
+       __ addl(tmp, queue_index);
+ #endif
+ 
+       // prev_val (rax)
+       f.load_argument(0, pre_val);
+       __ movptr(Address(tmp, 0), pre_val);
+       __ jmp(done);
+ 
+       __ bind(runtime);
+       // load the pre-value
+       __ push(rcx);
+       f.load_argument(0, rcx);
+       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
+       __ pop(rcx);
+ 
+       __ bind(done);
+       __ pop(rdx);
+       __ pop(rax);
+     }
+     break;
+ 
+   case g1_post_barrier_slow_id:
+     {
+       StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
+ 
+       // arg0: store_address
+       Address store_addr(rbp, 2*BytesPerWord);
+ 
+       BarrierSet* bs = Universe::heap()->barrier_set();
+       CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+       Label done;
+       Label runtime;
+ 
+       // At this point we know new_value is non-NULL and the new_value crosses regions.
+       // Must check to see if card is already dirty
+ 
+       const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
+ 
+       Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()));
+       Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_buf()));
+ 
+       __ push(rax);
+       __ push(rdx);
+ 
+       NOT_LP64(__ get_thread(thread);)
+       ExternalAddress cardtable((address)ct->byte_map_base);
+       assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+ 
+       const Register card_addr = rdx;
+ #ifdef _LP64
+       const Register tmp = rscratch1;
+       f.load_argument(0, card_addr);
+       __ shrq(card_addr, CardTableModRefBS::card_shift);
+       __ lea(tmp, cardtable);
+       // get the address of the card
+       __ addq(card_addr, tmp);
+ #else
+       const Register card_index = rdx;
+       f.load_argument(0, card_index);
+       __ shrl(card_index, CardTableModRefBS::card_shift);
+ 
+       Address index(noreg, card_index, Address::times_1);
+       __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
+ #endif
+ 
+       __ cmpb(Address(card_addr, 0), 0);
+       __ jcc(Assembler::equal, done);
+ 
+       // storing a region-crossing non-NULL oop, card is clean:
+       // dirty card and log.
+ 
+       __ movb(Address(card_addr, 0), 0);
+ 
+       __ cmpl(queue_index, 0);
+       __ jcc(Assembler::equal, runtime);
+       __ subl(queue_index, wordSize);
+ 
+       const Register buffer_addr = rbx;
+       __ push(rbx);
+ 
+       __ movptr(buffer_addr, buffer);
+ 
+ #ifdef _LP64
+       __ movslq(rscratch1, queue_index);
+       __ addptr(buffer_addr, rscratch1);
+ #else
+       __ addptr(buffer_addr, queue_index);
+ #endif
+       __ movptr(Address(buffer_addr, 0), card_addr);
+ 
+       __ pop(rbx);
+       __ jmp(done);
+ 
+       __ bind(runtime);
+       NOT_LP64(__ push(rcx);)
+       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
+       NOT_LP64(__ pop(rcx);)
+ 
+       __ bind(done);
+       __ pop(rdx);
+       __ pop(rax);
+ 
+     }
+     break;
+ #endif // !SERIALGC
+ 
    default:
      {
        StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
!       __ movptr(rax, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
        __ should_not_reach_here();
      }
      break;
    }
    return oop_maps;
  }
  
  #undef __
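Both G1 stubs follow the same queueing discipline: try to log into a thread-local buffer whose index counts down in bytes, and fall into the runtime only when the buffer is full; the post-barrier additionally computes the card address and skips cards that are already dirty. A hedged C++ sketch of that logic (SketchQueue and the dirty value 0 are illustrative stand-ins, not the real PtrQueue/CardTableModRefBS API):

    #include <cstdint>

    // Hedged stand-in for the thread-local PtrQueue pair used above.
    struct SketchQueue {
      intptr_t index;    // byte offset of next free slot; 0 means full
      char*    buffer;   // thread-local log buffer
    };

    static bool try_enqueue(SketchQueue* q, void* value) {
      if (q->index == 0) return false;          // full: caller must call the runtime
      q->index -= sizeof(void*);                // index counts down, like the stub
      *(void**)(q->buffer + q->index) = value;
      return true;
    }

    // Post-barrier sketch: dirty the card for store_addr and log it.
    static void g1_post_barrier_sketch(SketchQueue* dcq, uint8_t* byte_map_base,
                                       void* store_addr, int card_shift) {
      uint8_t* card = byte_map_base + ((uintptr_t)store_addr >> card_shift);
      if (*card == 0) return;                   // already dirty (0 assumed dirty)
      *card = 0;                                // dirty the card
      if (!try_enqueue(dcq, card)) {
        // slow path: cf. SharedRuntime::g1_wb_post(card, thread)
      }
    }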