--- old/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp 2009-08-01 04:07:48.324724475 +0100 +++ new/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp 2009-08-01 04:07:48.231605389 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)c1_Runtime1_x86.cpp 1.197 07/09/17 09:25:58 JVM" -#endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -33,52 +30,58 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) { // setup registers - const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions) + const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions) assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different"); assert(oop_result1 != thread && oop_result2 != thread, "registers must be different"); assert(args_size >= 0, "illegal args_size"); +#ifdef _LP64 + mov(c_rarg0, thread); + set_num_rt_args(0); // Nothing on stack +#else set_num_rt_args(1 + args_size); // push java thread (becomes first argument of C function) get_thread(thread); - pushl(thread); + push(thread); +#endif // _LP64 set_last_Java_frame(thread, noreg, rbp, NULL); + // do the call call(RuntimeAddress(entry)); int call_offset = offset(); // verify callee-saved register #ifdef ASSERT guarantee(thread != rax, "change this code"); - pushl(rax); + push(rax); { Label L; get_thread(rax); - cmpl(thread, rax); + cmpptr(thread, rax); jcc(Assembler::equal, L); int3(); stop("StubAssembler::call_RT: rdi not callee saved?"); bind(L); } - popl(rax); + pop(rax); #endif reset_last_Java_frame(thread, true, false); // discard thread and arguments - addl(rsp, (1 + args_size)*BytesPerWord); + NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord)); // check for pending exceptions { Label L; - cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD); + cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); jcc(Assembler::equal, L); // exception pending => remove activation and forward to exception handler - movl(rax, Address(thread, Thread::pending_exception_offset())); + movptr(rax, Address(thread, Thread::pending_exception_offset())); // make sure that the vm_results are cleared if (oop_result1->is_valid()) { - movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); + movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD); } if (oop_result2->is_valid()) { - movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); + movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD); } if (frame_size() == no_frame_size) { leave(); @@ -92,13 +95,13 @@ } // get oop results if there are any and reset the values in the thread if (oop_result1->is_valid()) { - movl(oop_result1, Address(thread, JavaThread::vm_result_offset())); - movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); + movptr(oop_result1, Address(thread, JavaThread::vm_result_offset())); + movptr(Address(thread, 
JavaThread::vm_result_offset()), (int32_t)NULL_WORD); verify_oop(oop_result1); } if (oop_result2->is_valid()) { - movl(oop_result2, Address(thread, JavaThread::vm_result_2_offset())); - movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); + movptr(oop_result2, Address(thread, JavaThread::vm_result_2_offset())); + movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD); verify_oop(oop_result2); } return call_offset; @@ -106,22 +109,58 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) { - pushl(arg1); +#ifdef _LP64 + mov(c_rarg1, arg1); +#else + push(arg1); +#endif // _LP64 return call_RT(oop_result1, oop_result2, entry, 1); } int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) { - pushl(arg2); - pushl(arg1); +#ifdef _LP64 + if (c_rarg1 == arg2) { + if (c_rarg2 == arg1) { + xchgq(arg1, arg2); + } else { + mov(c_rarg2, arg2); + mov(c_rarg1, arg1); + } + } else { + mov(c_rarg1, arg1); + mov(c_rarg2, arg2); + } +#else + push(arg2); + push(arg1); +#endif // _LP64 return call_RT(oop_result1, oop_result2, entry, 2); } int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) { - pushl(arg3); - pushl(arg2); - pushl(arg1); +#ifdef _LP64 + // if there is any conflict use the stack + if (arg1 == c_rarg2 || arg1 == c_rarg3 || + arg2 == c_rarg1 || arg1 == c_rarg3 || + arg3 == c_rarg1 || arg1 == c_rarg2) { + push(arg3); + push(arg2); + push(arg1); + pop(c_rarg1); + pop(c_rarg2); + pop(c_rarg3); + } else { + mov(c_rarg1, arg1); + mov(c_rarg2, arg2); + mov(c_rarg3, arg3); + } +#else + push(arg3); + push(arg2); + push(arg1); +#endif // _LP64 return call_RT(oop_result1, oop_result2, entry, 3); } @@ -157,7 +196,7 @@ // + 3: argument with offset 1 // + 4: ... - __ movl(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord)); + __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord)); } @@ -173,8 +212,8 @@ #define __ sasm-> -const int float_regs_as_doubles_size_in_words = 16; -const int xmm_regs_as_doubles_size_in_words = 16; +const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2; +const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2; // Stack layout for saving/restoring all the registers needed during a runtime // call (this includes deoptimization) @@ -183,29 +222,61 @@ // but the code in save_live_registers will take the argument count into // account. // +#ifdef _LP64 + #define SLOT2(x) x, + #define SLOT_PER_WORD 2 +#else + #define SLOT2(x) + #define SLOT_PER_WORD 1 +#endif // _LP64 + enum reg_save_layout { - dummy1, - dummy2, + // 64bit needs to keep stack 16 byte aligned. 
So we add some alignment dummies to make that + // happen and will assert if the stack size we create is misaligned +#ifdef _LP64 + align_dummy_0, align_dummy_1, +#endif // _LP64 + dummy1, SLOT2(dummy1H) // 0, 4 + dummy2, SLOT2(dummy2H) // 8, 12 // Two temps to be used as needed by users of save/restore callee registers - temp_2_off, - temp_1_off, - xmm_regs_as_doubles_off, - float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_words, - fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_words, - fpu_state_end_off = fpu_state_off + FPUStateSizeInWords, - marker = fpu_state_end_off, - extra_space_offset, + temp_2_off, SLOT2(temp_2H_off) // 16, 20 + temp_1_off, SLOT2(temp_1H_off) // 24, 28 + xmm_regs_as_doubles_off, // 32 + float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160 + fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224 + // fpu_state_end_off is exclusive + fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352 + marker = fpu_state_end_off, SLOT2(markerH) // 352, 356 + extra_space_offset, // 360 +#ifdef _LP64 + r15_off = extra_space_offset, r15H_off, // 360, 364 + r14_off, r14H_off, // 368, 372 + r13_off, r13H_off, // 376, 380 + r12_off, r12H_off, // 384, 388 + r11_off, r11H_off, // 392, 396 + r10_off, r10H_off, // 400, 404 + r9_off, r9H_off, // 408, 412 + r8_off, r8H_off, // 416, 420 + rdi_off, rdiH_off, // 424, 428 +#else rdi_off = extra_space_offset, - rsi_off, - rbp_off, - rsp_off, - rbx_off, - rdx_off, - rcx_off, - rax_off, - saved_rbp_off, - return_off, - reg_save_frame_size, // As noted: neglects any parameters to runtime +#endif // _LP64 + rsi_off, SLOT2(rsiH_off) // 432, 436 + rbp_off, SLOT2(rbpH_off) // 440, 444 + rsp_off, SLOT2(rspH_off) // 448, 452 + rbx_off, SLOT2(rbxH_off) // 456, 460 + rdx_off, SLOT2(rdxH_off) // 464, 468 + rcx_off, SLOT2(rcxH_off) // 472, 476 + rax_off, SLOT2(raxH_off) // 480, 484 + saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492 + return_off, SLOT2(returnH_off) // 496, 500 + reg_save_frame_size, // As noted: neglects any parameters to runtime // 504 + +#ifdef _WIN64 + c_rarg0_off = rcx_off, +#else + c_rarg0_off = rdi_off, +#endif // WIN64 // equates @@ -232,18 +303,49 @@ static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args, bool save_fpu_registers = true) { - int frame_size = reg_save_frame_size + num_rt_args; // args + thread - sasm->set_frame_size(frame_size); + + // In 64bit all the args are in regs so there are no additional stack slots + LP64_ONLY(num_rt_args = 0); + LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");) + int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread + sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word ); // record saved value locations in an OopMap // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread - OopMap* map = new OopMap(frame_size, 0); + OopMap* map = new OopMap(frame_size_in_slots, 0); map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), 
rsi->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg()); +#ifdef _LP64 + map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg()); + + // This is stupid but needed. + map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next()); + + map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args), r8->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args), r9->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next()); +#endif // _LP64 if (save_fpu_registers) { if (UseSSE < 2) { @@ -291,30 +393,31 @@ bool save_fpu_registers = true) { __ block_comment("save_live_registers"); - int frame_size = reg_save_frame_size + num_rt_args; // args + thread + // 64bit passes the args in regs to the c++ runtime + int frame_size_in_slots = reg_save_frame_size NOT_LP64(+ num_rt_args); // args + thread // frame_size = round_to(frame_size, 4); - sasm->set_frame_size(frame_size); + sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word ); - __ pushad(); // integer registers + __ pusha(); // integer registers // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset"); // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset"); - __ subl(rsp, extra_space_offset * wordSize); + __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); #ifdef ASSERT - __ movl(Address(rsp, marker * wordSize), 0xfeedbeef); + __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); #endif if (save_fpu_registers) { if (UseSSE < 2) { // save FPU stack - __ fnsave(Address(rsp, fpu_state_off * wordSize)); + __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); __ fwait(); #ifdef ASSERT Label ok; - __ cmpw(Address(rsp, fpu_state_off * wordSize), StubRoutines::fpu_cntrl_wrd_std()); + __ cmpw(Address(rsp, fpu_state_off * 
VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std()); __ jccb(Assembler::equal, ok); __ stop("corrupted control word detected"); __ bind(ok); @@ -324,49 +427,59 @@ // since fstp_d can cause FPU stack underflow exceptions. Write it // into the on stack copy and then reload that to make sure that the // current and future values are correct. - __ movw(Address(rsp, fpu_state_off * wordSize), StubRoutines::fpu_cntrl_wrd_std()); - __ frstor(Address(rsp, fpu_state_off * wordSize)); + __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std()); + __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); - // Save the FPU registers in de-opt-able form - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 0)); - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 8)); - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 16)); - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 24)); - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 32)); - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 40)); - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 48)); - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 56)); + // Save the FPU registers in de-opt-able form + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0)); + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8)); + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16)); + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24)); + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32)); + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40)); + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48)); + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56)); } if (UseSSE >= 2) { // save XMM registers // XMM registers can contain float or double values, but this is not known here, // so always save them as doubles. - // note that float values are _not_ converted automatically, so for float values + // note that float values are _not_ converted automatically, so for float values // the second word contains only garbage data. 
- __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 0), xmm0); - __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 8), xmm1); - __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 16), xmm2); - __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 24), xmm3); - __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 32), xmm4); - __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 40), xmm5); - __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 48), xmm6); - __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 56), xmm7); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7); +#ifdef _LP64 + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15); +#endif // _LP64 } else if (UseSSE == 1) { // save XMM registers as float because double not supported without SSE2 - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 0), xmm0); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 8), xmm1); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 16), xmm2); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 24), xmm3); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 32), xmm4); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 40), xmm5); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 48), xmm6); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 56), xmm7); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7); } } // FPU stack must be empty 
now - __ verify_FPU(0, "save_live_registers"); + __ verify_FPU(0, "save_live_registers"); return generate_oop_map(sasm, num_rt_args, save_fpu_registers); } @@ -376,49 +489,59 @@ if (restore_fpu_registers) { if (UseSSE >= 2) { // restore XMM registers - __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * wordSize + 0)); - __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * wordSize + 8)); - __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * wordSize + 16)); - __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * wordSize + 24)); - __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * wordSize + 32)); - __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * wordSize + 40)); - __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * wordSize + 48)); - __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * wordSize + 56)); + __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0)); + __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8)); + __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16)); + __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24)); + __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32)); + __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40)); + __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48)); + __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56)); +#ifdef _LP64 + __ movdbl(xmm8, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64)); + __ movdbl(xmm9, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72)); + __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80)); + __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88)); + __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96)); + __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104)); + __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112)); + __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120)); +#endif // _LP64 } else if (UseSSE == 1) { // restore XMM registers - __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * wordSize + 0)); - __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * wordSize + 8)); - __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * wordSize + 16)); - __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * wordSize + 24)); - __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * wordSize + 32)); - __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * wordSize + 40)); - __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * wordSize + 48)); - __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * wordSize + 56)); + __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0)); + __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8)); + __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16)); + __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24)); + __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32)); + __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40)); + __ 
movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48)); + __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56)); } if (UseSSE < 2) { - __ frstor(Address(rsp, fpu_state_off * wordSize)); + __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); } else { // check that FPU stack is really empty - __ verify_FPU(0, "restore_live_registers"); + __ verify_FPU(0, "restore_live_registers"); } } else { // check that FPU stack is really empty - __ verify_FPU(0, "restore_live_registers"); + __ verify_FPU(0, "restore_live_registers"); } #ifdef ASSERT { Label ok; - __ cmpl(Address(rsp, marker * wordSize), 0xfeedbeef); + __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); __ jcc(Assembler::equal, ok); __ stop("bad offsets in frame"); __ bind(ok); } -#endif +#endif // ASSERT - __ addl(rsp, extra_space_offset * wordSize); + __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); } @@ -426,7 +549,7 @@ __ block_comment("restore_live_registers"); restore_fpu(sasm, restore_fpu_registers); - __ popad(); + __ popa(); } @@ -435,14 +558,35 @@ restore_fpu(sasm, restore_fpu_registers); - __ popl(rdi); - __ popl(rsi); - __ popl(rbp); - __ popl(rbx); // skip this value - __ popl(rbx); - __ popl(rdx); - __ popl(rcx); - __ addl(rsp, 4); +#ifdef _LP64 + __ movptr(r15, Address(rsp, 0)); + __ movptr(r14, Address(rsp, wordSize)); + __ movptr(r13, Address(rsp, 2 * wordSize)); + __ movptr(r12, Address(rsp, 3 * wordSize)); + __ movptr(r11, Address(rsp, 4 * wordSize)); + __ movptr(r10, Address(rsp, 5 * wordSize)); + __ movptr(r9, Address(rsp, 6 * wordSize)); + __ movptr(r8, Address(rsp, 7 * wordSize)); + __ movptr(rdi, Address(rsp, 8 * wordSize)); + __ movptr(rsi, Address(rsp, 9 * wordSize)); + __ movptr(rbp, Address(rsp, 10 * wordSize)); + // skip rsp + __ movptr(rbx, Address(rsp, 12 * wordSize)); + __ movptr(rdx, Address(rsp, 13 * wordSize)); + __ movptr(rcx, Address(rsp, 14 * wordSize)); + + __ addptr(rsp, 16 * wordSize); +#else + + __ pop(rdi); + __ pop(rsi); + __ pop(rbp); + __ pop(rbx); // skip this value + __ pop(rbx); + __ pop(rdx); + __ pop(rcx); + __ addptr(rsp, BytesPerWord); +#endif // _LP64 } @@ -468,10 +612,13 @@ // load argument for exception that is passed as an argument into the stub if (has_argument) { - __ movl(temp_reg, Address(rbp, 2*BytesPerWord)); - __ pushl(temp_reg); +#ifdef _LP64 + __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord)); +#else + __ movptr(temp_reg, Address(rbp, 2*BytesPerWord)); + __ push(temp_reg); +#endif // _LP64 } - int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1); OopMapSet* oop_maps = new OopMapSet(); @@ -489,7 +636,7 @@ const Register exception_pc = rdx; // other registers used in this stub const Register real_return_addr = rbx; - const Register thread = rdi; + const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); __ block_comment("generate_handle_exception"); @@ -506,19 +653,19 @@ __ verify_not_null_oop(exception_oop); // load address of JavaThread object for thread-local data - __ get_thread(thread); + NOT_LP64(__ get_thread(thread);) #ifdef ASSERT - // check that fields in JavaThread for exception oop and issuing pc are + // check that fields in JavaThread for exception oop and issuing pc are // empty before writing to them Label oop_empty; - __ cmpl(Address(thread, JavaThread::exception_oop_offset()), 0); + __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD); __ jcc(Assembler::equal, oop_empty); __ 
stop("exception oop already set"); __ bind(oop_empty); Label pc_empty; - __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0); + __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0); __ jcc(Assembler::equal, pc_empty); __ stop("exception pc already set"); __ bind(pc_empty); @@ -526,17 +673,17 @@ // save exception oop and issuing pc into JavaThread // (exception handler will load it from here) - __ movl(Address(thread, JavaThread::exception_oop_offset()), exception_oop); - __ movl(Address(thread, JavaThread::exception_pc_offset()), exception_pc); + __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop); + __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc); // save real return address (pc that called this stub) - __ movl(real_return_addr, Address(rbp, 1*BytesPerWord)); - __ movl(Address(rsp, temp_1_off * BytesPerWord), real_return_addr); + __ movptr(real_return_addr, Address(rbp, 1*BytesPerWord)); + __ movptr(Address(rsp, temp_1_off * VMRegImpl::stack_slot_size), real_return_addr); // patch throwing pc into return address (has bci & oop map) - __ movl(Address(rbp, 1*BytesPerWord), exception_pc); + __ movptr(Address(rbp, 1*BytesPerWord), exception_pc); - // compute the exception handler. + // compute the exception handler. // the exception oop and the throwing pc are read from the fields in JavaThread int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); oop_maps->add_gc_map(call_offset, oop_map); @@ -551,12 +698,12 @@ // Do we have an exception handler in the nmethod? Label no_handler; Label done; - __ testl(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::zero, no_handler); // exception handler found // patch the return address -> the stub will directly return to the exception handler - __ movl(Address(rbp, 1*BytesPerWord), rax); + __ movptr(Address(rbp, 1*BytesPerWord), rax); // restore registers restore_live_registers(sasm, save_fpu_registers); @@ -566,23 +713,23 @@ __ ret(0); __ bind(no_handler); - // no exception handler found in this method, so the exception is + // no exception handler found in this method, so the exception is // forwarded to the caller (using the unwind code of the nmethod) // there is no need to restore the registers // restore the real return address that was saved before the RT-call - __ movl(real_return_addr, Address(rsp, temp_1_off * BytesPerWord)); - __ movl(Address(rbp, 1*BytesPerWord), real_return_addr); + __ movptr(real_return_addr, Address(rsp, temp_1_off * VMRegImpl::stack_slot_size)); + __ movptr(Address(rbp, 1*BytesPerWord), real_return_addr); // load address of JavaThread object for thread-local data - __ get_thread(thread); + NOT_LP64(__ get_thread(thread);) // restore exception oop into rax, (convention for unwind code) - __ movl(exception_oop, Address(thread, JavaThread::exception_oop_offset())); + __ movptr(exception_oop, Address(thread, JavaThread::exception_oop_offset())); // clear exception fields in JavaThread because they are no longer needed // (fields must be cleared because they are processed by GC otherwise) - __ movl(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD); - __ movl(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD); + __ movptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD); + __ movptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD); // pop the stub frame off __ leave(); @@ -598,22 +745,22 @@ // other registers used in this stub const 
Register exception_pc = rdx; const Register handler_addr = rbx; - const Register thread = rdi; + const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // verify that only rax, is valid at this time __ invalidate_registers(false, true, true, true, true, true); #ifdef ASSERT // check that fields in JavaThread for exception oop and issuing pc are empty - __ get_thread(thread); + NOT_LP64(__ get_thread(thread);) Label oop_empty; - __ cmpl(Address(thread, JavaThread::exception_oop_offset()), 0); + __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0); __ jcc(Assembler::equal, oop_empty); __ stop("exception oop must be empty"); __ bind(oop_empty); Label pc_empty; - __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0); + __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0); __ jcc(Assembler::equal, pc_empty); __ stop("exception pc must be empty"); __ bind(pc_empty); @@ -623,14 +770,14 @@ __ empty_FPU_stack(); // leave activation of nmethod - __ leave(); + __ leave(); // store return address (is on top of stack after leave) - __ movl(exception_pc, Address(rsp, 0)); + __ movptr(exception_pc, Address(rsp, 0)); __ verify_oop(exception_oop); // save exception oop from rax, to stack before call - __ pushl(exception_oop); + __ push(exception_oop); // search the exception handler address of the caller (using the return address) __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), exception_pc); @@ -638,19 +785,19 @@ // only rax, is valid at this time, all other registers have been destroyed by the call __ invalidate_registers(false, true, true, true, true, true); - + // move result of call into correct register - __ movl(handler_addr, rax); + __ movptr(handler_addr, rax); // restore exception oop in rax, (required convention of exception handler) - __ popl(exception_oop); + __ pop(exception_oop); __ verify_oop(exception_oop); - // get throwing pc (= return address). + // get throwing pc (= return address). // rdx has been destroyed by the call, so it must be set again - // the pop is also necessary to simulate the effect of a ret(0) - __ popl(exception_pc); + // the pop is also necessary to simulate the effect of a ret(0) + __ pop(exception_pc); // verify that that there is really a valid exception in rax, __ verify_not_null_oop(exception_oop); @@ -669,7 +816,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { - // use the maximum number of runtime-arguments here because it is difficult to + // use the maximum number of runtime-arguments here because it is difficult to // distinguish each RT-Call. // Note: This number affects also the RT-Call in generate_handle_exception because // the oop-map is shared for all calls. 
@@ -680,12 +827,18 @@
   OopMap* oop_map = save_live_registers(sasm, num_rt_args);

-  __ pushl(rax); // push dummy
+#ifdef _LP64
+  const Register thread = r15_thread;
+  // No need to worry about dummy
+  __ mov(c_rarg0, thread);
+#else
+  __ push(rax); // push dummy
   const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
   // push java thread (becomes first argument of C function)
   __ get_thread(thread);
-  __ pushl(thread);
+  __ push(thread);
+#endif // _LP64
   __ set_last_Java_frame(thread, noreg, rbp, NULL);
   // do the call
   __ call(RuntimeAddress(target));
@@ -694,27 +847,29 @@
   // verify callee-saved register
 #ifdef ASSERT
   guarantee(thread != rax, "change this code");
-  __ pushl(rax);
+  __ push(rax);
   { Label L;
     __ get_thread(rax);
-    __ cmpl(thread, rax);
+    __ cmpptr(thread, rax);
     __ jcc(Assembler::equal, L);
-    __ stop("StubAssembler::call_RT: rdi not callee saved?");
+    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
     __ bind(L);
   }
-  __ popl(rax);
+  __ pop(rax);
 #endif
   __ reset_last_Java_frame(thread, true, false);
-  __ popl(rcx); // discard thread arg
-  __ popl(rcx); // discard dummy
+#ifndef _LP64
+  __ pop(rcx); // discard thread arg
+  __ pop(rcx); // discard dummy
+#endif // _LP64

   // check for pending exceptions
   { Label L;
-    __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
     __ jcc(Assembler::equal, L);
     // exception pending => remove activation and forward to exception handler

-    __ testl(rax, rax);   // have we deoptimized?
+    __ testptr(rax, rax); // have we deoptimized?
     __ jump_cc(Assembler::equal,
                RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

@@ -722,38 +877,38 @@
     // JavaThread, so copy and clear pending exception.
     // load and clear pending exception
-    __ movl(rax, Address(thread, Thread::pending_exception_offset()));
-    __ movl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
+    __ movptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

-    // check that there is really a valid exception
+    // check that there is really a valid exception
     __ verify_not_null_oop(rax);

     // load throwing pc: this is the return address of the stub
-    __ movl(rdx, Address(rsp, return_off * BytesPerWord));
+    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

 #ifdef ASSERT
     // check that fields in JavaThread for exception oop and issuing pc are empty
     Label oop_empty;
-    __ cmpoop(Address(thread, JavaThread::exception_oop_offset()), 0);
+    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
     __ jcc(Assembler::equal, oop_empty);
     __ stop("exception oop must be empty");
     __ bind(oop_empty);

     Label pc_empty;
-    __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0);
+    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
     __ jcc(Assembler::equal, pc_empty);
     __ stop("exception pc must be empty");
     __ bind(pc_empty);
 #endif

     // store exception oop and throwing pc to JavaThread
-    __ movl(Address(thread, JavaThread::exception_oop_offset()), rax);
-    __ movl(Address(thread, JavaThread::exception_pc_offset()), rdx);
+    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
+    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

     restore_live_registers(sasm);

     __ leave();
-    __ addl(rsp, 4); // remove return address from stack
+    __ addptr(rsp, BytesPerWord); // remove return address from stack

     // Forward the exception directly to deopt blob. We can blow no
     // registers and must leave throwing pc on the stack. A patch may
@@ -770,7 +925,7 @@

   Label reexecuteEntry, cont;

-  __ testl(rax, rax);   // have we deoptimized?
+  __ testptr(rax, rax); // have we deoptimized?
   __ jcc(Assembler::equal, cont); // no

   // Will reexecute. Proper return address is already on the stack we just restore
@@ -809,21 +964,21 @@
   // dispatch to the handler if found.  Otherwise unwind and
   // dispatch to the callers exception handler.

-  const Register thread = rdi;
+  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
   const Register exception_oop = rax;
   const Register exception_pc = rdx;

   // load pending exception oop into rax,
-  __ movl(exception_oop, Address(thread, Thread::pending_exception_offset()));
+  __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
   // clear pending exception
-  __ movl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+  __ movptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

   // load issuing PC (the return address for this stub) into rdx
-  __ movl(exception_pc, Address(rbp, 1*BytesPerWord));
+  __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

   // make sure that the vm_results are cleared (may be unnecessary)
-  __ movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
-  __ movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
+  __ movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
+  __ movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD);

   // verify that that there is really a valid exception in rax,
   __ verify_not_null_oop(exception_oop);
@@ -851,7 +1006,7 @@
         assert(id == fast_new_instance_init_check_id, "bad StubID");
         __ set_info("fast new_instance init check", dont_gc_arguments);
       }
-
+
       if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
          UseTLAB && FastTLABRefill) {
         Label slow_path;
         Register obj_size = rcx;
         Register t1       = rbx;
         Register t2       = rsi;
         assert_different_registers(klass, obj, obj_size, t1, t2);
-
-        __ pushl(rdi);
-        __ pushl(rbx);
+
+        __ push(rdi);
+        __ push(rbx);

         if (id == fast_new_instance_init_check_id) {
           // make sure the klass is initialized
@@ -889,33 +1044,33 @@
         // refilling the TLAB or allocating directly from eden.
         Label retry_tlab, try_eden;
         __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass)
-
+
         __ bind(retry_tlab);

-        // get the instance size
+        // get the instance size (size is positive so movl is fine for 64bit)
         __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
         __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
         __ initialize_object(obj, klass, obj_size, 0, t1, t2);
         __ verify_oop(obj);
-        __ popl(rbx);
-        __ popl(rdi);
+        __ pop(rbx);
+        __ pop(rdi);
         __ ret(0);

         __ bind(try_eden);
-        // get the instance size
+        // get the instance size (size is positive so movl is fine for 64bit)
         __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
         __ eden_allocate(obj, obj_size, 0, t1, slow_path);
         __ initialize_object(obj, klass, obj_size, 0, t1, t2);
         __ verify_oop(obj);
-        __ popl(rbx);
-        __ popl(rdi);
+        __ pop(rbx);
+        __ pop(rdi);
         __ ret(0);

         __ bind(slow_path);
-        __ popl(rbx);
-        __ popl(rdi);
+        __ pop(rbx);
+        __ pop(rdi);
       }
-
+
       __ enter();
       OopMap* map = save_live_registers(sasm, 2);
       int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
@@ -951,7 +1106,7 @@

     case new_type_array_id:
     case new_object_array_id:
-      {
+      {
         Register length   = rbx; // Incoming
         Register klass    = rdx; // Incoming
         Register obj      = rax; // Result
@@ -995,19 +1150,21 @@
         // refilling the TLAB or allocating directly from eden.
         Label retry_tlab, try_eden;
         __ tlab_refill(retry_tlab, try_eden, slow_path);  // preserves rbx, & rdx
-
+
         __ bind(retry_tlab);
         // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
+        // since size is positive movl does right thing on 64bit
         __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
+        // since size is positive movl does right thing on 64bit
         __ movl(arr_size, length);
         assert(t1 == rcx, "fixed register usage");
-        __ shll(arr_size /* by t1=rcx, mod 32 */);
-        __ shrl(t1, Klass::_lh_header_size_shift);
-        __ andl(t1, Klass::_lh_header_size_mask);
-        __ addl(arr_size, t1);
-        __ addl(arr_size, MinObjAlignmentInBytesMask); // align up
-        __ andl(arr_size, ~MinObjAlignmentInBytesMask);
+        __ shlptr(arr_size /* by t1=rcx, mod 32 */);
+        __ shrptr(t1, Klass::_lh_header_size_shift);
+        __ andptr(t1, Klass::_lh_header_size_mask);
+        __ addptr(arr_size, t1);
+        __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
+        __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

         __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size
@@ -1015,24 +1172,26 @@
         __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
         assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
         assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
-        __ andl(t1, Klass::_lh_header_size_mask);
-        __ subl(arr_size, t1);  // body length
-        __ addl(t1, obj);       // body start
+        __ andptr(t1, Klass::_lh_header_size_mask);
+        __ subptr(arr_size, t1);  // body length
+        __ addptr(t1, obj);       // body start
         __ initialize_body(t1, arr_size, 0, t2);
         __ verify_oop(obj);
         __ ret(0);

         __ bind(try_eden);
         // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
+        // since size is positive movl does right thing on 64bit
         __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
+        // since size is positive movl does right thing on 64bit
         __ movl(arr_size, length);
         assert(t1 == rcx, "fixed register usage");
-        __ shll(arr_size /* by t1=rcx, mod 32 */);
-        __ shrl(t1, Klass::_lh_header_size_shift);
-        __ andl(t1, Klass::_lh_header_size_mask);
-        __ addl(arr_size, t1);
-        __ addl(arr_size, MinObjAlignmentInBytesMask); // align up
-        __ andl(arr_size, ~MinObjAlignmentInBytesMask);
+        __ shlptr(arr_size /* by t1=rcx, mod 32 */);
+        __ shrptr(t1, Klass::_lh_header_size_shift);
+        __ andptr(t1, Klass::_lh_header_size_mask);
+        __ addptr(arr_size, t1);
+        __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
+        __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

         __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
@@ -1040,9 +1199,9 @@
         __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
         assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
         assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
-        __ andl(t1, Klass::_lh_header_size_mask);
-        __ subl(arr_size, t1);  // body length
-        __ addl(t1, obj);       // body start
+        __ andptr(t1, Klass::_lh_header_size_mask);
+        __ subptr(arr_size, t1);  // body length
+        __ addptr(t1, obj);       // body start
         __ initialize_body(t1, arr_size, 0, t2);
         __ verify_oop(obj);
         __ ret(0);
@@ -1091,16 +1250,24 @@

     case register_finalizer_id:
       {
         __ set_info("register_finalizer", dont_gc_arguments);
-
+
+        // This is called via call_runtime so the arguments
+        // will be placed in C abi locations
+
+#ifdef _LP64
+        __ verify_oop(c_rarg0);
+        __ mov(rax, c_rarg0);
+#else
         // The object is passed on the stack and we haven't pushed a
         // frame yet so it's one work away from top of stack.
-        __ movl(rax, Address(rsp, 1 * BytesPerWord));
+        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
         __ verify_oop(rax);
+#endif // _LP64

         // load the klass and check the has finalizer flag
         Label register_finalizer;
         Register t = rsi;
-        __ movl(t, Address(rax, oopDesc::klass_offset_in_bytes()));
+        __ movptr(t, Address(rax, oopDesc::klass_offset_in_bytes()));
         __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
         __ testl(t, JVM_ACC_HAS_FINALIZER);
         __ jcc(Assembler::notZero, register_finalizer);
@@ -1188,46 +1355,49 @@

     case slow_subtype_check_id:
       {
         enum layout {
-          rax_off,
-          rcx_off,
-          rsi_off,
-          rdi_off,
-          saved_rbp_off,
-          return_off,
-          sub_off,
-          super_off,
+          rax_off, SLOT2(raxH_off)
+          rcx_off, SLOT2(rcxH_off)
+          rsi_off, SLOT2(rsiH_off)
+          rdi_off, SLOT2(rdiH_off)
+          // saved_rbp_off, SLOT2(saved_rbpH_off)
+          return_off, SLOT2(returnH_off)
+          sub_off, SLOT2(subH_off)
+          super_off, SLOT2(superH_off)
          framesize
         };
-
+
         __ set_info("slow_subtype_check", dont_gc_arguments);
-        __ pushl(rdi);
-        __ pushl(rsi);
-        __ pushl(rcx);
-        __ pushl(rax);
-        __ movl(rsi, Address(rsp, (super_off - 1) * BytesPerWord)); // super
-        __ movl(rax, Address(rsp, (sub_off - 1) * BytesPerWord)); // sub
-
-        __ movl(rdi,Address(rsi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()));
-        __ movl(rcx,Address(rdi,arrayOopDesc::length_offset_in_bytes()));
-        __ addl(rdi,arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+        __ push(rdi);
+        __ push(rsi);
+        __ push(rcx);
+        __ push(rax);
+
+        // This is called by pushing args and not with C abi
+        __ movptr(rsi, Address(rsp, (super_off) * VMRegImpl::stack_slot_size)); // super
+        __ movptr(rax, Address(rsp, (sub_off ) * VMRegImpl::stack_slot_size)); // sub
+
+        __ movptr(rdi,Address(rsi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()));
+        // since size is positive movl does right thing on 64bit
+        __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
+        __ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

         Label miss;
         __ repne_scan();
         __ jcc(Assembler::notEqual, miss);
-        __ movl(Address(rsi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax);
-        __ movl(Address(rsp, (super_off - 1) * BytesPerWord), 1); // result
-        __ popl(rax);
-        __ popl(rcx);
-        __ popl(rsi);
-        __ popl(rdi);
+        __ movptr(Address(rsi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax);
+        __ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), 1); // result
+        __ pop(rax);
+        __ pop(rcx);
+        __ pop(rsi);
+        __ pop(rdi);
         __ ret(0);

         __ bind(miss);
-        __ movl(Address(rsp, (super_off - 1) * BytesPerWord), 0); // result
-        __ popl(rax);
-        __ popl(rcx);
-        __ popl(rsi);
-        __ popl(rdi);
+        __ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), 0); // result
+        __ pop(rax);
+        __ pop(rcx);
+        __ pop(rsi);
+        __ pop(rdi);
         __ ret(0);
       }
       break;
@@ -1240,11 +1410,13 @@
         StubFrame f(sasm, "monitorenter", dont_gc_arguments);
         OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);

+        // Called with store_parameter and not C abi
+
         f.load_argument(1, rax); // rax,: object
         f.load_argument(0, rbx); // rbx,: lock address

         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);
-
+
         oop_maps = new OopMapSet();
         oop_maps->add_gc_map(call_offset, map);
         restore_live_registers(sasm, save_fpu_registers);
@@ -1255,12 +1427,14 @@
       save_fpu_registers = false;
       // fall through
     case monitorexit_id:
-      {
+      {
         StubFrame f(sasm, "monitorexit", dont_gc_arguments);
         OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);

+        // Called with store_parameter and not C abi
+
         f.load_argument(0, rax); // rax,: lock address
-
+
         // note: really a leaf routine but must setup last java sp
         //       => use call_RT for now (speed can be improved by
         //       doing last java sp setup manually)
@@ -1279,7 +1453,7 @@
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
       }
       break;
-
+
     case load_klass_patching_id:
       { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
         // we should set up register map
@@ -1307,9 +1481,9 @@
        // the live registers get saved.
        save_live_registers(sasm, 1);

-       __ pushl(rax);
+       __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
-       __ popl(rax);
+       NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
@@ -1319,18 +1493,19 @@
      {
        // rax, and rdx are destroyed, but should be free since the result is returned there
        // preserve rsi,ecx
-       __ pushl(rsi);
-       __ pushl(rcx);
-
+       __ push(rsi);
+       __ push(rcx);
+       LP64_ONLY(__ push(rdx);)
+
        // check for NaN
        Label return0, do_return, return_min_jlong, do_convert;
-
-       Address value_high_word(rsp, 8);
-       Address value_low_word(rsp, 4);
-       Address result_high_word(rsp, 16);
-       Address result_low_word(rsp, 12);
-
-       __ subl(rsp, 20);
+
+       Address value_high_word(rsp, wordSize + 4);
+       Address value_low_word(rsp, wordSize);
+       Address result_high_word(rsp, 3*wordSize + 4);
+       Address result_low_word(rsp, 3*wordSize);
+
+       __ subptr(rsp, 32);                    // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
@@ -1340,10 +1515,10 @@
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);
-
+
        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
-       __ movzxw(rax, Address(rsp, 0));
+       __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
@@ -1351,9 +1526,11 @@
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
-       __ movl(rax, result_low_word);
+       // This gets the entire long in rax on 64bit
+       __ movptr(rax, result_low_word);
+       // testing of high bits
        __ movl(rdx, result_high_word);
-       __ movl(rcx, rax);
+       __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
@@ -1363,34 +1540,212 @@
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
+#ifdef _LP64
+       __ testl(rax, 0x4100);  // ZF & CF == 0
+       __ jcc(Assembler::equal, return_min_jlong);
+#else
        __ sahf();
        __ jcc(Assembler::above, return_min_jlong);
+#endif // _LP64
        // return max_jlong
+#ifndef _LP64
        __ movl(rdx, 0x7fffffff);
        __ movl(rax, 0xffffffff);
+#else
+       __ mov64(rax, CONST64(0x7fffffffffffffff));
+#endif // _LP64
        __ jmp(do_return);
-
+
        __ bind(return_min_jlong);
+#ifndef _LP64
        __ movl(rdx, 0x80000000);
        __ xorl(rax, rax);
+#else
+       __ mov64(rax, CONST64(0x8000000000000000));
+#endif // _LP64
        __ jmp(do_return);
-
+
        __ bind(return0);
        __ fpop();
-       __ xorl(rdx,rdx);
-       __ xorl(rax,rax);
-
+#ifndef _LP64
+       __ xorptr(rdx,rdx);
+       __ xorptr(rax,rax);
+#else
+       __ xorptr(rax, rax);
+#endif // _LP64
+
        __ bind(do_return);
-       __ addl(rsp, 20);
-       __ popl(rcx);
-       __ popl(rsi);
+       __ addptr(rsp, 32);
+       LP64_ONLY(__ pop(rdx);)
+       __ pop(rcx);
+       __ pop(rsi);
        __ ret(0);
      }
      break;
-
+
+#ifndef SERIALGC
+    case g1_pre_barrier_slow_id:
+      {
+        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
+        // arg0 : previous value of memory
+
+        BarrierSet* bs = Universe::heap()->barrier_set();
+        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
+          __ movptr(rax, (int)id);
+          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
+          __ should_not_reach_here();
+          break;
+        }
+
+        __ push(rax);
+        __ push(rdx);
+
+        const Register pre_val = rax;
+        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
+        const Register tmp = rdx;
+
+        NOT_LP64(__ get_thread(thread);)
+
+        Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
+                                             PtrQueue::byte_offset_of_active()));
+
+        Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
+                                             PtrQueue::byte_offset_of_index()));
+        Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
+                                        PtrQueue::byte_offset_of_buf()));
+
+
+        Label done;
+        Label runtime;
+
+        // Can we store original value in the thread's buffer?
+
+        LP64_ONLY(__ movslq(tmp, queue_index);)
+#ifdef _LP64
+        __ cmpq(tmp, 0);
+#else
+        __ cmpl(queue_index, 0);
+#endif
+        __ jcc(Assembler::equal, runtime);
+#ifdef _LP64
+        __ subq(tmp, wordSize);
+        __ movl(queue_index, tmp);
+        __ addq(tmp, buffer);
+#else
+        __ subl(queue_index, wordSize);
+        __ movl(tmp, buffer);
+        __ addl(tmp, queue_index);
+#endif
+
+        // prev_val (rax)
+        f.load_argument(0, pre_val);
+        __ movptr(Address(tmp, 0), pre_val);
+        __ jmp(done);
+
+        __ bind(runtime);
+        // load the pre-value
+        __ push(rcx);
+        f.load_argument(0, rcx);
+        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
+        __ pop(rcx);
+
+        __ bind(done);
+        __ pop(rdx);
+        __ pop(rax);
+      }
+      break;
+
+    case g1_post_barrier_slow_id:
+      {
+        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
+
+
+        // arg0: store_address
+        Address store_addr(rbp, 2*BytesPerWord);
+
+        BarrierSet* bs = Universe::heap()->barrier_set();
+        CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+        Label done;
+        Label runtime;
+
+        // At this point we know new_value is non-NULL and the new_value crosses regions.
+        // Must check to see if card is already dirty
+
+        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
+
+        Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
+                                             PtrQueue::byte_offset_of_index()));
+        Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
+                                        PtrQueue::byte_offset_of_buf()));
+
+        __ push(rax);
+        __ push(rdx);
+
+        NOT_LP64(__ get_thread(thread);)
+        ExternalAddress cardtable((address)ct->byte_map_base);
+        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+
+        const Register card_addr = rdx;
+#ifdef _LP64
+        const Register tmp = rscratch1;
+        f.load_argument(0, card_addr);
+        __ shrq(card_addr, CardTableModRefBS::card_shift);
+        __ lea(tmp, cardtable);
+        // get the address of the card
+        __ addq(card_addr, tmp);
+#else
+        const Register card_index = rdx;
+        f.load_argument(0, card_index);
+        __ shrl(card_index, CardTableModRefBS::card_shift);
+
+        Address index(noreg, card_index, Address::times_1);
+        __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
+#endif
+
+        __ cmpb(Address(card_addr, 0), 0);
+        __ jcc(Assembler::equal, done);
+
+        // storing region crossing non-NULL, card is clean.
+        // dirty card and log.
+
+        __ movb(Address(card_addr, 0), 0);
+
+        __ cmpl(queue_index, 0);
+        __ jcc(Assembler::equal, runtime);
+        __ subl(queue_index, wordSize);
+
+        const Register buffer_addr = rbx;
+        __ push(rbx);
+
+        __ movptr(buffer_addr, buffer);
+
+#ifdef _LP64
+        __ movslq(rscratch1, queue_index);
+        __ addptr(buffer_addr, rscratch1);
+#else
+        __ addptr(buffer_addr, queue_index);
+#endif
+        __ movptr(Address(buffer_addr, 0), card_addr);
+
+        __ pop(rbx);
+        __ jmp(done);
+
+        __ bind(runtime);
+        NOT_LP64(__ push(rcx);)
+        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
+        NOT_LP64(__ pop(rcx);)
+
+        __ bind(done);
+        __ pop(rdx);
+        __ pop(rax);
+
+      }
+      break;
+#endif // !SERIALGC
+
    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
-       __ movl(rax, (int)id);
+       __ movptr(rax, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
        __ should_not_reach_here();
      }
@@ -1400,4 +1755,3 @@
   }

 #undef __
-