hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp

rev 611 : Merge

*** 1,10 **** - #ifdef USE_PRAGMA_IDENT_SRC - #pragma ident "@(#)stubGenerator_x86_32.cpp 1.96 07/11/08 08:17:08 JVM" - #endif /* ! * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. --- 1,7 ---- /* ! * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation.
*** 31,40 **** --- 28,38 ---- // Declaration and definition of StubGenerator (no .hpp file). // For a more detailed description of the stub routine structure // see the comment in stubRoutines.hpp #define __ _masm-> + #define a__ ((Assembler*)_masm)-> #ifdef PRODUCT #define BLOCK_COMMENT(str) /* nothing */ #else #define BLOCK_COMMENT(str) __ block_comment(str)
*** 68,78 **** #ifdef PRODUCT #define inc_counter_np(counter) (0) #else void inc_counter_np_(int& counter) { ! __ increment(ExternalAddress((address)&counter)); } #define inc_counter_np(counter) \ BLOCK_COMMENT("inc_counter " #counter); \ inc_counter_np_(counter); #endif //PRODUCT --- 66,76 ---- #ifdef PRODUCT #define inc_counter_np(counter) (0) #else void inc_counter_np_(int& counter) { ! __ incrementl(ExternalAddress((address)&counter)); } #define inc_counter_np(counter) \ BLOCK_COMMENT("inc_counter " #counter); \ inc_counter_np_(counter); #endif //PRODUCT
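The inc_counter_np macro in this hunk follows the usual PRODUCT/non-PRODUCT pattern: a no-op in product builds, a plain counter bump otherwise. A minimal stand-alone sketch of that pattern (the macro name below is illustrative, not HotSpot's):

    // Compiles away entirely in product builds; bumps an ordinary int otherwise.
    #ifdef PRODUCT
    #define INC_COUNTER_SKETCH(c) ((void)0)
    #else
    #define INC_COUNTER_SKETCH(c) (++(c))
    #endif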
*** 138,157 **** const Address thread (rbp, 9 * wordSize); // same as in generate_catch_exception()! sse_save = UseSSE > 0; // stub code __ enter(); ! __ movl(rcx, parameter_size); // parameter counter ! __ shll(rcx, Interpreter::logStackElementSize()); // convert parameter count to bytes ! __ addl(rcx, locals_count_in_bytes); // reserve space for register saves ! __ subl(rsp, rcx); ! __ andl(rsp, -(StackAlignmentInBytes)); // Align stack // save rdi, rsi, & rbx, according to C calling conventions ! __ movl(saved_rdi, rdi); ! __ movl(saved_rsi, rsi); ! __ movl(saved_rbx, rbx); // save and initialize %mxcsr if (sse_save) { Label skip_ldmx; __ stmxcsr(mxcsr_save); __ movl(rax, mxcsr_save); --- 136,155 ---- const Address thread (rbp, 9 * wordSize); // same as in generate_catch_exception()! sse_save = UseSSE > 0; // stub code __ enter(); ! __ movptr(rcx, parameter_size); // parameter counter ! __ shlptr(rcx, Interpreter::logStackElementSize()); // convert parameter count to bytes ! __ addptr(rcx, locals_count_in_bytes); // reserve space for register saves ! __ subptr(rsp, rcx); ! __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack // save rdi, rsi, & rbx, according to C calling conventions ! __ movptr(saved_rdi, rdi); ! __ movptr(saved_rsi, rsi); ! __ movptr(saved_rbx, rbx); // save and initialize %mxcsr if (sse_save) { Label skip_ldmx; __ stmxcsr(mxcsr_save); __ movl(rax, mxcsr_save);
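The movl/shll/addl/subl/andl calls in this hunk become movptr/shlptr/addptr/subptr/andptr so one stub-generator source can emit pointer-width instructions on either x86 port. A minimal sketch of that dispatch idea, assuming an _LP64 define; the class and method bodies below are illustrative, not the real MacroAssembler:

    // One call site, two encodings: 32-bit on x86_32, 64-bit on x86_64.
    class AsmSketch {
     public:
      void movl(int reg, long v) { (void)reg; (void)v; /* would emit a 32-bit move */ }
      void movq(int reg, long v) { (void)reg; (void)v; /* would emit a 64-bit move */ }
      void movptr(int reg, long v) {
    #ifdef _LP64
        movq(reg, v);   // pointers are 8 bytes
    #else
        movl(reg, v);   // pointers are 4 bytes
    #endif
      }
    };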
*** 167,178 **** __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); #ifdef ASSERT // make sure we have no pending exceptions { Label L; ! __ movl(rcx, thread); ! __ cmpl(Address(rcx, Thread::pending_exception_offset()), NULL_WORD); __ jcc(Assembler::equal, L); __ stop("StubRoutines::call_stub: entered with pending exception"); __ bind(L); } #endif --- 165,176 ---- __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); #ifdef ASSERT // make sure we have no pending exceptions { Label L; ! __ movptr(rcx, thread); ! __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::equal, L); __ stop("StubRoutines::call_stub: entered with pending exception"); __ bind(L); } #endif
*** 190,223 **** // Copy Java parameters in reverse order (receiver last) // Note that the argument order is inverted in the process // source is rdx[rcx: N-1..0] // dest is rsp[rbx: 0..N-1] ! __ movl(rdx, parameters); // parameter pointer ! __ xorl(rbx, rbx); __ BIND(loop); if (TaggedStackInterpreter) { ! __ movl(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -2*wordSize)); // get tag ! __ movl(Address(rsp, rbx, Interpreter::stackElementScale(), Interpreter::expr_tag_offset_in_bytes(0)), rax); // store tag } // get parameter ! __ movl(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize)); ! __ movl(Address(rsp, rbx, Interpreter::stackElementScale(), Interpreter::expr_offset_in_bytes(0)), rax); // store parameter __ increment(rbx); __ decrement(rcx); __ jcc(Assembler::notZero, loop); // call Java function __ BIND(parameters_done); ! __ movl(rbx, method); // get methodOop ! __ movl(rax, entry_point); // get entry_point ! __ movl(rsi, rsp); // set sender sp BLOCK_COMMENT("call Java function"); __ call(rax); BLOCK_COMMENT("call_stub_return_address:"); return_address = __ pc(); --- 188,221 ---- // Copy Java parameters in reverse order (receiver last) // Note that the argument order is inverted in the process // source is rdx[rcx: N-1..0] // dest is rsp[rbx: 0..N-1] ! __ movptr(rdx, parameters); // parameter pointer ! __ xorptr(rbx, rbx); __ BIND(loop); if (TaggedStackInterpreter) { ! __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -2*wordSize)); // get tag ! __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(), Interpreter::expr_tag_offset_in_bytes(0)), rax); // store tag } // get parameter ! __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize)); ! __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(), Interpreter::expr_offset_in_bytes(0)), rax); // store parameter __ increment(rbx); __ decrement(rcx); __ jcc(Assembler::notZero, loop); // call Java function __ BIND(parameters_done); ! __ movptr(rbx, method); // get methodOop ! __ movptr(rax, entry_point); // get entry_point ! __ mov(rsi, rsp); // set sender sp BLOCK_COMMENT("call Java function"); __ call(rax); BLOCK_COMMENT("call_stub_return_address:"); return_address = __ pc();
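The loop in this hunk copies the Java arguments onto the expression stack in reverse order (receiver last). A rough C++ rendering of what the emitted loop does, with hypothetical names:

    #include <cstdint>
    #include <cstddef>

    // source is params[n-1 .. 0], destination is java_stack[0 .. n-1],
    // so the argument order is inverted while copying.
    void copy_parameters_sketch(const intptr_t* params, intptr_t* java_stack, size_t n) {
      for (size_t i = 0; i < n; i++) {
        java_stack[i] = params[n - 1 - i];
      }
    }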
*** 226,236 **** __ BIND(common_return); // store result depending on type // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) ! __ movl(rdi, result); Label is_long, is_float, is_double, exit; __ movl(rsi, result_type); __ cmpl(rsi, T_LONG); __ jcc(Assembler::equal, is_long); __ cmpl(rsi, T_FLOAT); --- 224,234 ---- __ BIND(common_return); // store result depending on type // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) ! __ movptr(rdi, result); Label is_long, is_float, is_double, exit; __ movl(rsi, result_type); __ cmpl(rsi, T_LONG); __ jcc(Assembler::equal, is_long); __ cmpl(rsi, T_FLOAT);
*** 244,268 **** // check that FPU stack is empty __ verify_FPU(0, "generate_call_stub"); // pop parameters ! __ leal(rsp, rsp_after_call); // restore %mxcsr if (sse_save) { __ ldmxcsr(mxcsr_save); } // restore rdi, rsi and rbx, ! __ movl(rbx, saved_rbx); ! __ movl(rsi, saved_rsi); ! __ movl(rdi, saved_rdi); ! __ addl(rsp, 4*wordSize); // return ! __ popl(rbp); __ ret(0); // handle return types different from T_INT __ BIND(is_long); __ movl(Address(rdi, 0 * wordSize), rax); --- 242,266 ---- // check that FPU stack is empty __ verify_FPU(0, "generate_call_stub"); // pop parameters ! __ lea(rsp, rsp_after_call); // restore %mxcsr if (sse_save) { __ ldmxcsr(mxcsr_save); } // restore rdi, rsi and rbx, ! __ movptr(rbx, saved_rbx); ! __ movptr(rsi, saved_rsi); ! __ movptr(rdi, saved_rdi); ! __ addptr(rsp, 4*wordSize); // return ! __ pop(rbp); __ ret(0); // handle return types different from T_INT __ BIND(is_long); __ movl(Address(rdi, 0 * wordSize), rax);
*** 292,302 **** // piece of code that can handle compiled results and cleaning the fpu // stack. compiled code will be set to return here instead of the // return above that handles interpreter returns. BLOCK_COMMENT("call_stub_compiled_return:"); ! StubRoutines::i486::set_call_stub_compiled_return( __ pc()); #ifdef COMPILER2 if (UseSSE >= 2) { __ verify_FPU(0, "call_stub_compiled_return"); } else { --- 290,300 ---- // piece of code that can handle compiled results and cleaning the fpu // stack. compiled code will be set to return here instead of the // return above that handles interpreter returns. BLOCK_COMMENT("call_stub_compiled_return:"); ! StubRoutines::x86::set_call_stub_compiled_return( __ pc()); #ifdef COMPILER2 if (UseSSE >= 2) { __ verify_FPU(0, "call_stub_compiled_return"); } else {
*** 338,361 **** const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_call_stub()! const Address thread (rbp, 9 * wordSize); // same as in generate_call_stub()! address start = __ pc(); // get thread directly ! __ movl(rcx, thread); #ifdef ASSERT // verify that threads correspond { Label L; __ get_thread(rbx); ! __ cmpl(rbx, rcx); __ jcc(Assembler::equal, L); __ stop("StubRoutines::catch_exception: threads must correspond"); __ bind(L); } #endif // set pending exception __ verify_oop(rax); ! __ movl(Address(rcx, Thread::pending_exception_offset()), rax ); __ lea(Address(rcx, Thread::exception_file_offset ()), ExternalAddress((address)__FILE__)); __ movl(Address(rcx, Thread::exception_line_offset ()), __LINE__ ); // complete return to VM assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before"); --- 336,359 ---- const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_call_stub()! const Address thread (rbp, 9 * wordSize); // same as in generate_call_stub()! address start = __ pc(); // get thread directly ! __ movptr(rcx, thread); #ifdef ASSERT // verify that threads correspond { Label L; __ get_thread(rbx); ! __ cmpptr(rbx, rcx); __ jcc(Assembler::equal, L); __ stop("StubRoutines::catch_exception: threads must correspond"); __ bind(L); } #endif // set pending exception __ verify_oop(rax); ! __ movptr(Address(rcx, Thread::pending_exception_offset()), rax ); __ lea(Address(rcx, Thread::exception_file_offset ()), ExternalAddress((address)__FILE__)); __ movl(Address(rcx, Thread::exception_line_offset ()), __LINE__ ); // complete return to VM assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before");
*** 390,422 **** #ifdef ASSERT // make sure this code is only executed if there is a pending exception { Label L; __ get_thread(rcx); ! __ cmpl(Address(rcx, Thread::pending_exception_offset()), NULL_WORD); __ jcc(Assembler::notEqual, L); __ stop("StubRoutines::forward exception: no pending exception (1)"); __ bind(L); } #endif // compute exception handler into rbx, ! __ movl(rax, Address(rsp, 0)); BLOCK_COMMENT("call exception_handler_for_return_address"); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rax); ! __ movl(rbx, rax); // setup rax, & rdx, remove return address & clear pending exception __ get_thread(rcx); ! __ popl(rdx); ! __ movl(rax, Address(rcx, Thread::pending_exception_offset())); ! __ movl(Address(rcx, Thread::pending_exception_offset()), NULL_WORD); #ifdef ASSERT // make sure exception is set { Label L; ! __ testl(rax, rax); __ jcc(Assembler::notEqual, L); __ stop("StubRoutines::forward exception: no pending exception (2)"); __ bind(L); } #endif --- 388,420 ---- #ifdef ASSERT // make sure this code is only executed if there is a pending exception { Label L; __ get_thread(rcx); ! __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::notEqual, L); __ stop("StubRoutines::forward exception: no pending exception (1)"); __ bind(L); } #endif // compute exception handler into rbx, ! __ movptr(rax, Address(rsp, 0)); BLOCK_COMMENT("call exception_handler_for_return_address"); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rax); ! __ mov(rbx, rax); // setup rax, & rdx, remove return address & clear pending exception __ get_thread(rcx); ! __ pop(rdx); ! __ movptr(rax, Address(rcx, Thread::pending_exception_offset())); ! __ movptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD); #ifdef ASSERT // make sure exception is set { Label L; ! __ testptr(rax, rax); __ jcc(Assembler::notEqual, L); __ stop("StubRoutines::forward exception: no pending exception (2)"); __ bind(L); } #endif
*** 448,464 **** address generate_atomic_xchg() { StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); address start = __ pc(); ! __ pushl(rdx); Address exchange(rsp, 2 * wordSize); Address dest_addr(rsp, 3 * wordSize); __ movl(rax, exchange); ! __ movl(rdx, dest_addr); ! __ xchg(rax, Address(rdx, 0)); ! __ popl(rdx); __ ret(0); return start; } --- 446,462 ---- address generate_atomic_xchg() { StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); address start = __ pc(); ! __ push(rdx); Address exchange(rsp, 2 * wordSize); Address dest_addr(rsp, 3 * wordSize); __ movl(rax, exchange); ! __ movptr(rdx, dest_addr); ! __ xchgl(rax, Address(rdx, 0)); ! __ pop(rdx); __ ret(0); return start; }
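The atomic_xchg stub loads the new value and destination address from the caller's stack and uses xchgl, which is implicitly locked on x86. Expressed with std::atomic rather than assembly, the operation it provides is roughly:

    #include <atomic>
    #include <cstdint>

    // Atomically stores exchange_value into *dest and returns the previous value.
    int32_t atomic_xchg_sketch(int32_t exchange_value, std::atomic<int32_t>* dest) {
      return dest->exchange(exchange_value, std::memory_order_seq_cst);
    }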
*** 477,488 **** const Address mxcsr_save(rsp, 0); if (CheckJNICalls && UseSSE > 0 ) { Label ok_ret; ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); ! __ pushl(rax); ! __ subl(rsp, wordSize); // allocate a temp location __ stmxcsr(mxcsr_save); __ movl(rax, mxcsr_save); __ andl(rax, MXCSR_MASK); __ cmp32(rax, mxcsr_std); __ jcc(Assembler::equal, ok_ret); --- 475,486 ---- const Address mxcsr_save(rsp, 0); if (CheckJNICalls && UseSSE > 0 ) { Label ok_ret; ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); ! __ push(rax); ! __ subptr(rsp, wordSize); // allocate a temp location __ stmxcsr(mxcsr_save); __ movl(rax, mxcsr_save); __ andl(rax, MXCSR_MASK); __ cmp32(rax, mxcsr_std); __ jcc(Assembler::equal, ok_ret);
*** 490,501 **** __ warn("MXCSR changed by native JNI code."); __ ldmxcsr(mxcsr_std); __ bind(ok_ret); ! __ addl(rsp, wordSize); ! __ popl(rax); } __ ret(0); return start; --- 488,499 ---- __ warn("MXCSR changed by native JNI code."); __ ldmxcsr(mxcsr_std); __ bind(ok_ret); ! __ addptr(rsp, wordSize); ! __ pop(rax); } __ ret(0); return start;
*** 515,526 **** const Address fpu_cntrl_wrd_save(rsp, 0); if (CheckJNICalls) { Label ok_ret; ! __ pushl(rax); ! __ subl(rsp, wordSize); // allocate a temp location __ fnstcw(fpu_cntrl_wrd_save); __ movl(rax, fpu_cntrl_wrd_save); __ andl(rax, FPU_CNTRL_WRD_MASK); ExternalAddress fpu_std(StubRoutines::addr_fpu_cntrl_wrd_std()); __ cmp32(rax, fpu_std); --- 513,524 ---- const Address fpu_cntrl_wrd_save(rsp, 0); if (CheckJNICalls) { Label ok_ret; ! __ push(rax); ! __ subptr(rsp, wordSize); // allocate a temp location __ fnstcw(fpu_cntrl_wrd_save); __ movl(rax, fpu_cntrl_wrd_save); __ andl(rax, FPU_CNTRL_WRD_MASK); ExternalAddress fpu_std(StubRoutines::addr_fpu_cntrl_wrd_std()); __ cmp32(rax, fpu_std);
*** 529,540 **** __ warn("Floating point control word changed by native JNI code."); __ fldcw(fpu_std); __ bind(ok_ret); ! __ addl(rsp, wordSize); ! __ popl(rax); } __ ret(0); return start; --- 527,538 ---- __ warn("Floating point control word changed by native JNI code."); __ fldcw(fpu_std); __ bind(ok_ret); ! __ addptr(rsp, wordSize); ! __ pop(rax); } __ ret(0); return start;
*** 564,589 **** }; assert(FPUStateSizeInWords == 27, "update stack layout"); // Save outgoing argument to stack across push_FPU_state() ! __ subl(rsp, wordSize * 2); __ fstp_d(Address(rsp, 0)); // Save CPU & FPU state ! __ pushl(rbx); ! __ pushl(rcx); ! __ pushl(rsi); ! __ pushl(rdi); ! __ pushl(rbp); __ push_FPU_state(); // push_FPU_state() resets the FP top of stack // Load original double into FP top of stack __ fld_d(Address(rsp, saved_argument_off * wordSize)); // Store double into stack as outgoing argument ! __ subl(rsp, wordSize*2); __ fst_d(Address(rsp, 0)); // Prepare FPU for doing math in C-land __ empty_FPU_stack(); // Call the C code to massage the double. Result in EAX --- 562,587 ---- }; assert(FPUStateSizeInWords == 27, "update stack layout"); // Save outgoing argument to stack across push_FPU_state() ! __ subptr(rsp, wordSize * 2); __ fstp_d(Address(rsp, 0)); // Save CPU & FPU state ! __ push(rbx); ! __ push(rcx); ! __ push(rsi); ! __ push(rdi); ! __ push(rbp); __ push_FPU_state(); // push_FPU_state() resets the FP top of stack // Load original double into FP top of stack __ fld_d(Address(rsp, saved_argument_off * wordSize)); // Store double into stack as outgoing argument ! __ subptr(rsp, wordSize*2); __ fst_d(Address(rsp, 0)); // Prepare FPU for doing math in C-land __ empty_FPU_stack(); // Call the C code to massage the double. Result in EAX
*** 593,608 **** { BLOCK_COMMENT("SharedRuntime::d2l"); } __ call_VM_leaf( fcn, 2 ); // Restore CPU & FPU state __ pop_FPU_state(); ! __ popl(rbp); ! __ popl(rdi); ! __ popl(rsi); ! __ popl(rcx); ! __ popl(rbx); ! __ addl(rsp, wordSize * 2); __ ret(0); return start; } --- 591,606 ---- { BLOCK_COMMENT("SharedRuntime::d2l"); } __ call_VM_leaf( fcn, 2 ); // Restore CPU & FPU state __ pop_FPU_state(); ! __ pop(rbp); ! __ pop(rdi); ! __ pop(rsi); ! __ pop(rcx); ! __ pop(rbx); ! __ addptr(rsp, wordSize * 2); __ ret(0); return start; }
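The d2i/d2l wrapper parks the caller's CPU and FPU state and calls SharedRuntime::d2i/d2l to do the conversion with Java semantics (NaN maps to 0, out-of-range values saturate). A plain C++ sketch of that conversion, assuming those are indeed the semantics the runtime routine provides:

    #include <cstdint>
    #include <cmath>
    #include <limits>

    int64_t d2l_sketch(double x) {
      if (std::isnan(x)) return 0;   // NaN -> 0
      if (x >= (double)std::numeric_limits<int64_t>::max())
        return std::numeric_limits<int64_t>::max();
      if (x <= (double)std::numeric_limits<int64_t>::min())
        return std::numeric_limits<int64_t>::min();
      return (int64_t)x;             // in range: truncate toward zero
    }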
*** 614,630 **** // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.) address generate_handler_for_unsafe_access() { StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); address start = __ pc(); ! __ pushl(0); // hole for return address-to-be ! __ pushad(); // push registers Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord); BLOCK_COMMENT("call handle_unsafe_access"); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access))); ! __ movl(next_pc, rax); // stuff next address ! __ popad(); __ ret(0); // jump to next address return start; } --- 612,628 ---- // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.) address generate_handler_for_unsafe_access() { StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); address start = __ pc(); ! __ push(0); // hole for return address-to-be ! __ pusha(); // push registers Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord); BLOCK_COMMENT("call handle_unsafe_access"); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access))); ! __ movptr(next_pc, rax); // stuff next address ! __ popa(); __ ret(0); // jump to next address return start; }
*** 644,709 **** // [tos + 3]: char* error message // [tos + 4]: oop object to verify // [tos + 5]: saved rax, - saved by caller and bashed Label exit, error; ! __ pushfd(); ! __ increment(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); ! __ pushl(rdx); // save rdx // make sure object is 'reasonable' ! __ movl(rax, Address(rsp, 4 * wordSize)); // get object ! __ testl(rax, rax); __ jcc(Assembler::zero, exit); // if obj is NULL it is ok // Check if the oop is in the right area of memory const int oop_mask = Universe::verify_oop_mask(); const int oop_bits = Universe::verify_oop_bits(); ! __ movl(rdx, rax); ! __ andl(rdx, oop_mask); ! __ cmpl(rdx, oop_bits); __ jcc(Assembler::notZero, error); // make sure klass is 'reasonable' ! __ movl(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass ! __ testl(rax, rax); __ jcc(Assembler::zero, error); // if klass is NULL it is broken // Check if the klass is in the right area of memory const int klass_mask = Universe::verify_klass_mask(); const int klass_bits = Universe::verify_klass_bits(); ! __ movl(rdx, rax); ! __ andl(rdx, klass_mask); ! __ cmpl(rdx, klass_bits); __ jcc(Assembler::notZero, error); // make sure klass' klass is 'reasonable' ! __ movl(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass' klass ! __ testl(rax, rax); __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken ! __ movl(rdx, rax); ! __ andl(rdx, klass_mask); ! __ cmpl(rdx, klass_bits); __ jcc(Assembler::notZero, error); // if klass not in right area // of memory it is broken too. // return if everything seems ok __ bind(exit); ! __ movl(rax, Address(rsp, 5 * wordSize)); // get saved rax, back ! __ popl(rdx); // restore rdx ! __ popfd(); // restore EFLAGS __ ret(3 * wordSize); // pop arguments // handle errors __ bind(error); ! __ movl(rax, Address(rsp, 5 * wordSize)); // get saved rax, back ! __ popl(rdx); // get saved rdx back ! __ popfd(); // get saved EFLAGS off stack -- will be ignored ! __ pushad(); // push registers (eip = return address & msg are already pushed) BLOCK_COMMENT("call MacroAssembler::debug"); ! __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug))); ! __ popad(); __ ret(3 * wordSize); // pop arguments return start; } // --- 642,707 ---- // [tos + 3]: char* error message // [tos + 4]: oop object to verify // [tos + 5]: saved rax, - saved by caller and bashed Label exit, error; ! __ pushf(); ! __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); ! __ push(rdx); // save rdx // make sure object is 'reasonable' ! __ movptr(rax, Address(rsp, 4 * wordSize)); // get object ! __ testptr(rax, rax); __ jcc(Assembler::zero, exit); // if obj is NULL it is ok // Check if the oop is in the right area of memory const int oop_mask = Universe::verify_oop_mask(); const int oop_bits = Universe::verify_oop_bits(); ! __ mov(rdx, rax); ! __ andptr(rdx, oop_mask); ! __ cmpptr(rdx, oop_bits); __ jcc(Assembler::notZero, error); // make sure klass is 'reasonable' ! __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass ! __ testptr(rax, rax); __ jcc(Assembler::zero, error); // if klass is NULL it is broken // Check if the klass is in the right area of memory const int klass_mask = Universe::verify_klass_mask(); const int klass_bits = Universe::verify_klass_bits(); ! __ mov(rdx, rax); ! __ andptr(rdx, klass_mask); ! __ cmpptr(rdx, klass_bits); __ jcc(Assembler::notZero, error); // make sure klass' klass is 'reasonable' ! 
__ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass' klass ! __ testptr(rax, rax); __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken ! __ mov(rdx, rax); ! __ andptr(rdx, klass_mask); ! __ cmpptr(rdx, klass_bits); __ jcc(Assembler::notZero, error); // if klass not in right area // of memory it is broken too. // return if everything seems ok __ bind(exit); ! __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back ! __ pop(rdx); // restore rdx ! __ popf(); // restore EFLAGS __ ret(3 * wordSize); // pop arguments // handle errors __ bind(error); ! __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back ! __ pop(rdx); // get saved rdx back ! __ popf(); // get saved EFLAGS off stack -- will be ignored ! __ pusha(); // push registers (eip = return address & msg are already pushed) BLOCK_COMMENT("call MacroAssembler::debug"); ! __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32))); ! __ popa(); __ ret(3 * wordSize); // pop arguments return start; } //
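The verify_oop stub's "reasonable" checks are address-range tests: the oop and its klass pointers must carry the expected high bits. A minimal sketch of that test; mask and bits stand in for Universe::verify_oop_mask()/verify_oop_bits():

    #include <cstdint>

    bool looks_like_oop(uintptr_t p, uintptr_t mask, uintptr_t bits) {
      if (p == 0) return true;        // NULL is accepted by the stub
      return (p & mask) == bits;      // pointer must fall in the expected region
    }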
*** 712,744 **** // Input: // start - starting address // end - element count void gen_write_ref_array_pre_barrier(Register start, Register count) { assert_different_registers(start, count); - #if 0 // G1 only BarrierSet* bs = Universe::heap()->barrier_set(); switch (bs->kind()) { case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: { ! __ pushad(); // push registers ! __ pushl(count); ! __ pushl(start); ! __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)); ! __ addl(esp, wordSize * 2); ! __ popad(); } break; case BarrierSet::CardTableModRef: case BarrierSet::CardTableExtension: case BarrierSet::ModRef: break; default : ShouldNotReachHere(); } - #endif // 0 - G1 only } // // Generate a post-barrier for an array store --- 710,740 ---- // Input: // start - starting address // end - element count void gen_write_ref_array_pre_barrier(Register start, Register count) { assert_different_registers(start, count); BarrierSet* bs = Universe::heap()->barrier_set(); switch (bs->kind()) { case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: { ! __ pusha(); // push registers ! __ push(count); ! __ push(start); ! __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre))); ! __ addptr(rsp, 2*wordSize); ! __ popa(); } break; case BarrierSet::CardTableModRef: case BarrierSet::CardTableExtension: case BarrierSet::ModRef: break; default : ShouldNotReachHere(); } } // // Generate a post-barrier for an array store
*** 750,773 **** // void gen_write_ref_array_post_barrier(Register start, Register count) { BarrierSet* bs = Universe::heap()->barrier_set(); assert_different_registers(start, count); switch (bs->kind()) { - #if 0 // G1 only case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: { ! __ pushad(); // push registers ! __ pushl(count); ! __ pushl(start); ! __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)); ! __ addl(esp, wordSize * 2); ! __ popad(); } break; - #endif // 0 G1 only case BarrierSet::CardTableModRef: case BarrierSet::CardTableExtension: { CardTableModRefBS* ct = (CardTableModRefBS*)bs; --- 746,767 ---- // void gen_write_ref_array_post_barrier(Register start, Register count) { BarrierSet* bs = Universe::heap()->barrier_set(); assert_different_registers(start, count); switch (bs->kind()) { case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: { ! __ pusha(); // push registers ! __ push(count); ! __ push(start); ! __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post))); ! __ addptr(rsp, 2*wordSize); ! __ popa(); } break; case BarrierSet::CardTableModRef: case BarrierSet::CardTableExtension: { CardTableModRefBS* ct = (CardTableModRefBS*)bs;
*** 775,792 **** Label L_loop; const Register end = count; // elements count; end == start+count-1 assert_different_registers(start, end); ! __ leal(end, Address(start, count, Address::times_4, -4)); ! __ shrl(start, CardTableModRefBS::card_shift); ! __ shrl(end, CardTableModRefBS::card_shift); ! __ subl(end, start); // end --> count __ BIND(L_loop); ! ExternalAddress base((address)ct->byte_map_base); ! Address index(start, count, Address::times_1, 0); ! __ movbyte(ArrayAddress(base, index), 0); __ decrement(count); __ jcc(Assembler::greaterEqual, L_loop); } break; case BarrierSet::ModRef: --- 769,786 ---- Label L_loop; const Register end = count; // elements count; end == start+count-1 assert_different_registers(start, end); ! __ lea(end, Address(start, count, Address::times_ptr, -wordSize)); ! __ shrptr(start, CardTableModRefBS::card_shift); ! __ shrptr(end, CardTableModRefBS::card_shift); ! __ subptr(end, start); // end --> count __ BIND(L_loop); ! intptr_t disp = (intptr_t) ct->byte_map_base; ! Address cardtable(start, count, Address::times_1, disp); ! __ movb(cardtable, 0); __ decrement(count); __ jcc(Assembler::greaterEqual, L_loop); } break; case BarrierSet::ModRef:
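The CardTableModRef post-barrier dirties one card-table byte per card spanned by the updated range; a card index is the address shifted right by card_shift, and the stub writes 0 (dirty) to each. A C++ sketch of the same loop, with byte_map_base and card_shift standing in for the CardTableModRefBS fields:

    #include <cstdint>

    void dirty_cards_sketch(uint8_t* byte_map_base, int card_shift,
                            uintptr_t first_elem, uintptr_t last_elem) {
      for (uintptr_t c = first_elem >> card_shift; c <= (last_elem >> card_shift); ++c) {
        byte_map_base[c] = 0;   // 0 marks the card dirty
      }
    }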
*** 795,812 **** --- 789,870 ---- ShouldNotReachHere(); } } + + // Copy 64 bytes chunks + // + // Inputs: + // from - source array address + // to_from - destination array address - from + // qword_count - 8-bytes element count, negative + // + void xmm_copy_forward(Register from, Register to_from, Register qword_count) { + assert( UseSSE >= 2, "supported cpu only" ); + Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit; + // Copy 64-byte chunks + __ jmpb(L_copy_64_bytes); + __ align(16); + __ BIND(L_copy_64_bytes_loop); + + if(UseUnalignedLoadStores) { + __ movdqu(xmm0, Address(from, 0)); + __ movdqu(Address(from, to_from, Address::times_1, 0), xmm0); + __ movdqu(xmm1, Address(from, 16)); + __ movdqu(Address(from, to_from, Address::times_1, 16), xmm1); + __ movdqu(xmm2, Address(from, 32)); + __ movdqu(Address(from, to_from, Address::times_1, 32), xmm2); + __ movdqu(xmm3, Address(from, 48)); + __ movdqu(Address(from, to_from, Address::times_1, 48), xmm3); + + } else { + __ movq(xmm0, Address(from, 0)); + __ movq(Address(from, to_from, Address::times_1, 0), xmm0); + __ movq(xmm1, Address(from, 8)); + __ movq(Address(from, to_from, Address::times_1, 8), xmm1); + __ movq(xmm2, Address(from, 16)); + __ movq(Address(from, to_from, Address::times_1, 16), xmm2); + __ movq(xmm3, Address(from, 24)); + __ movq(Address(from, to_from, Address::times_1, 24), xmm3); + __ movq(xmm4, Address(from, 32)); + __ movq(Address(from, to_from, Address::times_1, 32), xmm4); + __ movq(xmm5, Address(from, 40)); + __ movq(Address(from, to_from, Address::times_1, 40), xmm5); + __ movq(xmm6, Address(from, 48)); + __ movq(Address(from, to_from, Address::times_1, 48), xmm6); + __ movq(xmm7, Address(from, 56)); + __ movq(Address(from, to_from, Address::times_1, 56), xmm7); + } + + __ addl(from, 64); + __ BIND(L_copy_64_bytes); + __ subl(qword_count, 8); + __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop); + __ addl(qword_count, 8); + __ jccb(Assembler::zero, L_exit); + // + // length is too short, just copy qwords + // + __ BIND(L_copy_8_bytes); + __ movq(xmm0, Address(from, 0)); + __ movq(Address(from, to_from, Address::times_1), xmm0); + __ addl(from, 8); + __ decrement(qword_count); + __ jcc(Assembler::greater, L_copy_8_bytes); + __ BIND(L_exit); + } + // Copy 64 bytes chunks // // Inputs: // from - source array address // to_from - destination array address - from // qword_count - 8-bytes element count, negative // void mmx_copy_forward(Register from, Register to_from, Register qword_count) { + assert( VM_Version::supports_mmx(), "supported cpu only" ); Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit; // Copy 64-byte chunks __ jmpb(L_copy_64_bytes); __ align(16); __ BIND(L_copy_64_bytes_loop);
*** 824,834 **** __ movq(Address(from, to_from, Address::times_1, 32), mmx4); __ movq(mmx7, Address(from, 56)); __ movq(Address(from, to_from, Address::times_1, 40), mmx5); __ movq(Address(from, to_from, Address::times_1, 48), mmx6); __ movq(Address(from, to_from, Address::times_1, 56), mmx7); ! __ addl(from, 64); __ BIND(L_copy_64_bytes); __ subl(qword_count, 8); __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop); __ addl(qword_count, 8); __ jccb(Assembler::zero, L_exit); --- 882,892 ---- __ movq(Address(from, to_from, Address::times_1, 32), mmx4); __ movq(mmx7, Address(from, 56)); __ movq(Address(from, to_from, Address::times_1, 40), mmx5); __ movq(Address(from, to_from, Address::times_1, 48), mmx6); __ movq(Address(from, to_from, Address::times_1, 56), mmx7); ! __ addptr(from, 64); __ BIND(L_copy_64_bytes); __ subl(qword_count, 8); __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop); __ addl(qword_count, 8); __ jccb(Assembler::zero, L_exit);
*** 836,846 **** // length is too short, just copy qwords // __ BIND(L_copy_8_bytes); __ movq(mmx0, Address(from, 0)); __ movq(Address(from, to_from, Address::times_1), mmx0); ! __ addl(from, 8); __ decrement(qword_count); __ jcc(Assembler::greater, L_copy_8_bytes); __ BIND(L_exit); __ emms(); } --- 894,904 ---- // length is too short, just copy qwords // __ BIND(L_copy_8_bytes); __ movq(mmx0, Address(from, 0)); __ movq(Address(from, to_from, Address::times_1), mmx0); ! __ addptr(from, 8); __ decrement(qword_count); __ jcc(Assembler::greater, L_copy_8_bytes); __ BIND(L_exit); __ emms(); }
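Both xmm_copy_forward and mmx_copy_forward move 64-byte chunks per iteration and fall back to single qwords for the tail; the new XMM variant additionally uses unaligned 16-byte movdqu moves when UseUnalignedLoadStores is set. Ignoring the register-level details, the copy structure is roughly:

    #include <cstdint>
    #include <cstring>
    #include <cstddef>

    // Forward copy in 64-byte chunks, then qword-at-a-time for the remainder.
    void copy_forward_sketch(const uint8_t* from, uint8_t* to, size_t qword_count) {
      size_t i = 0;
      for (; i + 8 <= qword_count; i += 8) {
        std::memcpy(to + i * 8, from + i * 8, 64);
      }
      for (; i < qword_count; ++i) {
        std::memcpy(to + i * 8, from + i * 8, 8);
      }
    }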
*** 853,890 **** address start = __ pc(); Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte; Label L_copy_2_bytes, L_copy_4_bytes, L_copy_64_bytes; ! int shift = Address::times_4 - sf; const Register from = rsi; // source array address const Register to = rdi; // destination array address const Register count = rcx; // elements count const Register to_from = to; // (to - from) const Register saved_to = rdx; // saved destination array address __ enter(); // required for proper stackwalking of RuntimeStub frame ! __ pushl(rsi); ! __ pushl(rdi); ! __ movl(from , Address(rsp, 12+ 4)); ! __ movl(to , Address(rsp, 12+ 8)); __ movl(count, Address(rsp, 12+ 12)); if (t == T_OBJECT) { __ testl(count, count); __ jcc(Assembler::zero, L_0_count); gen_write_ref_array_pre_barrier(to, count); ! __ movl(saved_to, to); // save 'to' } *entry = __ pc(); // Entry point from conjoint arraycopy stub. BLOCK_COMMENT("Entry:"); ! __ subl(to, from); // to --> to_from __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp ! if (!aligned && (t == T_BYTE || t == T_SHORT)) { // align source address at 4 bytes address boundary if (t == T_BYTE) { // One byte misalignment happens only for byte arrays __ testl(from, 1); __ jccb(Assembler::zero, L_skip_align1); --- 911,948 ---- address start = __ pc(); Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte; Label L_copy_2_bytes, L_copy_4_bytes, L_copy_64_bytes; ! int shift = Address::times_ptr - sf; const Register from = rsi; // source array address const Register to = rdi; // destination array address const Register count = rcx; // elements count const Register to_from = to; // (to - from) const Register saved_to = rdx; // saved destination array address __ enter(); // required for proper stackwalking of RuntimeStub frame ! __ push(rsi); ! __ push(rdi); ! __ movptr(from , Address(rsp, 12+ 4)); ! __ movptr(to , Address(rsp, 12+ 8)); __ movl(count, Address(rsp, 12+ 12)); if (t == T_OBJECT) { __ testl(count, count); __ jcc(Assembler::zero, L_0_count); gen_write_ref_array_pre_barrier(to, count); ! __ mov(saved_to, to); // save 'to' } *entry = __ pc(); // Entry point from conjoint arraycopy stub. BLOCK_COMMENT("Entry:"); ! __ subptr(to, from); // to --> to_from __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp ! if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) { // align source address at 4 bytes address boundary if (t == T_BYTE) { // One byte misalignment happens only for byte arrays __ testl(from, 1); __ jccb(Assembler::zero, L_skip_align1);
*** 897,950 **** // Two bytes misalignment happens only for byte and short (char) arrays __ testl(from, 2); __ jccb(Assembler::zero, L_skip_align2); __ movw(rax, Address(from, 0)); __ movw(Address(from, to_from, Address::times_1, 0), rax); ! __ addl(from, 2); __ subl(count, 1<<(shift-1)); __ BIND(L_skip_align2); } if (!VM_Version::supports_mmx()) { ! __ movl(rax, count); // save 'count' __ shrl(count, shift); // bytes count ! __ addl(to_from, from); // restore 'to' ! __ rep_movl(); ! __ subl(to_from, from); // restore 'to_from' ! __ movl(count, rax); // restore 'count' __ jmpb(L_copy_2_bytes); // all dwords were copied } else { // align to 8 bytes, we know we are 4 byte aligned to start ! __ testl(from, 4); __ jccb(Assembler::zero, L_copy_64_bytes); __ movl(rax, Address(from, 0)); __ movl(Address(from, to_from, Address::times_1, 0), rax); ! __ addl(from, 4); __ subl(count, 1<<shift); __ BIND(L_copy_64_bytes); ! __ movl(rax, count); __ shrl(rax, shift+1); // 8 bytes chunk count // // Copy 8-byte chunks through MMX registers, 8 per iteration of the loop // mmx_copy_forward(from, to_from, rax); } // copy tailing dword __ BIND(L_copy_4_bytes); __ testl(count, 1<<shift); __ jccb(Assembler::zero, L_copy_2_bytes); __ movl(rax, Address(from, 0)); __ movl(Address(from, to_from, Address::times_1, 0), rax); if (t == T_BYTE || t == T_SHORT) { ! __ addl(from, 4); __ BIND(L_copy_2_bytes); // copy tailing word __ testl(count, 1<<(shift-1)); __ jccb(Assembler::zero, L_copy_byte); __ movw(rax, Address(from, 0)); __ movw(Address(from, to_from, Address::times_1, 0), rax); if (t == T_BYTE) { ! __ addl(from, 2); __ BIND(L_copy_byte); // copy tailing byte __ testl(count, 1); __ jccb(Assembler::zero, L_exit); __ movb(rax, Address(from, 0)); --- 955,1014 ---- // Two bytes misalignment happens only for byte and short (char) arrays __ testl(from, 2); __ jccb(Assembler::zero, L_skip_align2); __ movw(rax, Address(from, 0)); __ movw(Address(from, to_from, Address::times_1, 0), rax); ! __ addptr(from, 2); __ subl(count, 1<<(shift-1)); __ BIND(L_skip_align2); } if (!VM_Version::supports_mmx()) { ! __ mov(rax, count); // save 'count' __ shrl(count, shift); // bytes count ! __ addptr(to_from, from);// restore 'to' ! __ rep_mov(); ! __ subptr(to_from, from);// restore 'to_from' ! __ mov(count, rax); // restore 'count' __ jmpb(L_copy_2_bytes); // all dwords were copied } else { + if (!UseUnalignedLoadStores) { // align to 8 bytes, we know we are 4 byte aligned to start ! __ testptr(from, 4); __ jccb(Assembler::zero, L_copy_64_bytes); __ movl(rax, Address(from, 0)); __ movl(Address(from, to_from, Address::times_1, 0), rax); ! __ addptr(from, 4); __ subl(count, 1<<shift); + } __ BIND(L_copy_64_bytes); ! __ mov(rax, count); __ shrl(rax, shift+1); // 8 bytes chunk count // // Copy 8-byte chunks through MMX registers, 8 per iteration of the loop // + if (UseXMMForArrayCopy) { + xmm_copy_forward(from, to_from, rax); + } else { mmx_copy_forward(from, to_from, rax); } + } // copy tailing dword __ BIND(L_copy_4_bytes); __ testl(count, 1<<shift); __ jccb(Assembler::zero, L_copy_2_bytes); __ movl(rax, Address(from, 0)); __ movl(Address(from, to_from, Address::times_1, 0), rax); if (t == T_BYTE || t == T_SHORT) { ! __ addptr(from, 4); __ BIND(L_copy_2_bytes); // copy tailing word __ testl(count, 1<<(shift-1)); __ jccb(Assembler::zero, L_copy_byte); __ movw(rax, Address(from, 0)); __ movw(Address(from, to_from, Address::times_1, 0), rax); if (t == T_BYTE) { ! 
__ addptr(from, 2); __ BIND(L_copy_byte); // copy tailing byte __ testl(count, 1); __ jccb(Assembler::zero, L_exit); __ movb(rax, Address(from, 0));
*** 957,975 **** __ BIND(L_copy_2_bytes); } if (t == T_OBJECT) { __ movl(count, Address(rsp, 12+12)); // reread 'count' ! __ movl(to, saved_to); // restore 'to' gen_write_ref_array_post_barrier(to, count); __ BIND(L_0_count); } inc_copy_counter_np(t); ! __ popl(rdi); ! __ popl(rsi); __ leave(); // required for proper stackwalking of RuntimeStub frame ! __ xorl(rax, rax); // return 0 __ ret(0); return start; } --- 1021,1039 ---- __ BIND(L_copy_2_bytes); } if (t == T_OBJECT) { __ movl(count, Address(rsp, 12+12)); // reread 'count' ! __ mov(to, saved_to); // restore 'to' gen_write_ref_array_post_barrier(to, count); __ BIND(L_0_count); } inc_copy_counter_np(t); ! __ pop(rdi); ! __ pop(rsi); __ leave(); // required for proper stackwalking of RuntimeStub frame ! __ xorptr(rax, rax); // return 0 __ ret(0); return start; }
*** 982,1006 **** address start = __ pc(); Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte; Label L_copy_2_bytes, L_copy_4_bytes, L_copy_8_bytes, L_copy_8_bytes_loop; ! int shift = Address::times_4 - sf; const Register src = rax; // source array address const Register dst = rdx; // destination array address const Register from = rsi; // source array address const Register to = rdi; // destination array address const Register count = rcx; // elements count const Register end = rax; // array end address __ enter(); // required for proper stackwalking of RuntimeStub frame ! __ pushl(rsi); ! __ pushl(rdi); ! __ movl(src , Address(rsp, 12+ 4)); // from ! __ movl(dst , Address(rsp, 12+ 8)); // to ! __ movl(count, Address(rsp, 12+12)); // count if (t == T_OBJECT) { gen_write_ref_array_pre_barrier(dst, count); } if (entry != NULL) { --- 1046,1070 ---- address start = __ pc(); Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte; Label L_copy_2_bytes, L_copy_4_bytes, L_copy_8_bytes, L_copy_8_bytes_loop; ! int shift = Address::times_ptr - sf; const Register src = rax; // source array address const Register dst = rdx; // destination array address const Register from = rsi; // source array address const Register to = rdi; // destination array address const Register count = rcx; // elements count const Register end = rax; // array end address __ enter(); // required for proper stackwalking of RuntimeStub frame ! __ push(rsi); ! __ push(rdi); ! __ movptr(src , Address(rsp, 12+ 4)); // from ! __ movptr(dst , Address(rsp, 12+ 8)); // to ! __ movl2ptr(count, Address(rsp, 12+12)); // count if (t == T_OBJECT) { gen_write_ref_array_pre_barrier(dst, count); } if (entry != NULL) {
*** 1010,1036 **** if (t == T_OBJECT) { __ testl(count, count); __ jcc(Assembler::zero, L_0_count); } ! __ movl(from, src); ! __ movl(to , dst); // arrays overlap test RuntimeAddress nooverlap(nooverlap_target); ! __ cmpl(dst, src); ! __ leal(end, Address(src, count, sf, 0)); // src + count * elem_size __ jump_cc(Assembler::belowEqual, nooverlap); ! __ cmpl(dst, end); __ jump_cc(Assembler::aboveEqual, nooverlap); // copy from high to low __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp if (t == T_BYTE || t == T_SHORT) { // Align the end of destination array at 4 bytes address boundary ! __ leal(end, Address(dst, count, sf, 0)); if (t == T_BYTE) { // One byte misalignment happens only for byte arrays __ testl(end, 1); __ jccb(Assembler::zero, L_skip_align1); __ decrement(count); --- 1074,1100 ---- if (t == T_OBJECT) { __ testl(count, count); __ jcc(Assembler::zero, L_0_count); } ! __ mov(from, src); ! __ mov(to , dst); // arrays overlap test RuntimeAddress nooverlap(nooverlap_target); ! __ cmpptr(dst, src); ! __ lea(end, Address(src, count, sf, 0)); // src + count * elem_size __ jump_cc(Assembler::belowEqual, nooverlap); ! __ cmpptr(dst, end); __ jump_cc(Assembler::aboveEqual, nooverlap); // copy from high to low __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp if (t == T_BYTE || t == T_SHORT) { // Align the end of destination array at 4 bytes address boundary ! __ lea(end, Address(dst, count, sf, 0)); if (t == T_BYTE) { // One byte misalignment happens only for byte arrays __ testl(end, 1); __ jccb(Assembler::zero, L_skip_align1); __ decrement(count);
*** 1039,1090 **** __ BIND(L_skip_align1); } // Two bytes misalignment happens only for byte and short (char) arrays __ testl(end, 2); __ jccb(Assembler::zero, L_skip_align2); ! __ subl(count, 1<<(shift-1)); __ movw(rdx, Address(from, count, sf, 0)); __ movw(Address(to, count, sf, 0), rdx); __ BIND(L_skip_align2); __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element __ jcc(Assembler::below, L_copy_4_bytes); } if (!VM_Version::supports_mmx()) { __ std(); ! __ movl(rax, count); // Save 'count' ! __ movl(rdx, to); // Save 'to' ! __ leal(rsi, Address(from, count, sf, -4)); ! __ leal(rdi, Address(to , count, sf, -4)); ! __ shrl(count, shift); // bytes count ! __ rep_movl(); __ cld(); ! __ movl(count, rax); // restore 'count' __ andl(count, (1<<shift)-1); // mask the number of rest elements ! __ movl(from, Address(rsp, 12+4)); // reread 'from' ! __ movl(to, rdx); // restore 'to' __ jmpb(L_copy_2_bytes); // all dword were copied } else { // Align to 8 bytes the end of array. It is aligned to 4 bytes already. ! __ testl(end, 4); __ jccb(Assembler::zero, L_copy_8_bytes); __ subl(count, 1<<shift); __ movl(rdx, Address(from, count, sf, 0)); __ movl(Address(to, count, sf, 0), rdx); __ jmpb(L_copy_8_bytes); __ align(16); // Move 8 bytes __ BIND(L_copy_8_bytes_loop); __ movq(mmx0, Address(from, count, sf, 0)); __ movq(Address(to, count, sf, 0), mmx0); __ BIND(L_copy_8_bytes); __ subl(count, 2<<shift); __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop); __ addl(count, 2<<shift); __ emms(); } __ BIND(L_copy_4_bytes); // copy prefix qword __ testl(count, 1<<shift); __ jccb(Assembler::zero, L_copy_2_bytes); __ movl(rdx, Address(from, count, sf, -4)); --- 1103,1161 ---- __ BIND(L_skip_align1); } // Two bytes misalignment happens only for byte and short (char) arrays __ testl(end, 2); __ jccb(Assembler::zero, L_skip_align2); ! __ subptr(count, 1<<(shift-1)); __ movw(rdx, Address(from, count, sf, 0)); __ movw(Address(to, count, sf, 0), rdx); __ BIND(L_skip_align2); __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element __ jcc(Assembler::below, L_copy_4_bytes); } if (!VM_Version::supports_mmx()) { __ std(); ! __ mov(rax, count); // Save 'count' ! __ mov(rdx, to); // Save 'to' ! __ lea(rsi, Address(from, count, sf, -4)); ! __ lea(rdi, Address(to , count, sf, -4)); ! __ shrptr(count, shift); // bytes count ! __ rep_mov(); __ cld(); ! __ mov(count, rax); // restore 'count' __ andl(count, (1<<shift)-1); // mask the number of rest elements ! __ movptr(from, Address(rsp, 12+4)); // reread 'from' ! __ mov(to, rdx); // restore 'to' __ jmpb(L_copy_2_bytes); // all dword were copied } else { // Align to 8 bytes the end of array. It is aligned to 4 bytes already. ! __ testptr(end, 4); __ jccb(Assembler::zero, L_copy_8_bytes); __ subl(count, 1<<shift); __ movl(rdx, Address(from, count, sf, 0)); __ movl(Address(to, count, sf, 0), rdx); __ jmpb(L_copy_8_bytes); __ align(16); // Move 8 bytes __ BIND(L_copy_8_bytes_loop); + if (UseXMMForArrayCopy) { + __ movq(xmm0, Address(from, count, sf, 0)); + __ movq(Address(to, count, sf, 0), xmm0); + } else { __ movq(mmx0, Address(from, count, sf, 0)); __ movq(Address(to, count, sf, 0), mmx0); + } __ BIND(L_copy_8_bytes); __ subl(count, 2<<shift); __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop); __ addl(count, 2<<shift); + if (!UseXMMForArrayCopy) { __ emms(); } + } __ BIND(L_copy_4_bytes); // copy prefix qword __ testl(count, 1<<shift); __ jccb(Assembler::zero, L_copy_2_bytes); __ movl(rdx, Address(from, count, sf, -4));
*** 1112,1130 **** } } else { __ BIND(L_copy_2_bytes); } if (t == T_OBJECT) { ! __ movl(count, Address(rsp, 12+12)); // reread count gen_write_ref_array_post_barrier(to, count); __ BIND(L_0_count); } inc_copy_counter_np(t); ! __ popl(rdi); ! __ popl(rsi); __ leave(); // required for proper stackwalking of RuntimeStub frame ! __ xorl(rax, rax); // return 0 __ ret(0); return start; } --- 1183,1201 ---- } } else { __ BIND(L_copy_2_bytes); } if (t == T_OBJECT) { ! __ movl2ptr(count, Address(rsp, 12+12)); // reread count gen_write_ref_array_post_barrier(to, count); __ BIND(L_0_count); } inc_copy_counter_np(t); ! __ pop(rdi); ! __ pop(rsi); __ leave(); // required for proper stackwalking of RuntimeStub frame ! __ xorptr(rax, rax); // return 0 __ ret(0); return start; }
*** 1138,1171 **** const Register to = rdx; // destination array address const Register count = rcx; // elements count const Register to_from = rdx; // (to - from) __ enter(); // required for proper stackwalking of RuntimeStub frame ! __ movl(from , Address(rsp, 8+0)); // from ! __ movl(to , Address(rsp, 8+4)); // to ! __ movl(count, Address(rsp, 8+8)); // count *entry = __ pc(); // Entry point from conjoint arraycopy stub. BLOCK_COMMENT("Entry:"); ! __ subl(to, from); // to --> to_from if (VM_Version::supports_mmx()) { mmx_copy_forward(from, to_from, count); } else { __ jmpb(L_copy_8_bytes); __ align(16); __ BIND(L_copy_8_bytes_loop); __ fild_d(Address(from, 0)); __ fistp_d(Address(from, to_from, Address::times_1)); ! __ addl(from, 8); __ BIND(L_copy_8_bytes); __ decrement(count); __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop); } inc_copy_counter_np(T_LONG); __ leave(); // required for proper stackwalking of RuntimeStub frame ! __ xorl(rax, rax); // return 0 __ ret(0); return start; } address generate_conjoint_long_copy(address nooverlap_target, --- 1209,1246 ---- const Register to = rdx; // destination array address const Register count = rcx; // elements count const Register to_from = rdx; // (to - from) __ enter(); // required for proper stackwalking of RuntimeStub frame ! __ movptr(from , Address(rsp, 8+0)); // from ! __ movptr(to , Address(rsp, 8+4)); // to ! __ movl2ptr(count, Address(rsp, 8+8)); // count *entry = __ pc(); // Entry point from conjoint arraycopy stub. BLOCK_COMMENT("Entry:"); ! __ subptr(to, from); // to --> to_from if (VM_Version::supports_mmx()) { + if (UseXMMForArrayCopy) { + xmm_copy_forward(from, to_from, count); + } else { mmx_copy_forward(from, to_from, count); + } } else { __ jmpb(L_copy_8_bytes); __ align(16); __ BIND(L_copy_8_bytes_loop); __ fild_d(Address(from, 0)); __ fistp_d(Address(from, to_from, Address::times_1)); ! __ addptr(from, 8); __ BIND(L_copy_8_bytes); __ decrement(count); __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop); } inc_copy_counter_np(T_LONG); __ leave(); // required for proper stackwalking of RuntimeStub frame ! __ xorptr(rax, rax); // return 0 __ ret(0); return start; } address generate_conjoint_long_copy(address nooverlap_target,
*** 1179,1225 **** const Register to = rdx; // destination array address const Register count = rcx; // elements count const Register end_from = rax; // source array end address __ enter(); // required for proper stackwalking of RuntimeStub frame ! __ movl(from , Address(rsp, 8+0)); // from ! __ movl(to , Address(rsp, 8+4)); // to ! __ movl(count, Address(rsp, 8+8)); // count *entry = __ pc(); // Entry point from generic arraycopy stub. BLOCK_COMMENT("Entry:"); // arrays overlap test ! __ cmpl(to, from); RuntimeAddress nooverlap(nooverlap_target); __ jump_cc(Assembler::belowEqual, nooverlap); ! __ leal(end_from, Address(from, count, Address::times_8, 0)); ! __ cmpl(to, end_from); ! __ movl(from, Address(rsp, 8)); // from __ jump_cc(Assembler::aboveEqual, nooverlap); __ jmpb(L_copy_8_bytes); __ align(16); __ BIND(L_copy_8_bytes_loop); if (VM_Version::supports_mmx()) { __ movq(mmx0, Address(from, count, Address::times_8)); __ movq(Address(to, count, Address::times_8), mmx0); } else { __ fild_d(Address(from, count, Address::times_8)); __ fistp_d(Address(to, count, Address::times_8)); } __ BIND(L_copy_8_bytes); __ decrement(count); __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop); ! if (VM_Version::supports_mmx()) { __ emms(); } inc_copy_counter_np(T_LONG); __ leave(); // required for proper stackwalking of RuntimeStub frame ! __ xorl(rax, rax); // return 0 __ ret(0); return start; } --- 1254,1305 ---- const Register to = rdx; // destination array address const Register count = rcx; // elements count const Register end_from = rax; // source array end address __ enter(); // required for proper stackwalking of RuntimeStub frame ! __ movptr(from , Address(rsp, 8+0)); // from ! __ movptr(to , Address(rsp, 8+4)); // to ! __ movl2ptr(count, Address(rsp, 8+8)); // count *entry = __ pc(); // Entry point from generic arraycopy stub. BLOCK_COMMENT("Entry:"); // arrays overlap test ! __ cmpptr(to, from); RuntimeAddress nooverlap(nooverlap_target); __ jump_cc(Assembler::belowEqual, nooverlap); ! __ lea(end_from, Address(from, count, Address::times_8, 0)); ! __ cmpptr(to, end_from); ! __ movptr(from, Address(rsp, 8)); // from __ jump_cc(Assembler::aboveEqual, nooverlap); __ jmpb(L_copy_8_bytes); __ align(16); __ BIND(L_copy_8_bytes_loop); if (VM_Version::supports_mmx()) { + if (UseXMMForArrayCopy) { + __ movq(xmm0, Address(from, count, Address::times_8)); + __ movq(Address(to, count, Address::times_8), xmm0); + } else { __ movq(mmx0, Address(from, count, Address::times_8)); __ movq(Address(to, count, Address::times_8), mmx0); + } } else { __ fild_d(Address(from, count, Address::times_8)); __ fistp_d(Address(to, count, Address::times_8)); } __ BIND(L_copy_8_bytes); __ decrement(count); __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop); ! if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) { __ emms(); } inc_copy_counter_np(T_LONG); __ leave(); // required for proper stackwalking of RuntimeStub frame ! __ xorptr(rax, rax); // return 0 __ ret(0); return start; }
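The conjoint stubs first test whether the ranges actually overlap; when they do not, they jump to the corresponding disjoint (forward) copy, otherwise they copy from high addresses to low. The overlap test itself reduces to:

    #include <cstdint>
    #include <cstddef>

    // True only when the destination starts inside the source range,
    // i.e. a simple forward copy would clobber not-yet-read source bytes.
    bool must_copy_backward(uintptr_t from, uintptr_t to, size_t byte_count) {
      return to > from && to < from + byte_count;
    }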
*** 1252,1269 **** Klass::secondary_super_cache_offset_in_bytes()); Address secondary_supers_addr(sub_klass, ss_offset); Address super_cache_addr( sub_klass, sc_offset); // if the pointers are equal, we are done (e.g., String[] elements) ! __ cmpl(sub_klass, super_klass_addr); __ jcc(Assembler::equal, L_success); // check the supertype display: ! __ movl(temp, super_check_offset_addr); Address super_check_addr(sub_klass, temp, Address::times_1, 0); ! __ movl(temp, super_check_addr); // load displayed supertype ! __ cmpl(temp, super_klass_addr); // test the super type __ jcc(Assembler::equal, L_success); // if it was a primary super, we can just fail immediately __ cmpl(super_check_offset_addr, sc_offset); __ jcc(Assembler::notEqual, L_failure); --- 1332,1349 ---- Klass::secondary_super_cache_offset_in_bytes()); Address secondary_supers_addr(sub_klass, ss_offset); Address super_cache_addr( sub_klass, sc_offset); // if the pointers are equal, we are done (e.g., String[] elements) ! __ cmpptr(sub_klass, super_klass_addr); __ jcc(Assembler::equal, L_success); // check the supertype display: ! __ movl2ptr(temp, super_check_offset_addr); Address super_check_addr(sub_klass, temp, Address::times_1, 0); ! __ movptr(temp, super_check_addr); // load displayed supertype ! __ cmpptr(temp, super_klass_addr); // test the super type __ jcc(Assembler::equal, L_success); // if it was a primary super, we can just fail immediately __ cmpl(super_check_offset_addr, sc_offset); __ jcc(Assembler::notEqual, L_failure);
*** 1272,1306 **** // This code is rarely used, so simplicity is a virtue here. inc_counter_np(SharedRuntime::_partial_subtype_ctr); { // The repne_scan instruction uses fixed registers, which we must spill. // (We need a couple more temps in any case.) ! __ pushl(rax); ! __ pushl(rcx); ! __ pushl(rdi); assert_different_registers(sub_klass, rax, rcx, rdi); ! __ movl(rdi, secondary_supers_addr); // Load the array length. __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); // Skip to start of data. ! __ addl(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); // Scan rcx words at [edi] for occurance of rax, // Set NZ/Z based on last compare ! __ movl(rax, super_klass_addr); __ repne_scan(); // Unspill the temp. registers: ! __ popl(rdi); ! __ popl(rcx); ! __ popl(rax); } __ jcc(Assembler::notEqual, L_failure); // Success. Cache the super we found and proceed in triumph. ! __ movl(temp, super_klass_addr); // note: rax, is dead ! __ movl(super_cache_addr, temp); if (!fall_through_on_success) __ jmp(L_success); // Fall through on failure! --- 1352,1386 ---- // This code is rarely used, so simplicity is a virtue here. inc_counter_np(SharedRuntime::_partial_subtype_ctr); { // The repne_scan instruction uses fixed registers, which we must spill. // (We need a couple more temps in any case.) ! __ push(rax); ! __ push(rcx); ! __ push(rdi); assert_different_registers(sub_klass, rax, rcx, rdi); ! __ movptr(rdi, secondary_supers_addr); // Load the array length. __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); // Skip to start of data. ! __ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); // Scan rcx words at [edi] for occurance of rax, // Set NZ/Z based on last compare ! __ movptr(rax, super_klass_addr); __ repne_scan(); // Unspill the temp. registers: ! __ pop(rdi); ! __ pop(rcx); ! __ pop(rax); } __ jcc(Assembler::notEqual, L_failure); // Success. Cache the super we found and proceed in triumph. ! __ movptr(temp, super_klass_addr); // note: rax, is dead ! __ movptr(super_cache_addr, temp); if (!fall_through_on_success) __ jmp(L_success); // Fall through on failure!
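The slow path in this hunk scans the secondary-supers array with repne scas and, on a hit, installs the found super in the one-element secondary super cache. In plain C++ the logic is roughly the following; the struct layout is illustrative only:

    struct KlassSketch {
      KlassSketch** secondary_supers;      // array of secondary super types
      int           secondary_count;       // its length
      KlassSketch*  secondary_super_cache; // 1-element cache checked on the fast path
    };

    bool check_secondary_supers(KlassSketch* sub, KlassSketch* super) {
      for (int i = 0; i < sub->secondary_count; i++) {
        if (sub->secondary_supers[i] == super) {
          sub->secondary_super_cache = super;  // cache the hit, as the stub does
          return true;
        }
      }
      return false;
    }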
*** 1339,1362 **** const Register elem_klass = rsi; // each elem._klass (sub_klass) const Register temp = rbx; // lone remaining temp __ enter(); // required for proper stackwalking of RuntimeStub frame ! __ pushl(rsi); ! __ pushl(rdi); ! __ pushl(rbx); Address from_arg(rsp, 16+ 4); // from Address to_arg(rsp, 16+ 8); // to Address length_arg(rsp, 16+12); // elements count Address ckoff_arg(rsp, 16+16); // super_check_offset Address ckval_arg(rsp, 16+20); // super_klass // Load up: ! __ movl(from, from_arg); ! __ movl(to, to_arg); ! __ movl(length, length_arg); *entry = __ pc(); // Entry point from generic arraycopy stub. BLOCK_COMMENT("Entry:"); //--------------------------------------------------------------- --- 1419,1442 ---- const Register elem_klass = rsi; // each elem._klass (sub_klass) const Register temp = rbx; // lone remaining temp __ enter(); // required for proper stackwalking of RuntimeStub frame ! __ push(rsi); ! __ push(rdi); ! __ push(rbx); Address from_arg(rsp, 16+ 4); // from Address to_arg(rsp, 16+ 8); // to Address length_arg(rsp, 16+12); // elements count Address ckoff_arg(rsp, 16+16); // super_check_offset Address ckval_arg(rsp, 16+20); // super_klass // Load up: ! __ movptr(from, from_arg); ! __ movptr(to, to_arg); ! __ movl2ptr(length, length_arg); *entry = __ pc(); // Entry point from generic arraycopy stub. BLOCK_COMMENT("Entry:"); //---------------------------------------------------------------
*** 1365,1449 **** // destination array type is not equal to or a supertype // of the source type. Each element must be separately // checked. // Loop-invariant addresses. They are exclusive end pointers. ! Address end_from_addr(from, length, Address::times_4, 0); ! Address end_to_addr(to, length, Address::times_4, 0); Register end_from = from; // re-use Register end_to = to; // re-use Register count = length; // re-use // Loop-variant addresses. They assume post-incremented count < 0. ! Address from_element_addr(end_from, count, Address::times_4, 0); ! Address to_element_addr(end_to, count, Address::times_4, 0); Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes()); // Copy from low to high addresses, indexed from the end of each array. - __ leal(end_from, end_from_addr); - __ leal(end_to, end_to_addr); gen_write_ref_array_pre_barrier(to, count); assert(length == count, ""); // else fix next line: ! __ negl(count); // negate and test the length __ jccb(Assembler::notZero, L_load_element); // Empty array: Nothing to do. ! __ xorl(rax, rax); // return 0 on (trivial) success __ jmp(L_done); // ======== begin loop ======== // (Loop is rotated; its entry is L_load_element.) // Loop control: // for (count = -count; count != 0; count++) // Base pointers src, dst are biased by 8*count,to last element. __ align(16); __ BIND(L_store_element); ! __ movl(to_element_addr, elem); // store the oop __ increment(count); // increment the count toward zero __ jccb(Assembler::zero, L_do_card_marks); // ======== loop entry is here ======== __ BIND(L_load_element); ! __ movl(elem, from_element_addr); // load the oop ! __ testl(elem, elem); __ jccb(Assembler::zero, L_store_element); // (Could do a trick here: Remember last successful non-null // element stored and make a quick oop equality check on it.) ! __ movl(elem_klass, elem_klass_addr); // query the object klass generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp, &L_store_element, NULL); // (On fall-through, we have failed the element type check.) // ======== end loop ======== // It was a real error; we must depend on the caller to finish the job. // Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops. // Emit GC store barriers for the oops we have copied (length_arg + count), // and report their number to the caller. __ addl(count, length_arg); // transfers = (length - remaining) ! __ movl(rax, count); // save the value ! __ notl(rax); // report (-1^K) to caller ! __ movl(to, to_arg); // reload assert_different_registers(to, count, rax); gen_write_ref_array_post_barrier(to, count); __ jmpb(L_done); // Come here on success only. __ BIND(L_do_card_marks); ! __ movl(count, length_arg); ! __ movl(to, to_arg); // reload gen_write_ref_array_post_barrier(to, count); ! __ xorl(rax, rax); // return 0 on success // Common exit point (success or failure). __ BIND(L_done); ! __ popl(rbx); ! __ popl(rdi); ! __ popl(rsi); inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); return start;
--- 1445,1529 ---- // destination array type is not equal to or a supertype // of the source type. Each element must be separately // checked. // Loop-invariant addresses. They are exclusive end pointers. ! Address end_from_addr(from, length, Address::times_ptr, 0); ! Address end_to_addr(to, length, Address::times_ptr, 0); Register end_from = from; // re-use Register end_to = to; // re-use Register count = length; // re-use // Loop-variant addresses. They assume post-incremented count < 0. ! Address from_element_addr(end_from, count, Address::times_ptr, 0); ! Address to_element_addr(end_to, count, Address::times_ptr, 0); Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes()); // Copy from low to high addresses, indexed from the end of each array. gen_write_ref_array_pre_barrier(to, count); + __ lea(end_from, end_from_addr); + __ lea(end_to, end_to_addr); assert(length == count, ""); // else fix next line: ! __ negptr(count); // negate and test the length __ jccb(Assembler::notZero, L_load_element); // Empty array: Nothing to do. ! __ xorptr(rax, rax); // return 0 on (trivial) success __ jmp(L_done); // ======== begin loop ======== // (Loop is rotated; its entry is L_load_element.) // Loop control: // for (count = -count; count != 0; count++) // Base pointers src, dst are biased by 8*count,to last element. __ align(16); __ BIND(L_store_element); ! __ movptr(to_element_addr, elem); // store the oop __ increment(count); // increment the count toward zero __ jccb(Assembler::zero, L_do_card_marks); // ======== loop entry is here ======== __ BIND(L_load_element); ! __ movptr(elem, from_element_addr); // load the oop ! __ testptr(elem, elem); __ jccb(Assembler::zero, L_store_element); // (Could do a trick here: Remember last successful non-null // element stored and make a quick oop equality check on it.) ! __ movptr(elem_klass, elem_klass_addr); // query the object klass generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp, &L_store_element, NULL); // (On fall-through, we have failed the element type check.) // ======== end loop ======== // It was a real error; we must depend on the caller to finish the job. // Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops. // Emit GC store barriers for the oops we have copied (length_arg + count), // and report their number to the caller. __ addl(count, length_arg); // transfers = (length - remaining) ! __ movl2ptr(rax, count); // save the value ! __ notptr(rax); // report (-1^K) to caller ! __ movptr(to, to_arg); // reload assert_different_registers(to, count, rax); gen_write_ref_array_post_barrier(to, count); __ jmpb(L_done); // Come here on success only. __ BIND(L_do_card_marks); ! __ movl2ptr(count, length_arg); ! __ movptr(to, to_arg); // reload gen_write_ref_array_post_barrier(to, count); ! __ xorptr(rax, rax); // return 0 on success // Common exit point (success or failure). __ BIND(L_done); ! __ pop(rbx); ! __ pop(rdi); ! __ pop(rsi); inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); return start;
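A plain-C++ sketch of the rotated loop above may help when reading the emitted code: the count runs from -length up to zero, and a failed element check reports the bitwise NOT of the number of oops already copied (the "-1^K" in the comment). Names and the elem_check_fn callback below are illustrative stand-ins, not VM code:

    #include <cstddef>
    typedef bool (*elem_check_fn)(void* elem);        // stand-in for the per-element type check

    int checkcast_copy_sketch(void** from, void** to, ptrdiff_t length, elem_check_fn ok) {
      void** end_from = from + length;                // exclusive end pointers
      void** end_to   = to   + length;
      for (ptrdiff_t count = -length; count != 0; count++) {   // counts up toward zero
        void* elem = end_from[count];                 // load the oop
        if (elem != NULL && !ok(elem))                // NULL always stores; others are checked
          return ~(int)(length + count);              // report ~(oops copied so far), i.e. -1^K
        end_to[count] = elem;                         // store the oop
      }
      return 0;                                       // full success
    }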
*** 1481,1508 **** const Register from = rax; // source array address const Register to = rdx; // destination array address const Register count = rcx; // elements count __ enter(); // required for proper stackwalking of RuntimeStub frame ! __ pushl(rsi); ! __ pushl(rdi); Address from_arg(rsp, 12+ 4); // from Address to_arg(rsp, 12+ 8); // to Address count_arg(rsp, 12+12); // byte count // Load up: ! __ movl(from , from_arg); ! __ movl(to , to_arg); ! __ movl(count, count_arg); // bump this on entry, not on exit: inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); const Register bits = rsi; ! __ movl(bits, from); ! __ orl(bits, to); ! __ orl(bits, count); __ testl(bits, BytesPerLong-1); __ jccb(Assembler::zero, L_long_aligned); __ testl(bits, BytesPerInt-1); --- 1561,1588 ---- const Register from = rax; // source array address const Register to = rdx; // destination array address const Register count = rcx; // elements count __ enter(); // required for proper stackwalking of RuntimeStub frame ! __ push(rsi); ! __ push(rdi); Address from_arg(rsp, 12+ 4); // from Address to_arg(rsp, 12+ 8); // to Address count_arg(rsp, 12+12); // byte count // Load up: ! __ movptr(from , from_arg); ! __ movptr(to , to_arg); ! __ movl2ptr(count, count_arg); // bump this on entry, not on exit: inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); const Register bits = rsi; ! __ mov(bits, from); ! __ orptr(bits, to); ! __ orptr(bits, count); __ testl(bits, BytesPerLong-1); __ jccb(Assembler::zero, L_long_aligned); __ testl(bits, BytesPerInt-1);
*** 1510,1533 **** __ testl(bits, BytesPerShort-1); __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); __ BIND(L_short_aligned); ! __ shrl(count, LogBytesPerShort); // size => short_count __ movl(count_arg, count); // update 'count' __ jump(RuntimeAddress(short_copy_entry)); __ BIND(L_int_aligned); ! __ shrl(count, LogBytesPerInt); // size => int_count __ movl(count_arg, count); // update 'count' __ jump(RuntimeAddress(int_copy_entry)); __ BIND(L_long_aligned); ! __ shrl(count, LogBytesPerLong); // size => qword_count __ movl(count_arg, count); // update 'count' ! __ popl(rdi); // Do pops here since jlong_arraycopy stub does not do it. ! __ popl(rsi); __ jump(RuntimeAddress(long_copy_entry)); return start; } --- 1590,1613 ---- __ testl(bits, BytesPerShort-1); __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); __ BIND(L_short_aligned); ! __ shrptr(count, LogBytesPerShort); // size => short_count __ movl(count_arg, count); // update 'count' __ jump(RuntimeAddress(short_copy_entry)); __ BIND(L_int_aligned); ! __ shrptr(count, LogBytesPerInt); // size => int_count __ movl(count_arg, count); // update 'count' __ jump(RuntimeAddress(int_copy_entry)); __ BIND(L_long_aligned); ! __ shrptr(count, LogBytesPerLong); // size => qword_count __ movl(count_arg, count); // update 'count' ! __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it. ! __ pop(rsi); __ jump(RuntimeAddress(long_copy_entry)); return start; }
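The bit-OR above folds the alignment of source, destination and byte count into a single value, so one test per width suffices; the byte count is then converted to an element count with the shrptr before each tail jump. A small stand-alone sketch of the same dispatch (names are illustrative, not from the VM):

    #include <cstdint>
    #include <cstddef>
    enum CopyKind { BYTE_COPY, SHORT_COPY, INT_COPY, LONG_COPY };

    CopyKind pick_copy_loop(uintptr_t from, uintptr_t to, size_t byte_count) {
      uintptr_t bits = from | to | byte_count;        // mis-alignment of any operand shows up here
      if ((bits & (8 - 1)) == 0) return LONG_COPY;    // everything 8-byte aligned
      if ((bits & (4 - 1)) == 0) return INT_COPY;     // everything 4-byte aligned
      if ((bits & (2 - 1)) == 0) return SHORT_COPY;   // everything 2-byte aligned
      return BYTE_COPY;                               // fall back to the byte copy loop
    }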
*** 1596,1607 **** __ align(CodeEntryAlignment); address start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame ! __ pushl(rsi); ! __ pushl(rdi); // bump this on entry, not on exit: inc_counter_np(SharedRuntime::_generic_array_copy_ctr); // Input values --- 1676,1687 ---- __ align(CodeEntryAlignment); address start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame ! __ push(rsi); ! __ push(rdi); // bump this on entry, not on exit: inc_counter_np(SharedRuntime::_generic_array_copy_ctr); // Input values
*** 1630,1679 **** const Register dst = rdx; // destination array oop const Register dst_pos = rdi; const Register length = rcx; // transfer count // if (src == NULL) return -1; ! __ movl(src, SRC); // src oop ! __ testl(src, src); __ jccb(Assembler::zero, L_failed_0); // if (src_pos < 0) return -1; ! __ movl(src_pos, SRC_POS); // src_pos __ testl(src_pos, src_pos); __ jccb(Assembler::negative, L_failed_0); // if (dst == NULL) return -1; ! __ movl(dst, DST); // dst oop ! __ testl(dst, dst); __ jccb(Assembler::zero, L_failed_0); // if (dst_pos < 0) return -1; ! __ movl(dst_pos, DST_POS); // dst_pos __ testl(dst_pos, dst_pos); __ jccb(Assembler::negative, L_failed_0); // if (length < 0) return -1; ! __ movl(length, LENGTH); // length __ testl(length, length); __ jccb(Assembler::negative, L_failed_0); // if (src->klass() == NULL) return -1; Address src_klass_addr(src, oopDesc::klass_offset_in_bytes()); Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes()); const Register rcx_src_klass = rcx; // array klass ! __ movl(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes())); #ifdef ASSERT // assert(src->klass() != NULL); BLOCK_COMMENT("assert klasses not null"); { Label L1, L2; ! __ testl(rcx_src_klass, rcx_src_klass); __ jccb(Assembler::notZero, L2); // it is broken if klass is NULL __ bind(L1); __ stop("broken null klass"); __ bind(L2); ! __ cmpl(dst_klass_addr, 0); __ jccb(Assembler::equal, L1); // this would be broken also BLOCK_COMMENT("assert done"); } #endif //ASSERT --- 1710,1759 ---- const Register dst = rdx; // destination array oop const Register dst_pos = rdi; const Register length = rcx; // transfer count // if (src == NULL) return -1; ! __ movptr(src, SRC); // src oop ! __ testptr(src, src); __ jccb(Assembler::zero, L_failed_0); // if (src_pos < 0) return -1; ! __ movl2ptr(src_pos, SRC_POS); // src_pos __ testl(src_pos, src_pos); __ jccb(Assembler::negative, L_failed_0); // if (dst == NULL) return -1; ! __ movptr(dst, DST); // dst oop ! __ testptr(dst, dst); __ jccb(Assembler::zero, L_failed_0); // if (dst_pos < 0) return -1; ! __ movl2ptr(dst_pos, DST_POS); // dst_pos __ testl(dst_pos, dst_pos); __ jccb(Assembler::negative, L_failed_0); // if (length < 0) return -1; ! __ movl2ptr(length, LENGTH); // length __ testl(length, length); __ jccb(Assembler::negative, L_failed_0); // if (src->klass() == NULL) return -1; Address src_klass_addr(src, oopDesc::klass_offset_in_bytes()); Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes()); const Register rcx_src_klass = rcx; // array klass ! __ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes())); #ifdef ASSERT // assert(src->klass() != NULL); BLOCK_COMMENT("assert klasses not null"); { Label L1, L2; ! __ testptr(rcx_src_klass, rcx_src_klass); __ jccb(Assembler::notZero, L2); // it is broken if klass is NULL __ bind(L1); __ stop("broken null klass"); __ bind(L2); ! __ cmpptr(dst_klass_addr, (int32_t)NULL_WORD); __ jccb(Assembler::equal, L1); // this would be broken also BLOCK_COMMENT("assert done"); } #endif //ASSERT
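A hedged C++ rendering of the cheap screening above; every failed test falls through to the common return -1 path so the caller can take the slow path. ArrayOopSketch is a stand-in type, not the real oop layout:

    #include <cstddef>
    struct ArrayOopSketch { void* klass; int length; };   // stand-in, not the real oop layout

    int screen_arraycopy_args(ArrayOopSketch* src, int src_pos,
                              ArrayOopSketch* dst, int dst_pos, int length) {
      if (src == NULL)  return -1;    // null source array
      if (src_pos < 0)  return -1;
      if (dst == NULL)  return -1;    // null destination array
      if (dst_pos < 0)  return -1;
      if (length < 0)   return -1;
      // (In debug builds the stub additionally asserts both klass fields are non-NULL.)
      return 0;                       // passed the cheap checks; type checks come next
    }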
*** 1693,1703 **** jint objArray_lh = Klass::array_layout_helper(T_OBJECT); __ cmpl(src_klass_lh_addr, objArray_lh); __ jcc(Assembler::equal, L_objArray); // if (src->klass() != dst->klass()) return -1; ! __ cmpl(rcx_src_klass, dst_klass_addr); __ jccb(Assembler::notEqual, L_failed_0); const Register rcx_lh = rcx; // layout helper assert(rcx_lh == rcx_src_klass, "known alias"); __ movl(rcx_lh, src_klass_lh_addr); --- 1773,1783 ---- jint objArray_lh = Klass::array_layout_helper(T_OBJECT); __ cmpl(src_klass_lh_addr, objArray_lh); __ jcc(Assembler::equal, L_objArray); // if (src->klass() != dst->klass()) return -1; ! __ cmpptr(rcx_src_klass, dst_klass_addr); __ jccb(Assembler::notEqual, L_failed_0); const Register rcx_lh = rcx; // layout helper assert(rcx_lh == rcx_src_klass, "known alias"); __ movl(rcx_lh, src_klass_lh_addr);
*** 1727,1742 **** const Register rsi_offset = rsi; // array offset const Register src_array = src; // src array offset const Register dst_array = dst; // dst array offset const Register rdi_elsize = rdi; // log2 element size ! __ movl(rsi_offset, rcx_lh); ! __ shrl(rsi_offset, Klass::_lh_header_size_shift); ! __ andl(rsi_offset, Klass::_lh_header_size_mask); // array_offset ! __ addl(src_array, rsi_offset); // src array offset ! __ addl(dst_array, rsi_offset); // dst array offset ! __ andl(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize // next registers should be set before the jump to corresponding stub const Register from = src; // source array address const Register to = dst; // destination array address const Register count = rcx; // elements count --- 1807,1822 ---- const Register rsi_offset = rsi; // array offset const Register src_array = src; // src array offset const Register dst_array = dst; // dst array offset const Register rdi_elsize = rdi; // log2 element size ! __ mov(rsi_offset, rcx_lh); ! __ shrptr(rsi_offset, Klass::_lh_header_size_shift); ! __ andptr(rsi_offset, Klass::_lh_header_size_mask); // array_offset ! __ addptr(src_array, rsi_offset); // src array offset ! __ addptr(dst_array, rsi_offset); // dst array offset ! __ andptr(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize // next registers should be set before the jump to corresponding stub const Register from = src; // source array address const Register to = dst; // destination array address const Register count = rcx; // elements count
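The shift-and-mask sequence above unpacks the array layout helper into the header size (the offset of element 0) and log2 of the element size. A sketch of that unpacking follows; the shift and mask constants here are placeholders for the real Klass::_lh_* values:

    #include <cstdint>
    // Placeholders: the real values are Klass::_lh_header_size_shift,
    // Klass::_lh_header_size_mask and Klass::_lh_log2_element_size_mask.
    const int LH_HEADER_SIZE_SHIFT = 8;
    const int LH_HEADER_SIZE_MASK  = 0xFF;
    const int LH_LOG2_ESIZE_MASK   = 0xFF;

    void unpack_layout_helper(int32_t lh, int* header_size, int* log2_element_size) {
      *header_size       = (lh >> LH_HEADER_SIZE_SHIFT) & LH_HEADER_SIZE_MASK;  // array base offset
      *log2_element_size = lh & LH_LOG2_ESIZE_MASK;                             // log2(element size)
    }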
*** 1744,1764 **** #define FROM Address(rsp, 12+ 4) #define TO Address(rsp, 12+ 8) // Not used now #define COUNT Address(rsp, 12+12) // Only for oop arraycopy BLOCK_COMMENT("scale indexes to element size"); ! __ movl(rsi, SRC_POS); // src_pos ! __ shll(rsi); // src_pos << rcx (log2 elsize) assert(src_array == from, ""); ! __ addl(from, rsi); // from = src_array + SRC_POS << log2 elsize ! __ movl(rdi, DST_POS); // dst_pos ! __ shll(rdi); // dst_pos << rcx (log2 elsize) assert(dst_array == to, ""); ! __ addl(to, rdi); // to = dst_array + DST_POS << log2 elsize ! __ movl(FROM, from); // src_addr ! __ movl(rdi_elsize, rcx_lh); // log2 elsize ! __ movl(count, LENGTH); // elements count BLOCK_COMMENT("choose copy loop based on element size"); __ cmpl(rdi_elsize, 0); __ jump_cc(Assembler::equal, RuntimeAddress(entry_jbyte_arraycopy)); --- 1824,1844 ---- #define FROM Address(rsp, 12+ 4) #define TO Address(rsp, 12+ 8) // Not used now #define COUNT Address(rsp, 12+12) // Only for oop arraycopy BLOCK_COMMENT("scale indexes to element size"); ! __ movl2ptr(rsi, SRC_POS); // src_pos ! __ shlptr(rsi); // src_pos << rcx (log2 elsize) assert(src_array == from, ""); ! __ addptr(from, rsi); // from = src_array + SRC_POS << log2 elsize ! __ movl2ptr(rdi, DST_POS); // dst_pos ! __ shlptr(rdi); // dst_pos << rcx (log2 elsize) assert(dst_array == to, ""); ! __ addptr(to, rdi); // to = dst_array + DST_POS << log2 elsize ! __ movptr(FROM, from); // src_addr ! __ mov(rdi_elsize, rcx_lh); // log2 elsize ! __ movl2ptr(count, LENGTH); // elements count BLOCK_COMMENT("choose copy loop based on element size"); __ cmpl(rdi_elsize, 0); __ jump_cc(Assembler::equal, RuntimeAddress(entry_jbyte_arraycopy));
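The scaling above reduces each index to base + (pos << log2_element_size), with the header offset already folded into the base. A one-function paraphrase, not VM code:

    #include <cstdint>
    // element address = (array base already including the header) + (pos << log2_element_size)
    char* element_addr(char* base_plus_header, int pos, int log2_element_size) {
      return base_plus_header + ((intptr_t)pos << log2_element_size);
    }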
*** 1768,1812 **** __ jump_cc(Assembler::equal, RuntimeAddress(entry_jint_arraycopy)); #ifdef ASSERT __ cmpl(rdi_elsize, LogBytesPerLong); __ jccb(Assembler::notEqual, L_failed); #endif ! __ popl(rdi); // Do pops here since jlong_arraycopy stub does not do it. ! __ popl(rsi); __ jump(RuntimeAddress(entry_jlong_arraycopy)); __ BIND(L_failed); ! __ xorl(rax, rax); ! __ notl(rax); // return -1 ! __ popl(rdi); ! __ popl(rsi); __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); // objArrayKlass __ BIND(L_objArray); // live at this point: rcx_src_klass, src[_pos], dst[_pos] Label L_plain_copy, L_checkcast_copy; // test array classes for subtyping ! __ cmpl(rcx_src_klass, dst_klass_addr); // usual case is exact equality __ jccb(Assembler::notEqual, L_checkcast_copy); // Identically typed arrays can be copied without element-wise checks. assert_different_registers(src, src_pos, dst, dst_pos, rcx_src_klass); arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); __ BIND(L_plain_copy); ! __ movl(count, LENGTH); // elements count ! __ movl(src_pos, SRC_POS); // reload src_pos ! __ leal(from, Address(src, src_pos, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr ! __ movl(dst_pos, DST_POS); // reload dst_pos ! __ leal(to, Address(dst, dst_pos, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr ! __ movl(FROM, from); // src_addr ! __ movl(TO, to); // dst_addr __ movl(COUNT, count); // count __ jump(RuntimeAddress(entry_oop_arraycopy)); __ BIND(L_checkcast_copy); // live at this point: rcx_src_klass, dst[_pos], src[_pos] --- 1848,1892 ---- __ jump_cc(Assembler::equal, RuntimeAddress(entry_jint_arraycopy)); #ifdef ASSERT __ cmpl(rdi_elsize, LogBytesPerLong); __ jccb(Assembler::notEqual, L_failed); #endif ! __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it. ! __ pop(rsi); __ jump(RuntimeAddress(entry_jlong_arraycopy)); __ BIND(L_failed); ! __ xorptr(rax, rax); ! __ notptr(rax); // return -1 ! __ pop(rdi); ! __ pop(rsi); __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); // objArrayKlass __ BIND(L_objArray); // live at this point: rcx_src_klass, src[_pos], dst[_pos] Label L_plain_copy, L_checkcast_copy; // test array classes for subtyping ! __ cmpptr(rcx_src_klass, dst_klass_addr); // usual case is exact equality __ jccb(Assembler::notEqual, L_checkcast_copy); // Identically typed arrays can be copied without element-wise checks. assert_different_registers(src, src_pos, dst, dst_pos, rcx_src_klass); arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); __ BIND(L_plain_copy); ! __ movl2ptr(count, LENGTH); // elements count ! __ movl2ptr(src_pos, SRC_POS); // reload src_pos ! __ lea(from, Address(src, src_pos, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr ! __ movl2ptr(dst_pos, DST_POS); // reload dst_pos ! __ lea(to, Address(dst, dst_pos, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr ! __ movptr(FROM, from); // src_addr ! __ movptr(TO, to); // dst_addr __ movl(COUNT, count); // count __ jump(RuntimeAddress(entry_oop_arraycopy)); __ BIND(L_checkcast_copy); // live at this point: rcx_src_klass, dst[_pos], src[_pos]
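For the L_plain_copy case above, element addresses are simply the T_OBJECT base offset plus a pointer-scaled index. A short sketch under that assumption (base_offset is passed in here; in the VM it would come from arrayOopDesc::base_offset_in_bytes(T_OBJECT)):

    #include <cstddef>
    // base_offset stands in for arrayOopDesc::base_offset_in_bytes(T_OBJECT).
    void** obj_element_addr(char* array_oop, int pos, size_t base_offset) {
      return (void**)(array_oop + base_offset + (size_t)pos * sizeof(void*));
    }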
*** 1822,1862 **** assert(rsi_dst_klass == src_pos, "expected alias w/ src_pos"); assert(rdi_temp == dst_pos, "expected alias w/ dst_pos"); Address dst_klass_lh_addr(rsi_dst_klass, lh_offset); // Before looking at dst.length, make sure dst is also an objArray. ! __ movl(rsi_dst_klass, dst_klass_addr); __ cmpl(dst_klass_lh_addr, objArray_lh); __ jccb(Assembler::notEqual, L_failed); // It is safe to examine both src.length and dst.length. ! __ movl(src_pos, SRC_POS); // reload rsi arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); // (Now src_pos and dst_pos are killed, but not src and dst.) // We'll need this temp (don't forget to pop it after the type check). ! __ pushl(rbx); Register rbx_src_klass = rbx; ! __ movl(rbx_src_klass, rcx_src_klass); // spill away from rcx ! __ movl(rsi_dst_klass, dst_klass_addr); Address super_check_offset_addr(rsi_dst_klass, sco_offset); Label L_fail_array_check; generate_type_check(rbx_src_klass, super_check_offset_addr, dst_klass_addr, rdi_temp, NULL, &L_fail_array_check); // (On fall-through, we have passed the array type check.) ! __ popl(rbx); __ jmp(L_plain_copy); __ BIND(L_fail_array_check); // Reshuffle arguments so we can call checkcast_arraycopy: // match initial saves for checkcast_arraycopy ! // pushl(rsi); // already done; see above ! // pushl(rdi); // already done; see above ! // pushl(rbx); // already done; see above // Marshal outgoing arguments now, freeing registers. Address from_arg(rsp, 16+ 4); // from Address to_arg(rsp, 16+ 8); // to Address length_arg(rsp, 16+12); // elements count --- 1902,1942 ---- assert(rsi_dst_klass == src_pos, "expected alias w/ src_pos"); assert(rdi_temp == dst_pos, "expected alias w/ dst_pos"); Address dst_klass_lh_addr(rsi_dst_klass, lh_offset); // Before looking at dst.length, make sure dst is also an objArray. ! __ movptr(rsi_dst_klass, dst_klass_addr); __ cmpl(dst_klass_lh_addr, objArray_lh); __ jccb(Assembler::notEqual, L_failed); // It is safe to examine both src.length and dst.length. ! __ movl2ptr(src_pos, SRC_POS); // reload rsi arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); // (Now src_pos and dst_pos are killed, but not src and dst.) // We'll need this temp (don't forget to pop it after the type check). ! __ push(rbx); Register rbx_src_klass = rbx; ! __ mov(rbx_src_klass, rcx_src_klass); // spill away from rcx ! __ movptr(rsi_dst_klass, dst_klass_addr); Address super_check_offset_addr(rsi_dst_klass, sco_offset); Label L_fail_array_check; generate_type_check(rbx_src_klass, super_check_offset_addr, dst_klass_addr, rdi_temp, NULL, &L_fail_array_check); // (On fall-through, we have passed the array type check.) ! __ pop(rbx); __ jmp(L_plain_copy); __ BIND(L_fail_array_check); // Reshuffle arguments so we can call checkcast_arraycopy: // match initial saves for checkcast_arraycopy ! // push(rsi); // already done; see above ! // push(rdi); // already done; see above ! // push(rbx); // already done; see above // Marshal outgoing arguments now, freeing registers. Address from_arg(rsp, 16+ 4); // from Address to_arg(rsp, 16+ 8); // to Address length_arg(rsp, 16+12); // elements count
*** 1867,1894 **** Address DST_POS_arg(rsp, 16+16); Address LENGTH_arg(rsp, 16+20); // push rbx, changed the incoming offsets (why not just use rbp,??) // assert(SRC_POS_arg.disp() == SRC_POS.disp() + 4, ""); ! __ movl(rbx, Address(rsi_dst_klass, ek_offset)); ! __ movl(length, LENGTH_arg); // reload elements count ! __ movl(src_pos, SRC_POS_arg); // reload src_pos ! __ movl(dst_pos, DST_POS_arg); // reload dst_pos ! __ movl(ckval_arg, rbx); // destination element type __ movl(rbx, Address(rbx, sco_offset)); __ movl(ckoff_arg, rbx); // corresponding class check offset __ movl(length_arg, length); // outgoing length argument ! __ leal(from, Address(src, src_pos, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); ! __ movl(from_arg, from); ! __ leal(to, Address(dst, dst_pos, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); ! __ movl(to_arg, to); __ jump(RuntimeAddress(entry_checkcast_arraycopy)); } return start; } --- 1947,1974 ---- Address DST_POS_arg(rsp, 16+16); Address LENGTH_arg(rsp, 16+20); // push rbx, changed the incoming offsets (why not just use rbp,??) // assert(SRC_POS_arg.disp() == SRC_POS.disp() + 4, ""); ! __ movptr(rbx, Address(rsi_dst_klass, ek_offset)); ! __ movl2ptr(length, LENGTH_arg); // reload elements count ! __ movl2ptr(src_pos, SRC_POS_arg); // reload src_pos ! __ movl2ptr(dst_pos, DST_POS_arg); // reload dst_pos ! __ movptr(ckval_arg, rbx); // destination element type __ movl(rbx, Address(rbx, sco_offset)); __ movl(ckoff_arg, rbx); // corresponding class check offset __ movl(length_arg, length); // outgoing length argument ! __ lea(from, Address(src, src_pos, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); ! __ movptr(from_arg, from); ! __ lea(to, Address(dst, dst_pos, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); ! __ movptr(to_arg, to); __ jump(RuntimeAddress(entry_checkcast_arraycopy)); } return start; }
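The re-marshalling above rebuilds the five-slot argument block the checkcast stub expects, adding the destination element klass and its super_check_offset to the usual from/to/length. A plain struct of my own, sketching those slots (not the real stack layout):

    // Illustrative only: the five outgoing slots being filled in above.
    struct CheckcastCopyArgsSketch {
      void* from;               // address of the first source element
      void* to;                 // address of the first destination element
      int   length;             // element count
      int   super_check_offset; // ckoff: offset probed inside each element's klass
      void* dst_elem_klass;     // ckval: required element supertype (dst element klass)
    };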
*** 1935,1948 **** StubRoutines::_jint_arraycopy = generate_conjoint_copy(T_INT, true, Address::times_4, entry, &entry_jint_arraycopy, "jint_arraycopy"); StubRoutines::_oop_disjoint_arraycopy = ! generate_disjoint_copy(T_OBJECT, true, Address::times_4, &entry, "oop_disjoint_arraycopy"); StubRoutines::_oop_arraycopy = ! generate_conjoint_copy(T_OBJECT, true, Address::times_4, entry, &entry_oop_arraycopy, "oop_arraycopy"); StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy"); StubRoutines::_jlong_arraycopy = --- 2015,2028 ---- StubRoutines::_jint_arraycopy = generate_conjoint_copy(T_INT, true, Address::times_4, entry, &entry_jint_arraycopy, "jint_arraycopy"); StubRoutines::_oop_disjoint_arraycopy = ! generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry, "oop_disjoint_arraycopy"); StubRoutines::_oop_arraycopy = ! generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, &entry_oop_arraycopy, "oop_arraycopy"); StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy"); StubRoutines::_jlong_arraycopy =
*** 2038,2062 **** // thread-local storage and also sets up last_Java_sp slightly // differently than the real call_VM Register java_thread = rbx; __ get_thread(java_thread); if (restore_saved_exception_pc) { ! __ movl(rax, Address(java_thread, in_bytes(JavaThread::saved_exception_pc_offset()))); ! __ pushl(rax); } __ enter(); // required for proper stackwalking of RuntimeStub frame // pc and rbp, already pushed ! __ subl(rsp, (framesize-2) * wordSize); // prolog // Frame is now completed as far as size and linkage. int frame_complete = __ pc() - start; // push java thread (becomes first argument of C function) ! __ movl(Address(rsp, thread_off * wordSize), java_thread); // Set up last_Java_sp and last_Java_fp __ set_last_Java_frame(java_thread, rsp, rbp, NULL); // Call runtime --- 2118,2142 ---- // thread-local storage and also sets up last_Java_sp slightly // differently than the real call_VM Register java_thread = rbx; __ get_thread(java_thread); if (restore_saved_exception_pc) { ! __ movptr(rax, Address(java_thread, in_bytes(JavaThread::saved_exception_pc_offset()))); ! __ push(rax); } __ enter(); // required for proper stackwalking of RuntimeStub frame // pc and rbp, already pushed ! __ subptr(rsp, (framesize-2) * wordSize); // prolog // Frame is now completed as far as size and linkage. int frame_complete = __ pc() - start; // push java thread (becomes first argument of C function) ! __ movptr(Address(rsp, thread_off * wordSize), java_thread); // Set up last_Java_sp and last_Java_fp __ set_last_Java_frame(java_thread, rsp, rbp, NULL); // Call runtime
*** 2076,2086 **** __ leave(); // required for proper stackwalking of RuntimeStub frame // check for pending exceptions #ifdef ASSERT Label L; ! __ cmpl(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD); __ jcc(Assembler::notEqual, L); __ should_not_reach_here(); __ bind(L); #endif /* ASSERT */ __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); --- 2156,2166 ---- __ leave(); // required for proper stackwalking of RuntimeStub frame // check for pending exceptions #ifdef ASSERT Label L; ! __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::notEqual, L); __ should_not_reach_here(); __ bind(L); #endif /* ASSERT */ __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
*** 2138,2149 **** generate_handler_for_unsafe_access(); // platform dependent create_control_words(); ! StubRoutines::i486::_verify_mxcsr_entry = generate_verify_mxcsr(); ! StubRoutines::i486::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd(); StubRoutines::_d2i_wrapper = generate_d2i_wrapper(T_INT, CAST_FROM_FN_PTR(address, SharedRuntime::d2i)); StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG, CAST_FROM_FN_PTR(address, SharedRuntime::d2l)); } --- 2218,2229 ---- generate_handler_for_unsafe_access(); // platform dependent create_control_words(); ! StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); ! StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd(); StubRoutines::_d2i_wrapper = generate_d2i_wrapper(T_INT, CAST_FROM_FN_PTR(address, SharedRuntime::d2i)); StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG, CAST_FROM_FN_PTR(address, SharedRuntime::d2l)); }