1 /*
2 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
535 // make sure exception is set
536 {
537 Label L;
538 __ testptr(rax, rax);
539 __ jcc(Assembler::notEqual, L);
540 __ stop("StubRoutines::forward exception: no pending exception (2)");
541 __ bind(L);
542 }
543 #endif
544
545 // continue at exception handler (return address removed)
546 // rax: exception
547 // rbx: exception handler
548 // rdx: throwing pc
549 __ verify_oop(rax);
550 __ jmp(rbx);
551
552 return start;
553 }
554
  // Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest)
  // used by Atomic::xchg(volatile jint* dest, jint exchange_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0); // Copy to eax we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }
574
  // Implementation of intptr_t atomic_xchg(jlong exchange_value, volatile jlong* dest)
  // used by Atomic::xchg(volatile jlong* dest, jlong exchange_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }
594
  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    else
  //       return *dest;
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);                     // CMPXCHG compares against eax
    __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0)); // either way, eax ends up holding
                                               // the original *dest, which is the
                                               // stub's return value
    __ ret(0);

    return start;
  }
620
  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
  //                                           int8_t compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    else
  //       return *dest;
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);                   // sign-extend compare_value; CMPXCHGB uses al
    __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0)); // al receives the original *dest byte
    __ ret(0);

    return start;
  }
646
  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
  //                                            volatile int64_t* dest,
  //                                            int64_t compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    else
  //       return *dest;
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);                     // CMPXCHGQ compares against rax
    __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0)); // rax receives the original *dest
    __ ret(0);

    return start;
  }
672
  // Implementation of jint atomic_add(jint add_value, volatile jint* dest)
  // used by Atomic::add(volatile jint* dest, jint add_value)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);                  // rax = add_value
    __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0); // *dest += add_value; c_rarg0 = old *dest
    __ addl(rax, c_rarg0);                  // rax = old *dest + add_value = new *dest
    __ ret(0);

    return start;
  }
695
  // Implementation of intptr_t atomic_add(intptr_t add_value, volatile intptr_t* dest)
  // used by Atomic::add(volatile intptr_t* dest, intptr_t add_value)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
    __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0); // *dest += add_value; c_rarg0 = old *dest
    __ addptr(rax, c_rarg0);                  // rax = old *dest + add_value = new *dest
    __ ret(0);

    return start;
  }
718
719 // Support for intptr_t OrderAccess::fence()
720 //
721 // Arguments :
722 //
723 // Result:
724 address generate_orderaccess_fence() {
725 StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
726 address start = __ pc();
727 __ membar(Assembler::StoreLoad);
728 __ ret(0);
729
730 return start;
731 }
732
733 // Support for intptr_t get_previous_fp()
734 //
735 // This routine is used to find the previous frame pointer for the
 // caller (current_frame_guess). This is used as part of debugging
 // when ps() is seemingly lost trying to find frames.
 // This code assumes that the caller (current_frame_guess) has a frame.
6315 // Generates all stubs and initializes the entry points
6316
  // These platform-specific settings are needed by generate_call_stub()
6318 create_control_words();
6319
6320 // entry points that exist in all platforms Note: This is code
6321 // that could be shared among different platforms - however the
6322 // benefit seems to be smaller than the disadvantage of having a
6323 // much more complicated generator structure. See also comment in
6324 // stubRoutines.hpp.
6325
6326 StubRoutines::_forward_exception_entry = generate_forward_exception();
6327
6328 StubRoutines::_call_stub_entry =
6329 generate_call_stub(StubRoutines::_call_stub_return_address);
6330
6331 // is referenced by megamorphic call
6332 StubRoutines::_catch_exception_entry = generate_catch_exception();
6333
6334 // atomic calls
6335 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
6336 StubRoutines::_atomic_xchg_long_entry = generate_atomic_xchg_long();
6337 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
6338 StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
6339 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
6340 StubRoutines::_atomic_add_entry = generate_atomic_add();
6341 StubRoutines::_atomic_add_long_entry = generate_atomic_add_long();
6342 StubRoutines::_fence_entry = generate_orderaccess_fence();
6343
6344 // platform dependent
6345 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
6346 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
6347
6348 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
6349
6350 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
6351 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
6352 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
6353 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
6354
6355 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
6356 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
6357 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
6358 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
6359
6360 // Build this early so it's available for the interpreter.
6361 StubRoutines::_throw_StackOverflowError_entry =
|
1 /*
2 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
535 // make sure exception is set
536 {
537 Label L;
538 __ testptr(rax, rax);
539 __ jcc(Assembler::notEqual, L);
540 __ stop("StubRoutines::forward exception: no pending exception (2)");
541 __ bind(L);
542 }
543 #endif
544
545 // continue at exception handler (return address removed)
546 // rax: exception
547 // rbx: exception handler
548 // rdx: throwing pc
549 __ verify_oop(rax);
550 __ jmp(rbx);
551
552 return start;
553 }
554
  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //    none
  //
  // Result:
  //    none (emits a full memory barrier; StoreLoad is the only ordering
  //    x86 does not already guarantee)
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }
568
569 // Support for intptr_t get_previous_fp()
570 //
571 // This routine is used to find the previous frame pointer for the
 // caller (current_frame_guess). This is used as part of debugging
 // when ps() is seemingly lost trying to find frames.
 // This code assumes that the caller (current_frame_guess) has a frame.
6151 // Generates all stubs and initializes the entry points
6152
  // These platform-specific settings are needed by generate_call_stub()
6154 create_control_words();
6155
6156 // entry points that exist in all platforms Note: This is code
6157 // that could be shared among different platforms - however the
6158 // benefit seems to be smaller than the disadvantage of having a
6159 // much more complicated generator structure. See also comment in
6160 // stubRoutines.hpp.
6161
6162 StubRoutines::_forward_exception_entry = generate_forward_exception();
6163
6164 StubRoutines::_call_stub_entry =
6165 generate_call_stub(StubRoutines::_call_stub_return_address);
6166
6167 // is referenced by megamorphic call
6168 StubRoutines::_catch_exception_entry = generate_catch_exception();
6169
6170 // atomic calls
6171 StubRoutines::_fence_entry = generate_orderaccess_fence();
6172
6173 // platform dependent
6174 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
6175 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
6176
6177 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
6178
6179 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
6180 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
6181 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
6182 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
6183
6184 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
6185 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
6186 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
6187 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
6188
6189 // Build this early so it's available for the interpreter.
6190 StubRoutines::_throw_StackOverflowError_entry =
|