9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "c1/c1_Compilation.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_MacroAssembler.hpp"
31 #include "c1/c1_Runtime1.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciArrayKlass.hpp"
34 #include "ci/ciInstance.hpp"
35 #include "gc/shared/barrierSet.hpp"
36 #include "gc/shared/cardTableModRefBS.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "nativeInst_x86.hpp"
39 #include "oops/objArrayKlass.hpp"
40 #include "runtime/sharedRuntime.hpp"
41 #include "vmreg_x86.inline.hpp"
42
43
44 // These masks are used to provide 128-bit aligned bitmasks to the XMM
45 // instructions, to allow sign-masking or sign-bit flipping. They allow
46 // fast versions of NegF/NegD and AbsF/AbsD.
47
// Note: 'double' and 'long long' have 32-bit alignment on x86.
496 if (handler_base == NULL) {
497 // not enough space left for the handler
498 bailout("deopt handler overflow");
499 return -1;
500 }
501
502 int offset = code_offset();
503 InternalAddress here(__ pc());
504
505 __ pushptr(here.addr());
506 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
507 guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
508 __ end_a_stub();
509
510 return offset;
511 }
512
513
// This is the fast version of java.lang.String.compare; it has no
// OSR entry and therefore we generate a slow version for OSRs.
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
  // Intrinsic String.compareTo: receiver String arrives in rcx, the argument
  // String in arg1's register; the int result is produced in rax.
  __ movptr (rbx, rcx); // receiver is in rcx
  __ movptr (rax, arg1->as_register());

  // Get addresses of first characters from both Strings
  __ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
  if (java_lang_String::has_offset_field()) {
    // String layout with explicit offset/count fields: the char data starts
    // at value[offset] (times_2 because chars are 2 bytes wide).
    __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
    __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes()));
    __ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  } else {
    // No offset field: the length comes from the value array header and the
    // characters start at the array base.
    __ movl (rax, Address(rsi, arrayOopDesc::length_offset_in_bytes()));
    __ lea (rsi, Address(rsi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  }

  // rbx, may be NULL
  add_debug_info_for_null_check_here(info);
  __ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
  if (java_lang_String::has_offset_field()) {
    __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
    __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
    // Point rsi/rdi one element past the compared range and negate rax so the
    // loop below can count an index up toward zero.
    __ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));
    __ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));
    __ negptr(rax);

    // compare the strings in a loop

    Label loop;
    __ align(wordSize);
    __ bind(loop);
    // Load one char from each string and subtract; a non-zero difference is
    // the compareTo result.
    __ load_unsigned_short(rcx, Address(rdi, rax, Address::times_2, 0));
    __ load_unsigned_short(rbx, Address(rsi, rax, Address::times_2, 0));
    __ subl(rcx, rbx);
    // NOTE(review): haveResult/noLoop appear to be declared in code not
    // visible in this excerpt — confirm against the full function.
    __ jcc(Assembler::notZero, haveResult);
    __ increment(rax);
    __ jcc(Assembler::notZero, loop);

    // strings are equal up to min length

    __ bind(noLoop);
    __ pop(rax);
    return_op(LIR_OprFact::illegalOpr);

    __ bind(haveResult);
    // leave instruction is going to discard the TOS value
    __ mov (rax, rcx); // result of call is in rax,
}
592
593
594 void LIR_Assembler::return_op(LIR_Opr result) {
595 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
596 if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
597 assert(result->fpu() == 0, "result must already be on TOS");
598 }
599
600 // Pop the stack before the safepoint code
601 __ remove_frame(initial_frame_size_in_bytes());
602
603 bool result_is_oop = result->is_valid() ? result->is_oop() : false;
604
605 // Note: we do not need to round double result; float result has the right precision
606 // the poll sets the condition code, but no data registers
607 AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
608
609 if (Assembler::is_polling_page_far()) {
610 __ lea(rscratch1, polling_page);
611 __ relocate(relocInfo::poll_return_type);
612 __ testl(rax, Address(rscratch1, 0));
613 } else {
614 __ testl(rax, polling_page);
615 }
616 __ ret(0);
617 }
618
619
620 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
621 AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type);
622 guarantee(info != NULL, "Shouldn't be NULL");
623 int offset = __ offset();
624 if (Assembler::is_polling_page_far()) {
625 __ lea(rscratch1, polling_page);
626 offset = __ offset();
627 add_debug_info_for_branch(info);
628 __ relocate(relocInfo::poll_type);
629 __ testl(rax, Address(rscratch1, 0));
630 } else {
631 add_debug_info_for_branch(info);
632 __ testl(rax, polling_page);
633 }
634 return offset;
635 }
636
637
638 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
639 if (from_reg != to_reg) __ mov(to_reg, from_reg);
640 }
641
// Exchanges the contents of the two registers via a single xchg.
void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}
645
646
647 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
648 assert(src->is_constant(), "should not call otherwise");
649 assert(dest->is_register(), "should not call otherwise");
650 LIR_Const* c = src->as_constant_ptr();
651
652 switch (c->type()) {
653 case T_INT: {
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "c1/c1_Compilation.hpp"
29 #include "c1/c1_CodeStubs.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInstance.hpp"
36 #include "gc/shared/barrierSet.hpp"
37 #include "gc/shared/cardTableModRefBS.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "nativeInst_x86.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "runtime/sharedRuntime.hpp"
42 #include "vmreg_x86.inline.hpp"
43
44
45 // These masks are used to provide 128-bit aligned bitmasks to the XMM
46 // instructions, to allow sign-masking or sign-bit flipping. They allow
47 // fast versions of NegF/NegD and AbsF/AbsD.
48
// Note: 'double' and 'long long' have 32-bit alignment on x86.
497 if (handler_base == NULL) {
498 // not enough space left for the handler
499 bailout("deopt handler overflow");
500 return -1;
501 }
502
503 int offset = code_offset();
504 InternalAddress here(__ pc());
505
506 __ pushptr(here.addr());
507 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
508 guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
509 __ end_a_stub();
510
511 return offset;
512 }
513
514
// This is the fast version of java.lang.String.compare; it has no
// OSR entry and therefore we generate a slow version for OSRs.
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, C1ThreadLocalSafepoint *tls_stub, CodeEmitInfo* info) {
  // Intrinsic String.compareTo: receiver String arrives in rcx, the argument
  // String in arg1's register; the int result is produced in rax. tls_stub
  // is handed to return_op for the thread-local-safepoint return poll.
  __ movptr (rbx, rcx); // receiver is in rcx
  __ movptr (rax, arg1->as_register());

  // Get addresses of first characters from both Strings
  __ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
  if (java_lang_String::has_offset_field()) {
    // String layout with explicit offset/count fields: the char data starts
    // at value[offset] (times_2 because chars are 2 bytes wide).
    __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
    __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes()));
    __ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  } else {
    // No offset field: the length comes from the value array header and the
    // characters start at the array base.
    __ movl (rax, Address(rsi, arrayOopDesc::length_offset_in_bytes()));
    __ lea (rsi, Address(rsi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  }

  // rbx, may be NULL
  add_debug_info_for_null_check_here(info);
  __ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
  if (java_lang_String::has_offset_field()) {
    __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
    __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
    // Point rsi/rdi one element past the compared range and negate rax so the
    // loop below can count an index up toward zero.
    __ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));
    __ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));
    __ negptr(rax);

    // compare the strings in a loop

    Label loop;
    __ align(wordSize);
    __ bind(loop);
    // Load one char from each string and subtract; a non-zero difference is
    // the compareTo result.
    __ load_unsigned_short(rcx, Address(rdi, rax, Address::times_2, 0));
    __ load_unsigned_short(rbx, Address(rsi, rax, Address::times_2, 0));
    __ subl(rcx, rbx);
    // NOTE(review): haveResult/noLoop appear to be declared in code not
    // visible in this excerpt — confirm against the full function.
    __ jcc(Assembler::notZero, haveResult);
    __ increment(rax);
    __ jcc(Assembler::notZero, loop);

    // strings are equal up to min length

    __ bind(noLoop);
    __ pop(rax);
    return_op(LIR_OprFact::illegalOpr, tls_stub);

    __ bind(haveResult);
    // leave instruction is going to discard the TOS value
    __ mov (rax, rcx); // result of call is in rax,
}
593
594
595 void LIR_Assembler::return_op(LIR_Opr result, C1ThreadLocalSafepoint *code_stub) {
596 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
597 if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
598 assert(result->fpu() == 0, "result must already be on TOS");
599 }
600
601 // Pop the stack before the safepoint code
602 __ remove_frame(initial_frame_size_in_bytes());
603
604 bool result_is_oop = result->is_valid() ? result->is_oop() : false;
605
606 // Note: we do not need to round double result; float result has the right precision
607 // the poll sets the condition code, but no data registers
608
609 if (!ThreadLocalSafepoints) {
610 AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
611
612 if (Assembler::is_polling_page_far()) {
613 __ lea(rscratch1, polling_page);
614 __ relocate(relocInfo::poll_return_type);
615 __ testl(rax, Address(rscratch1, 0));
616 } else {
617 __ testl(rax, polling_page);
618 }
619 } else {
620 #ifdef _LP64
621 code_stub->set_safepoint_pc(__ pc());
622 __ relocate(relocInfo::poll_return_type);
623 __ testb(Address(r15_thread, Thread::yieldpoint_offset()), 2);
624 __ jcc(Assembler::equal, *code_stub->entry());
625 #else
626 ShouldNotReachHere();
627 #endif
628 }
629 __ ret(0);
630 }
631
632
633 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, C1ThreadLocalSafepoint *code_stub, CodeEmitInfo* info) {
634 AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type);
635 guarantee(info != NULL, "Shouldn't be NULL");
636 int offset = __ offset();
637 if (!ThreadLocalSafepoints) {
638 if (Assembler::is_polling_page_far()) {
639 __ lea(rscratch1, polling_page);
640 offset = __ offset();
641 add_debug_info_for_branch(info);
642 __ relocate(relocInfo::poll_type);
643 __ testl(rax, Address(rscratch1, 0));
644 } else {
645 add_debug_info_for_branch(info);
646 __ testl(rax, polling_page);
647 }
648 } else {
649 #ifdef _LP64
650 add_debug_info_for_branch(info);
651 code_stub->set_safepoint_pc(__ pc());
652 __ relocate(relocInfo::poll_type);
653 __ testb(Address(r15_thread, Thread::yieldpoint_offset()), 1);
654 __ jcc(Assembler::equal, *code_stub->entry());
655 #else
656 ShouldNotReachHere();
657 #endif
658 }
659
660 return offset;
661 }
662
663
664 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
665 if (from_reg != to_reg) __ mov(to_reg, from_reg);
666 }
667
// Exchanges the contents of the two registers via a single xchg.
void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}
671
672
673 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
674 assert(src->is_constant(), "should not call otherwise");
675 assert(dest->is_register(), "should not call otherwise");
676 LIR_Const* c = src->as_constant_ptr();
677
678 switch (c->type()) {
679 case T_INT: {
|