  LIR_Opr reg = rlock_result(x);
  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    // The last argument selects the unordered (NaN) result: fcmpl/dcmpl
    // treat an unordered comparison as less, fcmpg/dcmpg as greater.
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}


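// Emit a compare-and-swap of the value at 'addr', bracketed by the memory
// barriers a volatile access requires. The returned int register holds 1 if
// the swap succeeded and 0 if it failed.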
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr result = new_register(T_INT);
  LIR_Opr t1 = LIR_OprFact::illegalOpr;
  LIR_Opr t2 = LIR_OprFact::illegalOpr;
  cmp_value.load_item();
  new_value.load_item();

  // Volatile load may be followed by Unsafe CAS.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  } else {
    __ membar_release();
  }

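  // Note: with compressed oops, cas_obj is handed two extra temps, which it
  // presumably uses to hold the encoded narrow-oop forms of the compare and
  // new values around the CAS.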
  if (type == T_OBJECT || type == T_ARRAY) {
    if (UseCompressedOops) {
      t1 = new_register(T_OBJECT);
      t2 = new_register(T_OBJECT);
    }
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else {
    Unimplemented();
  }
  // Turn the condition set by the CAS into a 0/1 integer result.
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, type);
  return result;
}


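// Emit an atomic exchange: atomically store 'value' at 'addr' and return the
// previous value. R0 serves as a scratch register.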
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  LIR_Opr tmp = FrameMap::R0_opr;

  value.load_item();

  // Volatile load may be followed by Unsafe CAS.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  } else {
    __ membar_release();
  }

  __ xchg(addr, value.result(), result, tmp);

  // Trailing barrier: acquire is enough when a full fence was already emitted
  // above (IRIW case); otherwise a full fence pairs with the leading release.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar_acquire();
  } else {
    __ membar();
  }
  return result;
}


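// Emit an atomic fetch-and-add: atomically add 'value' to the contents of
// 'addr' and return the previous value, using the same barrier scheme as
// atomic_xchg above.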
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  LIR_Opr tmp = FrameMap::R0_opr;

  value.load_item();

  // Volatile load may be followed by Unsafe CAS.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar(); // To be safe. Unsafe semantics are unclear.
  } else {
    __ membar_release();
  }

  __ xadd(addr, value.result(), result, tmp);

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar_acquire();
  } else {
    __ membar();
  }
  return result;
}


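// Math intrinsics. _dabs maps directly to an abs LIR op; _dsqrt can use the
// hardware square-root instruction when the CPU provides one (has_fsqrt).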
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);
      __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_dsqrt: {
      if (VM_Version::has_fsqrt()) {