// NOTE(review): excerpt — the enclosing LibraryCallKit intrinsic expander
// (presumably inline_unsafe_load_store; TODO confirm against the full file)
// opens before this chunk, as does the switch on 'type'. 'kind', 'mem',
// 'adr', 'base', 'newval', 'oldval', 'alias_idx', 'value_type', 'adr_type'
// and 'rtype' are all defined in the unseen leading part.
2739 break;
2740 case T_LONG:
// 64-bit integer case: build the ideal-graph atomic node matching the
// requested LoadStore kind (get-and-add, get-and-set, or compare-and-swap).
2741 if (kind == LS_xadd) {
2742 load_store = _gvn.transform(new (C) GetAndAddLNode(control(), mem, adr, newval, adr_type));
2743 } else if (kind == LS_xchg) {
2744 load_store = _gvn.transform(new (C) GetAndSetLNode(control(), mem, adr, newval, adr_type));
2745 } else if (kind == LS_cmpxchg) {
2746 load_store = _gvn.transform(new (C) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2747 } else {
2748 ShouldNotReachHere();
2749 }
2750 break;
// Object (oop) case: the reference store is wrapped in the collector's
// pre/post write barriers (pre_barrier()/post_barrier() below).
2751 case T_OBJECT:
2752 // Transformation of a value which could be NULL pointer (CastPP #NULL)
2753 // could be delayed during Parse (for example, in adjust_map_after_if()).
2754 // Execute transformation here to avoid barrier generation in such case.
2755 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2756 newval = _gvn.makecon(TypePtr::NULL_PTR);
2757
2758 // Reference stores need a store barrier.
2759 pre_barrier(true /* do_load*/,
2760 control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
2761 NULL /* pre_val*/,
2762 T_OBJECT);
// With compressed oops (LP64, address points to a narrow oop) the values
// are first encoded to narrow oops and the narrow (N) node variants used.
2763 #ifdef _LP64
2764 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2765 Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2766 if (kind == LS_xchg) {
2767 load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
2768 newval_enc, adr_type, value_type->make_narrowoop()));
2769 } else {
2770 assert(kind == LS_cmpxchg, "wrong LoadStore operation");
// cmpxchg also needs the expected (old) value encoded to a narrow oop.
2771 Node *oldval_enc = _gvn.transform(new (C) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2772 load_store = _gvn.transform(new (C) CompareAndSwapNNode(control(), mem, adr,
2773 newval_enc, oldval_enc));
2774 }
2775 } else
2776 #endif
// Full-width pointer path (no compressed oops for this address).
2777 {
2778 if (kind == LS_xchg) {
2779 load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
2780 } else {
2781 assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2782 load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2783 }
2784 }
2785 post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2786 break;
2787 default:
2788 fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2789 break;
2790 }
2791
2792 // SCMemProjNodes represent the memory state of a LoadStore. Their
2793 // main role is to prevent LoadStore nodes from being optimized away
2794 // when their results aren't used.
2795 Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
2796 set_memory(proj, alias_idx);
2797
2798 // Add the trailing membar surrounding the access
2799 insert_mem_bar(Op_MemBarCPUOrder);
2800 insert_mem_bar(Op_MemBarAcquire);
2801
// An object xchg returns the previous value; if it was produced by the
// narrow-oop node above, decode it back to a full-width pointer before
// it becomes the intrinsic's result.
2802 #ifdef _LP64
2803 if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
2804 load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
2805 }
2806 #endif
2807
2808 assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2809 set_result(load_store);
2810 return true;
2811 }
2812
2813 //----------------------------inline_unsafe_ordered_store----------------------
2814 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
2815 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
2816 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
2817 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
2818 // This is another variant of inline_unsafe_access, differing in
2819 // that it always issues store-store ("release") barrier and ensures
2820 // store-atomicity (which only matters for "long").
2821
// Returning false declines to inline; the call then goes through the
// normal (non-intrinsic) path.
2822 if (callee()->is_static()) return false; // caller must have the capability!
2823
// NOTE(review): excerpt is truncated here — the debug-only block below
// (and the remainder of this function) continues past this chunk.
2824 #ifndef PRODUCT
2825 {
2826 ResourceMark rm;
|
// NOTE(review): excerpt — revised version of the load-store intrinsic tail
// above (presumably inline_unsafe_load_store; TODO confirm). The function
// and the switch on 'type' open before this chunk; 'kind', 'mem', 'adr',
// 'base', 'newval', 'oldval', 'alias_idx', 'value_type', 'adr_type' and
// 'rtype' come from the unseen leading part.
2739 break;
2740 case T_LONG:
// 64-bit integer case: build the ideal-graph atomic node matching the
// requested LoadStore kind (get-and-add, get-and-set, or compare-and-swap).
2741 if (kind == LS_xadd) {
2742 load_store = _gvn.transform(new (C) GetAndAddLNode(control(), mem, adr, newval, adr_type));
2743 } else if (kind == LS_xchg) {
2744 load_store = _gvn.transform(new (C) GetAndSetLNode(control(), mem, adr, newval, adr_type));
2745 } else if (kind == LS_cmpxchg) {
2746 load_store = _gvn.transform(new (C) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2747 } else {
2748 ShouldNotReachHere();
2749 }
2750 break;
2751 case T_OBJECT:
2752 // Transformation of a value which could be NULL pointer (CastPP #NULL)
2753 // could be delayed during Parse (for example, in adjust_map_after_if()).
2754 // Execute transformation here to avoid barrier generation in such case.
2755 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2756 newval = _gvn.makecon(TypePtr::NULL_PTR);
2757
2758 // Reference stores need a store barrier.
// For a swap (xchg) the pre-barrier may be deferred: the swap itself
// returns the overwritten oop, so when can_move_pre_barrier() allows it
// the barrier is emitted after the LoadStore node instead (see the
// can_move_pre_barrier() block further down).
2759 if (kind == LS_xchg) {
2760 // If pre-barrier must execute before the oop store, old value will require do_load here.
2761 if (!can_move_pre_barrier()) {
2762 pre_barrier(true /* do_load*/,
2763 control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
2764 NULL /* pre_val*/,
2765 T_OBJECT);
2766 } // Else: see below.
2767 } else if (kind == LS_cmpxchg) {
2768 // Same as for newval above:
2769 if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
2770 oldval = _gvn.makecon(TypePtr::NULL_PTR);
2771 }
2772 // The only known value which might get overwritten is oldval.
2773 pre_barrier(false /* do_load */,
2774 control(), NULL, NULL, max_juint, NULL, NULL,
2775 oldval /* pre_val */,
2776 T_OBJECT);
2777 } else {
2778 ShouldNotReachHere();
2779 }
2780
// With compressed oops (LP64, address points to a narrow oop) the values
// are first encoded to narrow oops and the narrow (N) node variants used.
2781 #ifdef _LP64
2782 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2783 Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2784 if (kind == LS_xchg) {
2785 load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
2786 newval_enc, adr_type, value_type->make_narrowoop()));
2787 } else {
2788 assert(kind == LS_cmpxchg, "wrong LoadStore operation");
// cmpxchg also needs the expected (old) value encoded to a narrow oop.
2789 Node *oldval_enc = _gvn.transform(new (C) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2790 load_store = _gvn.transform(new (C) CompareAndSwapNNode(control(), mem, adr,
2791 newval_enc, oldval_enc));
2792 }
2793 } else
2794 #endif
// Full-width pointer path (no compressed oops for this address).
2795 {
2796 if (kind == LS_xchg) {
2797 load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
2798 } else {
2799 assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2800 load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2801 }
2802 }
2803 post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2804 break;
2805 default:
2806 fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2807 break;
2808 }
2809
2810 // SCMemProjNodes represent the memory state of a LoadStore. Their
2811 // main role is to prevent LoadStore nodes from being optimized away
2812 // when their results aren't used.
2813 Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
2814 set_memory(proj, alias_idx);
2815
// Object-xchg epilogue: first decode the returned narrow oop back to a
// full pointer (LP64 with compressed oops), then — if the pre-barrier was
// deferred above — emit it now using the value the swap returned.
2816 if (type == T_OBJECT && kind == LS_xchg) {
2817 #ifdef _LP64
2818 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2819 load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
2820 }
2821 #endif
2822 if (can_move_pre_barrier()) {
2823 // Don't need to load pre_val. The old value is returned by load_store.
2824 // The pre_barrier can execute after the xchg as long as no safepoint
2825 // gets inserted between them.
2826 pre_barrier(false /* do_load */,
2827 control(), NULL, NULL, max_juint, NULL, NULL,
2828 load_store /* pre_val */,
2829 T_OBJECT);
2830 }
2831 }
2832
2833 // Add the trailing membar surrounding the access
2834 insert_mem_bar(Op_MemBarCPUOrder);
2835 insert_mem_bar(Op_MemBarAcquire);
2836
2837 assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2838 set_result(load_store);
2839 return true;
2840 }
2841
2842 //----------------------------inline_unsafe_ordered_store----------------------
2843 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
2844 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
2845 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
2846 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
2847 // This is another variant of inline_unsafe_access, differing in
2848 // that it always issues store-store ("release") barrier and ensures
2849 // store-atomicity (which only matters for "long").
2850
// Returning false declines to inline; the call then goes through the
// normal (non-intrinsic) path.
2851 if (callee()->is_static()) return false; // caller must have the capability!
2852
// NOTE(review): excerpt is truncated here — the debug-only block below
// (and the remainder of this function) continues past this chunk.
2853 #ifndef PRODUCT
2854 {
2855 ResourceMark rm;
|