src/share/vm/opto/library_call.cpp

Print this page
rev 5127 : 8023597: Optimize G1 barrier code for unsafe load_store
Summary: Avoid loading the old value in G1 pre-barriers for inlined unsafe load_store nodes; for LS_xchg the result of the load_store itself is used as the pre-barrier's pre_val instead.


2738     }
2739     break;
2740   case T_LONG:
2741     if (kind == LS_xadd) {
2742       load_store = _gvn.transform(new (C) GetAndAddLNode(control(), mem, adr, newval, adr_type));
2743     } else if (kind == LS_xchg) {
2744       load_store = _gvn.transform(new (C) GetAndSetLNode(control(), mem, adr, newval, adr_type));
2745     } else if (kind == LS_cmpxchg) {
2746       load_store = _gvn.transform(new (C) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2747     } else {
2748       ShouldNotReachHere();
2749     }
2750     break;
2751   case T_OBJECT:
2752     // Transformation of a value which could be NULL pointer (CastPP #NULL)
2753     // could be delayed during Parse (for example, in adjust_map_after_if()).
2754     // Execute transformation here to avoid barrier generation in such case.
2755     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2756       newval = _gvn.makecon(TypePtr::NULL_PTR);
2757 
2758     // Reference stores need a store barrier.
2759     pre_barrier(true /* do_load*/,
2760                 control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
2761                 NULL /* pre_val*/,




2762                 T_OBJECT);




2763 #ifdef _LP64
2764     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2765       Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2766       if (kind == LS_xchg) {
2767         load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
2768                                                               newval_enc, adr_type, value_type->make_narrowoop()));
2769       } else {
2770         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2771         Node *oldval_enc = _gvn.transform(new (C) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2772         load_store = _gvn.transform(new (C) CompareAndSwapNNode(control(), mem, adr,
2773                                                                    newval_enc, oldval_enc));
2774       }
2775     } else
2776 #endif
2777     {
2778       if (kind == LS_xchg) {
2779         load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
2780       } else {
2781         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2782         load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2783       }
2784     }
2785     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2786     break;
2787   default:
2788     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2789     break;
2790   }
2791 
2792   // SCMemProjNodes represent the memory state of a LoadStore. Their
2793   // main role is to prevent LoadStore nodes from being optimized away
2794   // when their results aren't used.
2795   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
2796   set_memory(proj, alias_idx);
2797 
2798   // Add the trailing membar surrounding the access
2799   insert_mem_bar(Op_MemBarCPUOrder);
2800   insert_mem_bar(Op_MemBarAcquire);
2801 
2802 #ifdef _LP64
2803   if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
2804     load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
2805   }
2806 #endif












2807 
2808   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2809   set_result(load_store);
2810   return true;
2811 }
2812 
2813 //----------------------------inline_unsafe_ordered_store----------------------
2814 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
2815 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
2816 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
2817 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
2818   // This is another variant of inline_unsafe_access, differing in
2819   // that it always issues store-store ("release") barrier and ensures
2820   // store-atomicity (which only matters for "long").
2821 
2822   if (callee()->is_static())  return false;  // caller must have the capability!
2823 
2824 #ifndef PRODUCT
2825   {
2826     ResourceMark rm;




2738     }
2739     break;
2740   case T_LONG:
2741     if (kind == LS_xadd) {
2742       load_store = _gvn.transform(new (C) GetAndAddLNode(control(), mem, adr, newval, adr_type));
2743     } else if (kind == LS_xchg) {
2744       load_store = _gvn.transform(new (C) GetAndSetLNode(control(), mem, adr, newval, adr_type));
2745     } else if (kind == LS_cmpxchg) {
2746       load_store = _gvn.transform(new (C) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2747     } else {
2748       ShouldNotReachHere();
2749     }
2750     break;
2751   case T_OBJECT:
2752     // Transformation of a value which could be NULL pointer (CastPP #NULL)
2753     // could be delayed during Parse (for example, in adjust_map_after_if()).
2754     // Execute transformation here to avoid barrier generation in such case.
2755     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2756       newval = _gvn.makecon(TypePtr::NULL_PTR);
2757 
2758     if (kind == LS_cmpxchg) {
2759       if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
2760         oldval = _gvn.makecon(TypePtr::NULL_PTR);
2761       } else {
2762         // The only known value which might get overwritten is oldval.
2763         pre_barrier(false /* do_load */,
2764                     control(), NULL, NULL, max_juint, NULL, NULL,
2765                     oldval /* pre_val */,
2766                     T_OBJECT);
2767       }
2768     }
2769     // LS_xchg: see below.
2770 
2771 #ifdef _LP64
2772     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2773       Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2774       if (kind == LS_xchg) {
2775         load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
2776                                                               newval_enc, adr_type, value_type->make_narrowoop()));
2777       } else {
2778         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2779         Node *oldval_enc = _gvn.transform(new (C) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2780         load_store = _gvn.transform(new (C) CompareAndSwapNNode(control(), mem, adr,
2781                                                                    newval_enc, oldval_enc));
2782       }
2783     } else
2784 #endif
2785     {
2786       if (kind == LS_xchg) {
2787         load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
2788       } else {
2789         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2790         load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2791       }
2792     }
2793     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2794     break;
2795   default:
2796     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2797     break;
2798   }
2799 
2800   // SCMemProjNodes represent the memory state of a LoadStore. Their
2801   // main role is to prevent LoadStore nodes from being optimized away
2802   // when their results aren't used.
2803   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
2804   set_memory(proj, alias_idx);
2805 
2806   if (type == T_OBJECT && kind == LS_xchg) {



2807 #ifdef _LP64
2808     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2809       load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
2810     }
2811 #endif
2812     // G1: Don't need to load pre_val. The old value is returned by load_store.
2813     // The pre_barrier can execute after the xchg as long as no safepoint
2814     // gets inserted between them.
2815     pre_barrier(false /* do_load */,
2816                 control(), NULL, NULL, max_juint, NULL, NULL,
2817                 load_store /* pre_val */,
2818                 T_OBJECT);
2819   }
2820 
2821   // Add the trailing membar surrounding the access
2822   insert_mem_bar(Op_MemBarCPUOrder);
2823   insert_mem_bar(Op_MemBarAcquire);
2824 
2825   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2826   set_result(load_store);
2827   return true;
2828 }
2829 
2830 //----------------------------inline_unsafe_ordered_store----------------------
2831 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
2832 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
2833 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
2834 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
2835   // This is another variant of inline_unsafe_access, differing in
2836   // that it always issues store-store ("release") barrier and ensures
2837   // store-atomicity (which only matters for "long").
2838 
2839   if (callee()->is_static())  return false;  // caller must have the capability!
2840 
2841 #ifndef PRODUCT
2842   {
2843     ResourceMark rm;