src/share/vm/opto/library_call.cpp

rev 5102 : opto: Optimize g1 pre_barrier in library_call.

--- old version:

2738     break;
2739   case T_LONG:
2740     if (kind == LS_xadd) {
2741       load_store = _gvn.transform(new (C) GetAndAddLNode(control(), mem, adr, newval, adr_type));
2742     } else if (kind == LS_xchg) {
2743       load_store = _gvn.transform(new (C) GetAndSetLNode(control(), mem, adr, newval, adr_type));
2744     } else if (kind == LS_cmpxchg) {
2745       load_store = _gvn.transform(new (C) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2746     } else {
2747       ShouldNotReachHere();
2748     }
2749     break;
2750   case T_OBJECT:
2751     // Transformation of a value which could be a NULL pointer (CastPP #NULL)
2752     // could be delayed during Parse (for example, in adjust_map_after_if()).
2753     // Execute transformation here to avoid barrier generation in such case.
2754     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2755       newval = _gvn.makecon(TypePtr::NULL_PTR);
2756 
2757     // Reference stores need a store barrier.
2758     pre_barrier(true /* do_load*/,
2759                 control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
2760                 NULL /* pre_val*/,
2761                 T_OBJECT);
2762 #ifdef _LP64
2763     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2764       Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2765       if (kind == LS_xchg) {
2766         load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
2767                                                               newval_enc, adr_type, value_type->make_narrowoop()));
2768       } else {
2769         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2770         Node *oldval_enc = _gvn.transform(new (C) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2771         load_store = _gvn.transform(new (C) CompareAndSwapNNode(control(), mem, adr,
2772                                                                    newval_enc, oldval_enc));
2773       }
2774     } else
2775 #endif
2776     {
2777       if (kind == LS_xchg) {
2778         load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
2779       } else {
2780         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2781         load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2782       }
2783     }
2784     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2785     break;
2786   default:
2787     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2788     break;
2789   }
2790 
2791   // SCMemProjNodes represent the memory state of a LoadStore. Their
2792   // main role is to prevent LoadStore nodes from being optimized away
2793   // when their results aren't used.
2794   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
2795   set_memory(proj, alias_idx);
2796 
2797   // Add the trailing membar surrounding the access
2798   insert_mem_bar(Op_MemBarCPUOrder);
2799   insert_mem_bar(Op_MemBarAcquire);
2800 
2801 #ifdef _LP64
2802   if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
2803     load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
2804   }
2805 #endif
2806 
2807   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2808   set_result(load_store);
2809   return true;
2810 }
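
For context: pre_barrier() above emits G1's snapshot-at-the-beginning (SATB)
pre-write barrier, which records the value about to be overwritten while
concurrent marking is active. The old code always asks the barrier to load
that value itself (do_load == true). A minimal standalone sketch of the
discipline being modeled; every identifier below is invented for
illustration and is not HotSpot code:

#include <cstddef>

struct SATBQueue {
  void** buf;     // enqueue buffer, filled from the top down
  size_t index;   // byte offset of the next free slot
  bool   active;  // mirrors the concurrent-marking-active flag

  void enqueue(void* pre_val) {
    if (!active || pre_val == NULL) return;  // nothing to record
    if (index == 0) return;                  // buffer full: runtime handoff elided
    index -= sizeof(void*);
    buf[index / sizeof(void*)] = pre_val;
  }
};

// do_load == true: the barrier loads the to-be-overwritten value itself,
// as the old code requests for every LS_xchg/LS_cmpxchg on T_OBJECT.
// do_load == false: the caller supplies pre_val and no extra load is needed.
static void pre_barrier_sketch(SATBQueue& q, void** field, void* pre_val,
                               bool do_load) {
  if (do_load) pre_val = *field;
  q.enqueue(pre_val);
}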
2811 
2812 //----------------------------inline_unsafe_ordered_store----------------------
2813 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
2814 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
2815 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
2816 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
2817   // This is another variant of inline_unsafe_access, differing in
2818   // that it always issues a store-store ("release") barrier and ensures
2819   // store-atomicity (which only matters for "long").
2820 
2821   if (callee()->is_static())  return false;  // caller must have the capability!
2822 
2823 #ifndef PRODUCT
2824   {
2825     ResourceMark rm;
2826     // Check the signatures.

--- new version (rev 5102):

2738     break;
2739   case T_LONG:
2740     if (kind == LS_xadd) {
2741       load_store = _gvn.transform(new (C) GetAndAddLNode(control(), mem, adr, newval, adr_type));
2742     } else if (kind == LS_xchg) {
2743       load_store = _gvn.transform(new (C) GetAndSetLNode(control(), mem, adr, newval, adr_type));
2744     } else if (kind == LS_cmpxchg) {
2745       load_store = _gvn.transform(new (C) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2746     } else {
2747       ShouldNotReachHere();
2748     }
2749     break;
2750   case T_OBJECT:
2751     // Transformation of a value which could be a NULL pointer (CastPP #NULL)
2752     // could be delayed during Parse (for example, in adjust_map_after_if()).
2753     // Execute transformation here to avoid barrier generation in such case.
2754     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2755       newval = _gvn.makecon(TypePtr::NULL_PTR);
2756 
2757     // Reference stores need a store barrier.
2758     if (kind == LS_cmpxchg) {
2759       // The only known value which might get overwritten is oldval.
2760       pre_barrier(false /* do_load */,
2761                   control(), NULL, NULL, max_juint, NULL, NULL,
2762                   oldval /* pre_val */,
2763                   T_OBJECT);
2764     }
2765     // LS_xchg: see below.
2766 
2767 #ifdef _LP64
2768     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2769       Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2770       if (kind == LS_xchg) {
2771         load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
2772                                                               newval_enc, adr_type, value_type->make_narrowoop()));
2773       } else {
2774         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2775         Node *oldval_enc = _gvn.transform(new (C) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2776         load_store = _gvn.transform(new (C) CompareAndSwapNNode(control(), mem, adr,
2777                                                                    newval_enc, oldval_enc));
2778       }
2779     } else
2780 #endif
2781     {
2782       if (kind == LS_xchg) {
2783         load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
2784       } else {
2785         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2786         load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2787       }
2788     }
2789     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2790     break;
2791   default:
2792     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2793     break;
2794   }
2795 
2796   // SCMemProjNodes represent the memory state of a LoadStore. Their
2797   // main role is to prevent LoadStore nodes from being optimized away
2798   // when their results aren't used.
2799   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
2800   set_memory(proj, alias_idx);
2801 
2802 #ifdef _LP64
2803   if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
2804     load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
2805   }
2806 #endif
2807 
2808   // G1: Don't need to load pre_val. The old value is returned by load_store.
2809   // The pre_barrier can execute after the xchg as long as no safepoint
2810   // gets inserted between them.
2811   if (type == T_OBJECT && kind == LS_xchg) {
2812     pre_barrier(false /* do_load */,
2813                 control(), NULL, NULL, max_juint, NULL, NULL,
2814                 load_store /* pre_val */,
2815                 T_OBJECT);
2816   }
2817 
2818   // Add the trailing membar surrounding the access
2819   insert_mem_bar(Op_MemBarCPUOrder);
2820   insert_mem_bar(Op_MemBarAcquire);
2821 
2822   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2823   set_result(load_store);
2824   return true;
2825 }
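
Why the move is sound, sketched with C++11 atomics (satb_enqueue and
moved_barrier_sketch are invented stand-ins for the enqueue step above;
this illustrates the reasoning, it is not the generated IR):

#include <atomic>

static void satb_enqueue(void* /*pre_val*/) { /* record into SATB buffer; elided */ }

void moved_barrier_sketch(std::atomic<void*>& field,
                          void* oldval, void* newval) {
  // LS_cmpxchg: a store happens only if the field still holds oldval, so
  // oldval is the only value that can be overwritten. It is known up front,
  // hence the pre_barrier with do_load == false issued before the swap.
  satb_enqueue(oldval);
  void* expected = oldval;
  field.compare_exchange_strong(expected, newval);

  // LS_xchg: the exchange returns exactly the value it overwrote, so the
  // result itself serves as pre_val and the barrier can run after the swap,
  // provided no safepoint (where marking state could change) is inserted
  // between the two, which is what the comment at source line 2808 relies on.
  void* overwritten = field.exchange(newval);
  satb_enqueue(overwritten);
}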
2826 
2827 //----------------------------inline_unsafe_ordered_store----------------------
2828 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
2829 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
2830 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
2831 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
2832   // This is another variant of inline_unsafe_access, differing in
2833   // that it always issues a store-store ("release") barrier and ensures
2834   // store-atomicity (which only matters for "long").
2835 
2836   if (callee()->is_static())  return false;  // caller must have the capability!
2837 
2838 #ifndef PRODUCT
2839   {
2840     ResourceMark rm;
2841     // Check the signatures.
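
Aside on the putOrdered* contract described above: in C++11 terms it is
roughly a release store, i.e. earlier stores may not be reordered past it,
and the store itself is single-copy atomic, which is the part that matters
for long. A hedged sketch; g_field and ordered_store_sketch are invented
for illustration:

#include <atomic>

static std::atomic<long long> g_field(0);

void ordered_store_sketch(long long x) {
  // Comparable in spirit to Unsafe.putOrderedLong(o, offset, x): an
  // atomic 64-bit store with store-store ("release") ordering.
  g_field.store(x, std::memory_order_release);
}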