src/hotspot/share/opto/library_call.cpp

@@ -1056,11 +1056,11 @@
   ciKlass*    thread_klass = env()->Thread_klass();
   const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
   Node* thread = _gvn.transform(new ThreadLocalNode());
   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
   Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
-  if (UseLoadBarrier) {
+  if (UseZGC) {
     threadObj = load_barrier(threadObj, p);
   }
 
   tls_output = thread;
   return threadObj;

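Note on the guarded call: load_barrier() is the C2 expansion of ZGC's load barrier, applied here to the freshly loaded threadObj oop. A minimal sketch of the fast path such a barrier performs, assuming ZGC-style colored pointers (bad_bit_mask and heal are illustrative names, not the real HotSpot API):

    #include <cstdint>

    extern uintptr_t bad_bit_mask;   // flipped by the GC between phases
    uintptr_t heal(uintptr_t oop);   // slow path: mark/relocate/remap

    inline uintptr_t load_barrier(uintptr_t oop) {
      if ((oop & bad_bit_mask) != 0) {  // stale color bits?
        oop = heal(oop);                // obtain the current good pointer
      }
      return oop;
    }

The real expansion also receives the field address so the slow path can self-heal the memory location, which is why load_barrier(threadObj, p) is passed p above.
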
@@ -2637,11 +2637,11 @@
         if (need_read_barrier) {
           // We do not require a mem bar inside pre_barrier if need_mem_bar
           // is set: the barriers would be emitted by us.
           insert_pre_barrier(heap_base_oop, offset, p, !need_mem_bar);
         }
-        if (UseLoadBarrier) {
+        if (UseZGC) {
           if (!VerifyLoadBarriers) {
             p = load_barrier(p, adr);
           } else {
             if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
               p = load_barrier(p, adr);

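The pre_barrier in the comment above is the SATB-style keep-alive barrier (G1-like): it records the value about to be overwritten so concurrent marking still traces it. A compact sketch of its shape, with assumed names:

    #include <cstdint>

    extern bool marking_active;            // a per-thread flag in practice
    void satb_enqueue(uintptr_t pre_val);  // assumed buffer helper

    inline void pre_barrier(uintptr_t pre_val) {
      if (marking_active && pre_val != 0) {
        satb_enqueue(pre_val);  // keep the old value visible to the marker
      }
    }

The VerifyLoadBarriers arm emits the load barrier only when the base oop's type excludes null; since null is the all-zero bit pattern it can never carry bad color bits, so a barrier on a known-null value is a no-op anyway.
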
@@ -3129,11 +3129,11 @@
     // For CAS and weakCAS - if the expected value (oldval) is null, then
     // we can avoid expanding a load barrier (null can't have bad bits)
 
     // CMH - Remove flags and simplify code when final variants are stable
 
-    if (UseLoadBarrier) {
+    if (UseZGC) {
       switch (kind) {
         case LS_get_set:
           break;
         case LS_cmp_swap_weak:
           {

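The comment above states the invariant that makes the optimization safe: null cannot carry bad color bits, so a CAS expecting null cannot fail spuriously against a stale-colored pointer to a live object. A hedged sketch of the heal-and-retry idea a generic colored-pointer CAS needs (names are assumptions, not the real expansion):

    #include <atomic>
    #include <cstdint>

    extern uintptr_t bad_bit_mask;
    uintptr_t heal(uintptr_t oop);  // returns the current good pointer

    bool cas_oop(std::atomic<uintptr_t>& field,
                 uintptr_t expected, uintptr_t new_val) {
      uintptr_t observed = expected;
      if (field.compare_exchange_strong(observed, new_val)) {
        return true;  // bit-exact match, no barrier needed
      }
      // Only a non-null expected value can be aliased by a stale-colored
      // pointer to the same object; heal and retry once in that case.
      if (expected != 0 && (observed & bad_bit_mask) != 0 &&
          heal(observed) == expected) {
        return field.compare_exchange_strong(observed, new_val);
      }
      return false;
    }
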
@@ -3214,11 +3214,11 @@
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       load_store = _gvn.transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
     }
 #endif
-    if (UseLoadBarrier && (kind == LS_get_set) && C->directive()->UseSwapLoadBarrierOption) {
+    if (UseZGC && (kind == LS_get_set) && C->directive()->UseSwapLoadBarrierOption) {
       load_store = load_barrier(load_store, adr, false, loadstore_requires_writeback_barrier(kind), false);
     }
 
     if (can_move_pre_barrier()) {
       // Don't need to load pre_val. The old value is returned by load_store.

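getAndSet returns the previous heap value, which is itself an oop read from memory and may carry stale color bits; the UseSwapLoadBarrierOption directive only gates whether that healing is expanded at this site. Illustrative shape, reusing the assumed names from the sketches above:

    #include <atomic>
    #include <cstdint>

    extern uintptr_t bad_bit_mask;
    uintptr_t heal(uintptr_t oop);

    uintptr_t get_and_set_oop(std::atomic<uintptr_t>& field,
                              uintptr_t new_val) {
      uintptr_t old = field.exchange(new_val);  // atomic swap
      if ((old & bad_bit_mask) != 0) {
        old = heal(old);  // barrier applies to the returned old value
      }
      return old;
    }
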
@@ -3252,11 +3252,11 @@
   return true;
 }
 
 Node* LibraryCallKit::make_cas_loadbarrier(CompareAndSwapNode* cas) {
 
-  assert(UseLoadBarrier, "Must be turned on");
+  assert(UseZGC, "Must be turned on");
   assert(!UseCompressedOops, "Not allowed");
 
   Node* in_ctrl     = cas->in(MemNode::Control);
   Node* in_mem      = cas->in(MemNode::Memory);
   Node* in_adr      = cas->in(MemNode::Address);

@@ -3342,11 +3342,11 @@
   return phi;
 }
 
 Node* LibraryCallKit::make_cmpx_loadbarrier(CompareAndExchangePNode* cmpx) {
 
-  assert(UseLoadBarrier, "Must be turned on");
+  assert(UseZGC, "Must be turned on");
   assert(!UseCompressedOops, "Not allowed");
 
   Node* in_ctrl     = cmpx->in(MemNode::Control);
   Node* in_mem      = cmpx->in(MemNode::Memory);
   Node* in_adr      = cmpx->in(MemNode::Address);

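Both expansion helpers assert !UseCompressedOops: the barrier tests metadata bits kept in the high part of a full 64-bit pointer, and a 32-bit narrow oop has no room for them. A compile-time illustration under an assumed (not the real) bit layout:

    #include <cstdint>

    constexpr uint64_t kColorShift = 42;  // assumed layout
    constexpr uint64_t kColorMask  = uint64_t(0xF) << kColorShift;
    static_assert(kColorMask > UINT32_MAX,
                  "color bits do not fit in a 32-bit narrow oop");
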
@@ -6356,11 +6356,11 @@
   ciInstanceKlass* klass = env()->Object_klass();
   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
 
   Node* no_ctrl = NULL;
   Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
-  if (UseLoadBarrier) {
+  if (UseZGC) {
     result = load_barrier(result, adr, true /* weak */);
   }
 
   // Use the pre-barrier to record the value in the referent field
   pre_barrier(false /* do_load */,

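For Reference.get the barrier is emitted in its weak flavor. The difference is on the slow path: a strong barrier always heals to a live pointer, while a weak one may instead yield null for a referent that is no longer strongly reachable; the pre_barrier that follows then records a non-null result so the referent stays alive for the caller. A sketch with assumed names:

    #include <cstdint>

    extern uintptr_t bad_bit_mask;
    uintptr_t heal_strong(uintptr_t oop);
    uintptr_t heal_weak(uintptr_t oop);  // may return 0

    inline uintptr_t barrier(uintptr_t oop, bool weak) {
      if ((oop & bad_bit_mask) == 0) {
        return oop;  // already a good pointer
      }
      return weak ? heal_weak(oop) : heal_strong(oop);
    }
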
@@ -6425,11 +6425,11 @@
     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
   }
   // Build the load.
   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
-  if (UseLoadBarrier) {
+  if (UseZGC) {
     loadedField = load_barrier(loadedField, adr);
   }
 
   // If reference is volatile, prevent following memory ops from
   // floating up past the volatile read.  Also prevents commoning
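
For a volatile field the oop is loaded with acquire semantics and only then passed through the barrier; the barrier operates on the loaded value, so it composes with whatever ordering the load itself carries. A compact sketch under the same assumed names:

    #include <atomic>
    #include <cstdint>

    extern uintptr_t bad_bit_mask;
    uintptr_t heal(uintptr_t oop);

    uintptr_t volatile_oop_read(const std::atomic<uintptr_t>& field) {
      uintptr_t v = field.load(std::memory_order_acquire);
      if ((v & bad_bit_mask) != 0) {
        v = heal(v);  // healing does not disturb the acquire ordering
      }
      return v;
    }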