
src/hotspot/share/runtime/synchronizer.cpp

rev 60098 : 8246476: remove AsyncDeflateIdleMonitors option and the safepoint based deflation mechanism
Reviewed-by: dholmes, pchilanomate, coleenp
rev 60099 : coleenp CR

@@ -980,13 +980,12 @@
 }
 
 intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
   if (UseBiasedLocking) {
     // NOTE: many places throughout the JVM do not expect a safepoint
-    // to be taken here, in particular most operations on perm gen
-    // objects. However, we only ever bias Java instances and all of
-    // the call sites of identity_hash that might revoke biases have
+    // to be taken here. However, we only ever bias Java instances and all
+    // of the call sites of identity_hash that might revoke biases have
     // been checked to make sure they can handle a safepoint. The
     // added check of the bias pattern is to avoid useless calls to
     // thread-local storage.
     if (obj->mark().has_bias_pattern()) {
       // Handle for oop obj in case of STW safepoint
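
The comment in the hunk above hinges on a cheap header check guarding an expensive path. A minimal stand-alone sketch of that idiom follows; the bit values and names are illustrative, not HotSpot's actual markWord layout or biased-locking API:

  #include <cstdint>

  // Assumed, illustrative bias-pattern bits; not the real markWord encoding.
  static const uintptr_t kBiasedLockMask    = 0x7;
  static const uintptr_t kBiasedLockPattern = 0x5;

  static inline bool has_bias_pattern(uintptr_t mark) {
    return (mark & kBiasedLockMask) == kBiasedLockPattern;
  }

  uintptr_t identity_hash_sketch(uintptr_t* header) {
    if (has_bias_pattern(*header)) {
      // Only a biased header pays for the heavyweight path (bias revocation,
      // thread-local lookups, possibly a safepoint); unbiased headers skip it.
      // revoke_bias(header);   // placeholder only, not a real API
    }
    return *header >> 8;        // illustrative hash extraction only
  }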

@@ -1370,13 +1369,13 @@
 
 // -----------------------------------------------------------------------------
 // ObjectMonitor Lifecycle
 // -----------------------
 // Inflation unlinks monitors from om_list_globals._free_list or a per-thread
-// free list and associates them with objects. Deflation -- which occurs at
-// STW-time or asynchronously -- disassociates idle monitors from objects.
-// Such scavenged monitors are returned to the om_list_globals._free_list.
+// free list and associates them with objects. Async deflation disassociates
+// idle monitors from objects. Such scavenged monitors are returned to the
+// om_list_globals._free_list.
 //
 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
 //
 // Lifecycle:
 // --   unassigned and on the om_list_globals._free_list

@@ -1385,11 +1384,11 @@
 //      to the ObjectMonitor.
 
 ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
   // A large MAXPRIVATE value reduces both list lock contention
   // and list coherency traffic, but also tends to increase the
-  // number of ObjectMonitors in circulation as well as the STW
+  // number of ObjectMonitors in circulation as well as the
   // scavenge costs.  As usual, we lean toward time in space-time
   // tradeoffs.
   const int MAXPRIVATE = 1024;
   NoSafepointVerifier nsv;
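
The MAXPRIVATE comment above describes a batched-refill tradeoff: taking several monitors from the shared free list per lock acquisition cuts contention and coherency traffic on the global list, at the cost of keeping more monitors parked on per-thread lists. A self-contained sketch of that pattern in generic C++ (not the real om_alloc / om_list_globals code):

  #include <mutex>

  struct Node { Node* next; };

  static Node*      g_free_list = nullptr;   // stand-in for a global free list
  static std::mutex g_free_lock;

  // Move up to 'batch' nodes onto a thread-local list in one critical section.
  // A larger batch means fewer trips through the lock (less contention and
  // coherency traffic) but more nodes held privately, i.e. "in circulation".
  void refill_local(Node*& local_list, int batch) {
    std::lock_guard<std::mutex> guard(g_free_lock);
    for (int i = 0; i < batch && g_free_list != nullptr; i++) {
      Node* n     = g_free_list;
      g_free_list = n->next;
      n->next     = local_list;
      local_list  = n;
    }
  }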
 

@@ -1433,13 +1432,13 @@
           // proper value.
           take->add_to_contentions(max_jint);
 
 #ifdef ASSERT
           jint l_contentions = take->contentions();
-#endif
           assert(l_contentions >= 0, "must not be negative: l_contentions=%d, contentions=%d",
                  l_contentions, take->contentions());
+#endif
         }
         take->Recycle();
         // Since we're taking from the global free-list, take must be Free.
         // om_release() also sets the allocation state to Free because it
         // is called from other code paths.
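
The #endif moves in this hunk (and in the similar hunks below) group the debug-only local and the assert that reads it into a single ASSERT-only block. A stand-alone sketch of the idiom, using a toy assert macro rather than HotSpot's:

  #include <cstdlib>

  // Toy stand-in for a debug-only assert; expands to nothing in product builds.
  #ifdef ASSERT
  #define TOY_ASSERT(cond) do { if (!(cond)) abort(); } while (0)
  #else
  #define TOY_ASSERT(cond) ((void)0)
  #endif

  void example(int contentions) {
  #ifdef ASSERT
    // Snapshot and check live in one debug-only block: a product build sees
    // neither the local nor the check.
    int l_contentions = contentions;
    TOY_ASSERT(l_contentions >= 0);
  #endif
    (void)contentions;  // parameter is used only by the debug check in this sketch
    // ... work common to all builds continues here ...
  }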

@@ -1673,13 +1672,13 @@
       cur_om = unmarked_next(cur_om);
     }
     guarantee(in_use_tail != NULL, "invariant");
 #ifdef ASSERT
     int l_om_in_use_count = Atomic::load(&self->om_in_use_count);
-#endif
     assert(l_om_in_use_count == in_use_count, "in-use counts don't match: "
            "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count);
+#endif
     Atomic::store(&self->om_in_use_count, 0);
     // Clear the in-use list head (which also unlocks it):
     Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
     om_unlock(in_use_list);
   }

@@ -1718,13 +1717,13 @@
       }
     }
     guarantee(free_tail != NULL, "invariant");
 #ifdef ASSERT
     int l_om_free_count = Atomic::load(&self->om_free_count);
-#endif
     assert(l_om_free_count == free_count, "free counts don't match: "
            "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
+#endif
     Atomic::store(&self->om_free_count, 0);
     Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
     om_unlock(free_list);
   }
 

@@ -2102,12 +2101,12 @@
     // to fix the linkages in its context.
     ObjectMonitor* prevtail = *free_tail_p;
     // prevtail should have been cleaned up by the caller:
 #ifdef ASSERT
     ObjectMonitor* l_next_om = unmarked_next(prevtail);
-#endif
     assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
+#endif
     om_lock(prevtail);
     prevtail->set_next_om(mid);  // prevtail now points to mid (and is unlocked)
   }
   *free_tail_p = mid;
 

@@ -2401,12 +2400,12 @@
       // but the next field in free_tail_p can flicker to marked
       // and then unmarked while prepend_to_common() is sorting it
       // all out.
 #ifdef ASSERT
       ObjectMonitor* l_next_om = unmarked_next(free_tail_p);
-#endif
       assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
+#endif
 
       prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count);
 
       OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
     }
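
"Flicker to marked and then unmarked" above refers to the list protocol in this file where the low bit of a next pointer doubles as a lock/mark bit and unmarked_next() strips it before use. A generic sketch of that pointer-marking idiom (not the actual ObjectMonitor list code):

  #include <atomic>
  #include <cstdint>

  struct LNode { std::atomic<LNode*> next; };

  static inline LNode* mark_ptr(LNode* p)   { return (LNode*)((uintptr_t)p | (uintptr_t)1); }
  static inline LNode* strip_mark(LNode* p) { return (LNode*)((uintptr_t)p & ~(uintptr_t)1); }
  static inline bool   is_marked(LNode* p)  { return ((uintptr_t)p & 1) != 0; }

  // Sketch of an unmarked_next()-style accessor: read the next field and
  // ignore a transient mark bit another thread may set and clear while it
  // rearranges the list.
  static inline LNode* unmarked_next_sketch(LNode* n) {
    return strip_mark(n->next.load(std::memory_order_acquire));
  }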

@@ -2473,11 +2472,11 @@
 // all remaining monitors are heavyweight.  All exceptions are swallowed.
 // Scanning the extant monitor list can be time consuming.
 // A simple optimization is to add a per-thread flag that indicates a thread
 // called jni_monitorenter() during its lifetime.
 //
-// Instead of No_Savepoint_Verifier it might be cheaper to
+// Instead of NoSafepointVerifier it might be cheaper to
 // use an idiom of the form:
 //   auto int tmp = SafepointSynchronize::_safepoint_counter ;
 //   <code that must not run at safepoint>
 //   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
 // Since the tests are extremely cheap we could leave them enabled
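
For the counter idiom in the comment above: assuming the usual convention that the counter is incremented both when a safepoint begins and when it ends (so it is odd while one is in progress), the XOR term is zero only if the counter never moved, and the low-bit term is zero only if no safepoint was already in progress when the snapshot was taken; the guarantee therefore passes exactly when the bracketed code ran safepoint-free. A minimal stand-alone version of the check:

  #include <cstdint>

  static volatile uint64_t g_safepoint_counter = 0;   // stand-in for the real field

  // True iff no safepoint began, ended, or was in progress since 'snapshot'
  // was read (under the odd-while-at-safepoint convention assumed above).
  static bool ran_without_safepoint(uint64_t snapshot) {
    return ((snapshot ^ g_safepoint_counter) | (snapshot & 1)) == 0;
  }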