< prev index next >

src/hotspot/share/gc/shared/oopStorage.cpp

Print this page
@@ -731,20 +731,22 @@
  
  const size_t initial_active_array_size = 8;
  
  OopStorage::OopStorage(const char* name,
                         Mutex* allocation_mutex,
-                        Mutex* active_mutex) :
+                        Mutex* active_mutex,
+                        NotificationFunction notification_function) :
    _name(os::strdup(name)),
    _active_array(ActiveArray::create(initial_active_array_size)),
    _allocation_list(),
    _deferred_updates(NULL),
    _allocation_mutex(allocation_mutex),
    _active_mutex(active_mutex),
    _allocation_count(0),
    _concurrent_iteration_count(0),
-   _needs_cleanup(false)
+   _needs_cleanup(false),
+   _notification_function(notification_function)
  {
    _active_array->increment_refcount();
    assert(_active_mutex->rank() < _allocation_mutex->rank(),
           "%s: active_mutex must have lower rank than allocation_mutex", _name);
    assert(Service_lock->rank() < _active_mutex->rank(),

@@ -806,10 +808,20 @@
  // Minimum time since last service thread check before notification is
  // permitted.  The value of 500ms was an arbitrary choice; frequent, but not
  // too frequent.
  const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;
  
+ // Invoke the notification callback registered at construction time,
+ // passing the number of dead entries.  No-op when no callback was
+ // supplied (callers can pre-check with can_notify()).
+ void OopStorage::notify(size_t num_dead) const {
+   if (_notification_function != NULL) {
+     _notification_function(num_dead);
+   }
+ }
+ 
+ // Returns true if a notification callback was registered for this
+ // storage, i.e. notify() would actually invoke something.
+ bool OopStorage::can_notify() const {
+   return _notification_function != NULL;
+ }
+ 
  void OopStorage::trigger_cleanup_if_needed() {
    MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
    if (Atomic::load(&needs_cleanup_requested) &&
        !needs_cleanup_triggered &&
        (os::javaTimeNanos() > cleanup_trigger_permit_time)) {

@@ -962,11 +974,12 @@
    _storage(storage),
    _active_array(_storage->obtain_active_array()),
    _block_count(0),              // initialized properly below
    _next_block(0),
    _estimated_thread_count(estimated_thread_count),
-   _concurrent(concurrent)
+   _concurrent(concurrent),
+   _num_dead(0)
  {
    assert(estimated_thread_count > 0, "estimated thread count must be positive");
    update_concurrent_iteration_count(1);
    // Get the block count *after* iteration state updated, so concurrent
    // empty block deletion is suppressed and can't reduce the count.  But
< prev index next >