src/share/vm/runtime/sweeper.cpp

@@ -125,11 +125,12 @@
 }
 #else
 #define SWEEP(nm)
 #endif
 
-nmethod*  NMethodSweeper::_current         = NULL; // Current nmethod
+nmethod*  NMethodSweeper::_current_nmethod = NULL; // Current nmethod
+int       NMethodSweeper::_current_type    = 0;    // Current CodeBlobType
 long      NMethodSweeper::_traversals      = 0;    // Nof. stack traversals performed
 int       NMethodSweeper::_seen            = 0;    // Nof. nmethods we have currently processed in current pass of CodeCache
 int       NMethodSweeper::_flushed_count   = 0;    // Nof. nmethods flushed in current sweep
 int       NMethodSweeper::_zombified_count = 0;    // Nof. nmethods made zombie in current sweep
 int       NMethodSweeper::_marked_count    = 0;    // Nof. nmethods marked for reclaim in current sweep

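For context when reading the hunks below: _current_type is stored as a plain int but is iterated as a CodeBlobType. The btMethodNoProfile and btMethodProfile constants are defined elsewhere in this changeset, not in sweeper.cpp; the sketch below only illustrates the ordering the sweeper relies on (the two names are the ones referenced in this file, their values and any other enumerators are assumptions):

// Sketch only -- the real definition lives in the code cache headers of
// this changeset. The sweep loop below starts at btMethodNoProfile and
// advances with _current_type++ until it passes btMethodProfile, so it
// assumes btMethodNoProfile orders before btMethodProfile.
enum CodeBlobType {
  btMethodNoProfile,   // nmethods without profiling information
  btMethodProfile      // nmethods with profiling information
  // non-nmethod blob types are not visited by the sweeper
};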
@@ -181,11 +182,11 @@
     _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
   }
   return _hotness_counter_reset_val;
 }
 bool NMethodSweeper::sweep_in_progress() {
-  return (_current != NULL);
+  return (_current_nmethod != NULL);
 }
 
 // Scans the stacks of all Java threads and marks activations of not-entrant methods.
 // No need to synchronize access, since 'mark_active_nmethods' is always executed at a
 // safepoint.

@@ -196,15 +197,16 @@
   if (!MethodFlushing) {
     return;
   }
 
   // Check for restart
-  assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
+  assert(CodeCache::find_blob_unsafe(_current_nmethod) == _current_nmethod, "Sweeper nmethod cached state invalid");
   if (!sweep_in_progress() && need_marking_phase()) {
     _seen        = 0;
     _invocations = NmethodSweepFraction;
-    _current     = CodeCache::first_nmethod();
+    _current_nmethod        = (nmethod*)CodeCache::first_blob(btMethodNoProfile);
+    _current_type           = btMethodNoProfile;
     _traversals  += 1;
     _total_time_this_sweep = 0;
 
     if (PrintMethodFlushing) {
       tty->print_cr("### Sweep: stack traversal %d", _traversals);

@@ -249,11 +251,10 @@
     _sweep_started = 0;
   }
 }
 
 void NMethodSweeper::sweep_code_cache() {
-
   jlong sweep_start_counter = os::elapsed_counter();
 
   _flushed_count   = 0;
   _zombified_count = 0;
   _marked_count    = 0;

@@ -283,12 +284,15 @@
 
   int freed_memory = 0;
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 
+    // Sweep non-profiled and profiled nmethods
+    while (_current_type <= btMethodProfile) {
+
     // The last invocation iterates until there are no more nmethods
-    for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
+      while ((swept_count < todo || _invocations == 1) && _current_nmethod != NULL) {
       swept_count++;
       if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
         if (PrintMethodFlushing && Verbose) {
           tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
         }

@@ -300,23 +304,36 @@
         thread->java_suspend_self();
       }
       // Since we will give up the CodeCache_lock, always skip ahead
       // to the next nmethod.  Other blobs can be deleted by other
       // threads but nmethods are only reclaimed by the sweeper.
-      nmethod* next = CodeCache::next_nmethod(_current);
+        nmethod* next = (nmethod*)CodeCache::next_blob(_current_nmethod, (CodeBlobType)_current_type);
 
       // Now ready to process nmethod and give up CodeCache_lock
       {
         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-        freed_memory += process_nmethod(_current);
+          process_nmethod(_current_nmethod, (CodeBlobType)_current_type);
       }
       _seen++;
-      _current = next;
+        _current_nmethod = next;
     }
+
+      while (_current_nmethod == NULL && _current_type < btMethodProfile) {
+        // We have reached the last nmethod of the current type;
+        // advance to the next type that has nmethods available
+        _current_type++;
+        _current_nmethod = (nmethod*)CodeCache::first_blob((CodeBlobType)_current_type);
   }
 
-  assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
+      if ((swept_count >= todo && _invocations > 1) || _current_nmethod == NULL) {
+        // Either we have processed enough nmethods for this invocation
+        // or there is nothing left to sweep
+        break;
+      }
+    }
+  }
+
+  assert(_invocations > 1 || _current_nmethod == NULL, "must have scanned the whole cache");
 
   if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
     // we've completed a scan without making progress but there were
     // nmethods we were unable to process either because they were
     // locked or were still on stack. We don't have to aggressively

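Because the new loop structure is spread across added and unchanged lines in the hunk above, here is a consolidated sketch of the control flow it produces, with locking, the safepoint/suspension handling, and tracing stripped out. It uses only names that appear in the patch and is a reading aid, not part of the change:

// Outer loop: walk the nmethod heaps by CodeBlobType, non-profiled first.
while (_current_type <= btMethodProfile) {
  // Inner loop: sweep nmethods of the current type, bounded by the
  // per-invocation budget 'todo' (the last invocation drains everything).
  while ((swept_count < todo || _invocations == 1) && _current_nmethod != NULL) {
    swept_count++;
    nmethod* next = (nmethod*)CodeCache::next_blob(_current_nmethod, (CodeBlobType)_current_type);
    process_nmethod(_current_nmethod, (CodeBlobType)_current_type);
    _seen++;
    _current_nmethod = next;
  }
  // Current type exhausted: advance to the next type that has blobs.
  while (_current_nmethod == NULL && _current_type < btMethodProfile) {
    _current_type++;
    _current_nmethod = (nmethod*)CodeCache::first_blob((CodeBlobType)_current_type);
  }
  // Done for this invocation once the budget is used up or nothing is left.
  if ((swept_count >= todo && _invocations > 1) || _current_nmethod == NULL) {
    break;
  }
}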
@@ -356,24 +373,10 @@
 
   if (_invocations == 1) {
     _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
     log_sweep("finished");
   }
-
-  // Sweeper is the only case where memory is released, check here if it
-  // is time to restart the compiler. Only checking if there is a certain
-  // amount of free memory in the code cache might lead to re-enabling
-  // compilation although no memory has been released. For example, there are
-  // cases when compilation was disabled although there is 4MB (or more) free
-  // memory in the code cache. The reason is code cache fragmentation. Therefore,
-  // it only makes sense to re-enable compilation if we have actually freed memory.
-  // Note that typically several kB are released for sweeping 16MB of the code
-  // cache. As a result, 'freed_memory' > 0 to restart the compiler.
-  if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) {
-    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
-    log_sweep("restart_compiler");
-  }
 }
 
 class NMethodMarker: public StackObj {
  private:
   CompilerThread* _thread;

@@ -405,11 +408,11 @@
 
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   nm->flush();
 }
 
-int NMethodSweeper::process_nmethod(nmethod *nm) {
+int NMethodSweeper::process_nmethod(nmethod *nm, CodeBlobType bt) {
   assert(!CodeCache_lock->owned_by_self(), "just checking");
 
   int freed_memory = 0;
   // Make sure this nmethod doesn't get unloaded during the scan,
   // since safepoints may happen during acquired below locks.

@@ -497,11 +500,11 @@
         nm->dec_hotness_counter();
         // Get the initial value of the hotness counter. This value depends on the
         // ReservedCodeCacheSize
         int reset_val = hotness_counter_reset_val();
         int time_since_reset = reset_val - nm->hotness_counter();
-        double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
+        double threshold = -reset_val + (CodeCache::reverse_free_ratio(bt) * NmethodSweepActivity);
         // The less free space in the code cache we have - the bigger reverse_free_ratio() is.
         // I.e., 'threshold' increases with lower available space in the code cache and a higher
         // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
         // value until it is reset by stack walking - is smaller than the computed threshold, the
         // corresponding nmethod is considered for removal.
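A rough worked example of the per-type threshold, to make the comment above concrete (the numbers are illustrative, and it assumes the new reverse_free_ratio(bt) behaves like the existing whole-cache ratio, i.e. it grows as the selected heap fills up): with a 240M ReservedCodeCacheSize the reset value is 240 * 2 = 480; if the non-profiled nmethod heap is about 80% full, the reverse free ratio is roughly 5, and with NmethodSweepActivity at, say, 10 the threshold becomes -480 + 5 * 10 = -430. An nmethod is considered for removal once its hotness counter, decremented by the sweeper and reset only by stack walking, drops below that value; as the heap fills further the ratio and therefore the threshold rise, so sweeping becomes more aggressive for that particular heap.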