/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodOop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"

long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
int       NMethodSweeper::_seen = 0;         // No. of nmethods we have processed in the current pass of the CodeCache

volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until this pass is complete
volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.

jint      NMethodSweeper::_locked_seen = 0;               // No. of VM-locked nmethods we could not process in the current pass
jint      NMethodSweeper::_not_entrant_seen_on_stack = 0; // No. of not-entrant nmethods still on the stack in the current pass
bool      NMethodSweeper::_rescan = false;                // Set when another scan/sweep cycle is needed (an nmethod changed state)
bool      NMethodSweeper::_do_sweep = false;              // Whether sweeping is enabled for the current cycle
bool      NMethodSweeper::_was_full = false;              // Whether the code cache was recently full
jint      NMethodSweeper::_advise_to_sweep = 0;           // Set when the code cache is getting full and early cleaning is advised
jlong     NMethodSweeper::_last_was_full = 0;             // Time (ms) of the last code-cache-full event
uint      NMethodSweeper::_highest_marked = 0;            // Highest compile id considered for speculative disconnect
long      NMethodSweeper::_was_full_traversal = 0;        // Value of _traversals at the last speculative disconnect

class MarkActivationClosure: public CodeBlobClosure {
public:
  virtual void do_code_blob(CodeBlob* cb) {
    // If we see an activation belonging to a non_entrant nmethod, we mark it.
    if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
      ((nmethod*)cb)->mark_as_seen_on_stack();
    }
  }
};
static MarkActivationClosure mark_activation_closure;

void NMethodSweeper::scan_stacks() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  if (!MethodFlushing) return;
  _do_sweep = true;

  // No need to synchronize access, since this is always executed at a
  // safepoint.  If we aren't in the middle of a scan and a rescan
  // hasn't been requested then just return. However, if UseCodeCacheFlushing
  // is on and code cache flushing is in progress, don't skip sweeping, so
  // that we keep making progress clearing space in the code cache.
  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
    _do_sweep = false;
    return;
  }

  // Make sure the CompiledIC_lock is unlocked, since we might update some
  // inline caches. If it or the Patching_lock is locked, we just bail out and try again later.
  if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;

  // Check for restart
  assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
  if (_current == NULL) {
    _seen        = 0;
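    // Split this sweep pass into NmethodSweepFraction chunks; each call to
    // possibly_sweep() processes one chunk of the CodeCache.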
    _invocations = NmethodSweepFraction;
    _current     = CodeCache::first_nmethod();
    _traversals  += 1;
    if (PrintMethodFlushing) {
      tty->print_cr("### Sweep: stack traversal %ld", _traversals);
    }
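    // Walk all Java thread stacks and mark not-entrant nmethods that still
    // have activations, so they are not zombified while still in use.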
    Threads::nmethods_do(&mark_activation_closure);

    // Reset the flags since we started a scan from the beginning.
    _rescan = false;
    _locked_seen = 0;
    _not_entrant_seen_on_stack = 0;
  }

  if (UseCodeCacheFlushing) {
    if (!CodeCache::needs_flushing()) {
      // scan_stacks() runs during a safepoint, no race with setters
      _advise_to_sweep = 0;
    }

    if (was_full()) {
      // There was some progress so attempt to restart the compiler
      jlong now           = os::javaTimeMillis();
      jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
      jlong curr_interval = now - _last_was_full;
      if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
        CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
        set_was_full(false);

        // Update the _last_was_full time so we can tell how fast the
        // code cache is filling up
        _last_was_full = os::javaTimeMillis();

        log_sweep("restart_compiler");
      }
    }
  }
}

void NMethodSweeper::possibly_sweep() {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
  if ((!MethodFlushing) || (!_do_sweep)) return;

  if (_invocations > 0) {
    // Only one thread at a time will sweep
    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
    if (old != 0) {
      return;
    }
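    // Re-check after acquiring the sweep token: another thread may have used
    // up the remaining invocations while we were acquiring it.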
    if (_invocations > 0) {
      sweep_code_cache();
      _invocations--;
    }
    _sweep_started = 0;
  }
}

void NMethodSweeper::sweep_code_cache() {
#ifdef ASSERT
  jlong sweep_start;
  if (PrintMethodFlushing) {
    sweep_start = os::javaTimeMillis();
  }
#endif
  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
  }

  // We want to visit all nmethods after NmethodSweepFraction
  // invocations so divide the remaining number of nmethods by the
  // remaining number of invocations.  This is only an estimate since
  // the number of nmethods changes during the sweep so the final
  // stage must iterate until there are no more nmethods.
  int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;

  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    // The last invocation iterates until there are no more nmethods
    for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {

      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod.  Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      nmethod* next = CodeCache::next_nmethod(_current);

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        process_nmethod(_current);
      }
      _seen++;
      _current = next;
    }
  }

  assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");

  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
    // We've completed a scan without making progress but there were
    // nmethods we were unable to process, either because they were
    // locked or were still on the stack.  We don't have to aggressively
    // clean them up so just stop scanning.  We could scan once more
    // but that complicates the control logic and it's unlikely to
    // matter much.
    if (PrintMethodFlushing) {
      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
    }
  }

#ifdef ASSERT
  if (PrintMethodFlushing) {
    jlong sweep_end             = os::javaTimeMillis();
    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
  }
#endif

  if (_invocations == 1) {
    log_sweep("finished");
  }
}


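// nmethod state transitions driven by process_nmethod(), one step per sweep pass:
//   not_entrant --> zombie --> marked_for_reclamation --> flushed
// Unloaded nmethods are made zombie (or flushed directly if they are OSR
// methods), and alive nmethods only get their inline caches cleaned.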
void NMethodSweeper::process_nmethod(nmethod *nm) {
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  // Skip methods that are currently referenced by the VM
  if (nm->is_locked_by_vm()) {
    // But still remember to clean up inline caches for alive nmethods
    if (nm->is_alive()) {
      // Clean up all inline caches that point to zombie/non-reentrant methods
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
    } else {
      _locked_seen++;
    }
    return;
  }

  if (nm->is_zombie()) {
    // The first time we see a zombie nmethod we mark it for reclamation;
    // the second time we reclaim it. When we have seen a zombie method twice,
    // we know that there are no inline caches that refer to it.
    if (nm->is_marked_for_reclamation()) {
      assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
      }
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      nm->flush();
    } else {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
      }
      nm->mark_for_reclamation();
      _rescan = true;
    }
  } else if (nm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    if (nm->can_not_entrant_be_converted()) {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
      }
      nm->make_zombie();
      _rescan = true;
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      // We couldn't transition this nmethod so don't immediately
      // request a rescan.  If this method stays on the stack for a
      // long time we don't want to keep rescanning the code cache.
      _not_entrant_seen_on_stack++;
    }
  } else if (nm->is_unloaded()) {
    // Unloaded code, just make it a zombie
    if (PrintMethodFlushing && Verbose)
      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
    if (nm->is_osr_method()) {
      // No inline caches will ever point to osr methods, so we can just remove it
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      nm->flush();
    } else {
      nm->make_zombie();
      _rescan = true;
    }
  } else {
    assert(nm->is_alive(), "should be alive");

    if (UseCodeCacheFlushing) {
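      // An nmethod is a candidate here if its methodOop no longer points to it
      // (it was speculatively disconnected and never restored), more than two
      // stack traversals have passed since the forced cleanup, and its compile
      // id lies below the range that was marked for disconnection.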
      if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
          (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
          CodeCache::needs_flushing()) {
        // This method has not been called since the forced cleanup happened
        nm->make_not_entrant();
      }
    }

    // Clean up all inline caches that point to zombie/non-reentrant methods
    MutexLocker cl(CompiledIC_lock);
    nm->cleanup_inline_caches();
  }
}

// Code cache unloading: when compilers notice the code cache is getting full,
// they will call a vm op that comes here. This code attempts to speculatively
// unload the oldest half of the nmethods (based on the compile job id) by
// saving the old code in a list in the CodeCache. Then execution resumes.
// If a method so marked is not called by the second sweeper stack traversal
// after the current one, the nmethod will be made not entrant and removed by
// normal sweeping. If the method is called in the meantime, the methodOop's
// _code field is restored and the methodOop/nmethod go back to their normal state.
void NMethodSweeper::handle_full_code_cache(bool is_full) {
  // Only the first one to notice can advise us to start early cleaning
  if (!is_full) {
    jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
    if (old != 0) {
      return;
    }
  }

  if (is_full) {
    // Since code cache is full, immediately stop new compiles
    bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
    if (!did_set) {
      // only the first to notice can start the cleaning,
      // others will go back and block
      return;
    }
    set_was_full(true);

    // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
    jlong now = os::javaTimeMillis();
    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
    jlong curr_interval = now - _last_was_full;
    if (curr_interval < max_interval) {
      _rescan = true;
      log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
                           curr_interval/1000);
      return;
    }
  }

  VM_HandleFullCodeCache op(is_full);
  VMThread::execute(&op);

  // rescan again as soon as possible
  _rescan = true;
}

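// Speculatively disconnect the older portion of the in-use nmethods (by
// compile id) so that methods which are not called again can later be made
// not entrant and swept; see handle_full_code_cache() above for how this
// cleanup is triggered.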
void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
  // If there was a race in detecting full code cache, only run
  // one vm op for it or keep the compiler shut off

  debug_only(jlong start = os::javaTimeMillis();)

  if ((!was_full()) && (is_full)) {
    if (!CodeCache::needs_flushing()) {
      log_sweep("restart_compiler");
      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
      return;
    }
  }

  // Traverse the code cache trying to dump the oldest nmethods
  uint curr_max_comp_id = CompileBroker::get_compilation_id();
  uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
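  // flush_target is the midpoint between the previously marked range and the
  // newest compile id: nmethods with a lower compile id (roughly the older
  // half) are candidates for disconnection below.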
  log_sweep("start_cleaning");

  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
  jint disconnected = 0;
  jint made_not_entrant  = 0;
  while (nm != NULL) {
    uint curr_comp_id = nm->compile_id();

    // OSR methods cannot be flushed like this. Also, don't flush native methods
    // since they are part of the JDK in most cases
    if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
        (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {

      if ((nm->method()->code() == nm)) {
        // This method has not been previously considered for
        // unloading or it was restored already
        CodeCache::speculatively_disconnect(nm);
        disconnected++;
      } else if (nm->is_speculatively_disconnected()) {
        // This method was previously considered for preemptive unloading and was not called since then
        CompilationPolicy::policy()->delay_compilation(nm->method());
        nm->make_not_entrant();
        made_not_entrant++;
      }

      if (curr_comp_id > _highest_marked) {
        _highest_marked = curr_comp_id;
      }
    }
    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
  }

  log_sweep("stop_cleaning",
                       "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
                       disconnected, made_not_entrant);

  // Shut off compiler. Sweeper will start over with a new stack scan and
  // traversal cycle and turn it back on if it clears enough space.
  if (was_full()) {
    _last_was_full = os::javaTimeMillis();
    CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
  }

  // After two more traversals the sweeper will get rid of unrestored nmethods
  _was_full_traversal = _traversals;
#ifdef ASSERT
  jlong end = os::javaTimeMillis();
  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end - start);
  }
#endif
}


// Print out some state information about the current sweep and the
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
  if (PrintMethodFlushing) {
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    tty->print("### sweeper: %s ", msg);
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      tty->vprint(format, ap);
      va_end(ap);
    }
    tty->print_cr("%s", s.as_string());
  }

  if (LogCompilation && (xtty != NULL)) {
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      xtty->vprint(format, ap);
      va_end(ap);
    }
    xtty->print("%s", s.as_string());
    xtty->stamp();
    xtty->end_elem();
  }
}