1 /*
   2  * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  20  * CA 95054 USA or visit www.sun.com if you need additional information or
  21  * have any questions.
  22  *
  23  */
  24 
  25 # include "incls/_precompiled.incl"
  26 # include "incls/_sweeper.cpp.incl"
  27 
long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
CodeBlob* NMethodSweeper::_current = NULL;   // Current nmethod
int       NMethodSweeper::_seen = 0 ;        // No. of blobs we have currently processed in current pass of CodeCache
int       NMethodSweeper::_invocations = 0;  // No. of invocations left until we are completed with this pass

jint      NMethodSweeper::_locked_seen = 0;               // No. of VM-locked, non-alive nmethods seen this pass (couldn't be processed)
jint      NMethodSweeper::_not_entrant_seen_on_stack = 0; // No. of not-entrant nmethods still on some stack this pass
bool      NMethodSweeper::_rescan = false;                // Set when a state change requires another full pass over the CodeCache
bool      NMethodSweeper::_was_full = false;              // Set when the code cache filled up and compilation was stopped
jlong     NMethodSweeper::_advise_to_sweep = 0;           // CAS flag: first thread to notice the cache getting full sets it (see handle_full_code_cache)
jlong     NMethodSweeper::_last_was_full = 0;             // javaTimeMillis() timestamp of the last time the code cache was full
uint      NMethodSweeper::_highest_marked = 0;            // Highest compile id marked for speculative disconnect so far
long      NMethodSweeper::_was_full_traversal = 0;        // Value of _traversals when the cache was last full
  41 
  42 class MarkActivationClosure: public CodeBlobClosure {
  43 public:
  44   virtual void do_code_blob(CodeBlob* cb) {
  45     // If we see an activation belonging to a non_entrant nmethod, we mark it.
  46     if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
  47       ((nmethod*)cb)->mark_as_seen_on_stack();
  48     }
  49   }
  50 };
  51 static MarkActivationClosure mark_activation_closure;
  52 
  53 void NMethodSweeper::sweep() {
  54   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  55   if (!MethodFlushing) return;
  56 
  57   // No need to synchronize access, since this is always executed at a
  58   // safepoint.  If we aren't in the middle of scan and a rescan
  59   // hasn't been requested then just return.
  60   if (_current == NULL && !_rescan) return;
  61 
  62   // Make sure CompiledIC_lock in unlocked, since we might update some
  63   // inline caches. If it is, we just bail-out and try later.
  64   if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
  65 
  66   // Check for restart
  67   assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
  68   if (_current == NULL) {
  69     _seen        = 0;
  70     _invocations = NmethodSweepFraction;
  71     _current     = CodeCache::first();
  72     _traversals  += 1;
  73     if (PrintMethodFlushing) {
  74       tty->print_cr("### Sweep: stack traversal %d", _traversals);
  75     }
  76     Threads::nmethods_do(&mark_activation_closure);
  77 
  78     // reset the flags since we started a scan from the beginning.
  79     _rescan = false;
  80     _locked_seen = 0;
  81     _not_entrant_seen_on_stack = 0;
  82   }
  83 
  84   if (PrintMethodFlushing && Verbose) {
  85     tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
  86   }
  87 
  88   // We want to visit all nmethods after NmethodSweepFraction invocations.
  89   // If invocation is 1 we do the rest
  90   int todo = CodeCache::nof_blobs();
  91   if (_invocations != 1) {
  92     todo = (CodeCache::nof_blobs() - _seen) / _invocations;
  93     _invocations--;
  94   }
  95 
  96   for(int i = 0; i < todo && _current != NULL; i++) {
  97     CodeBlob* next = CodeCache::next(_current); // Read next before we potentially delete current
  98     if (_current->is_nmethod()) {
  99       process_nmethod((nmethod *)_current);
 100     }
 101     _seen++;
 102     _current = next;
 103   }
 104   // Because we could stop on a codeBlob other than an nmethod we skip forward
 105   // to the next nmethod (if any). codeBlobs other than nmethods can be freed
 106   // async to us and make _current invalid while we sleep.
 107   while (_current != NULL && !_current->is_nmethod()) {
 108     _current = CodeCache::next(_current);
 109   }
 110 
 111   if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
 112     // we've completed a scan without making progress but there were
 113     // nmethods we were unable to process either because they were
 114     // locked or were still on stack.  We don't have to aggresively
 115     // clean them up so just stop scanning.  We could scan once more
 116     // but that complicates the control logic and it's unlikely to
 117     // matter much.
 118     if (PrintMethodFlushing) {
 119       tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
 120     }
 121   }
 122   
 123   if (UseCodeCacheFlushing) {
 124     if (CodeCache::unallocated_capacity() > CodeCacheFlushingMinimumFreeSpace) {
 125       // In a safepoint, no race with setters
 126       _advise_to_sweep = false;
 127     }
 128     
 129     if (was_full()) {
 130       // There was some progress so attempt to restart the compiler
 131       jlong now           = os::javaTimeMillis();
 132       jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
 133       jlong curr_interval = now - _last_was_full;
 134       if ((CodeCache::unallocated_capacity() > CodeCacheFlushingMinimumFreeSpace) &&
 135           (curr_interval > max_interval)){
 136         CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
 137         set_was_full(false);
 138 
 139         // Update the _last_was_full time so we can tell how fast the 
 140         // code cache is filling up
 141         _last_was_full = os::javaTimeMillis();
 142         
 143         if (PrintMethodFlushing) {
 144           tty->print_cr("### sweeper: " UINT32_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT " restarting compiler", 
 145             CodeCache::nof_blobs(), CodeCache::unallocated_capacity(), CodeCache::max_capacity());
 146         }
 147       } 
 148     }
 149   }
 150 }
 151 
 152 
 153 void NMethodSweeper::process_nmethod(nmethod *nm) {
 154   // Skip methods that are currently referenced by the VM
 155   if (nm->is_locked_by_vm()) {
 156     // But still remember to clean-up inline caches for alive nmethods
 157     if (nm->is_alive()) {
 158       // Clean-up all inline caches that points to zombie/non-reentrant methods
 159       nm->cleanup_inline_caches();
 160     } else {
 161       _locked_seen++;
 162     }
 163     return;
 164   }
 165 
 166   if (nm->is_zombie()) {
 167     // If it is first time, we see nmethod then we mark it. Otherwise,
 168     // we reclame it. When we have seen a zombie method twice, we know that
 169     // there are no inline caches that referes to it.
 170     if (nm->is_marked_for_reclamation()) {
 171       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
 172       if (PrintMethodFlushing && Verbose) {
 173         tty->print_cr("### Nmethod 0x%x (marked for reclamation) being flushed", nm);
 174       }
 175       nm->flush();
 176     } else {
 177       if (PrintMethodFlushing && Verbose) {
 178         tty->print_cr("### Nmethod 0x%x (zombie) being marked for reclamation", nm);
 179       }
 180       nm->mark_for_reclamation();
 181       _rescan = true;
 182     }
 183   } else if (nm->is_not_entrant()) {
 184     // If there is no current activations of this method on the
 185     // stack we can safely convert it to a zombie method
 186     if (nm->can_not_entrant_be_converted()) {
 187       if (PrintMethodFlushing && Verbose) {
 188         tty->print_cr("### Nmethod 0x%x (not entrant) being made zombie", nm);
 189       }
 190       nm->make_zombie();
 191       _rescan = true;
 192     } else {
 193       // Still alive, clean up its inline caches
 194       nm->cleanup_inline_caches();
 195       // we coudn't transition this nmethod so don't immediately
 196       // request a rescan.  If this method stays on the stack for a
 197       // long time we don't want to keep rescanning at every safepoint.
 198       _not_entrant_seen_on_stack++;
 199     }
 200   } else if (nm->is_unloaded()) {
 201     // Unloaded code, just make it a zombie
 202     if (PrintMethodFlushing && Verbose)
 203       tty->print_cr("### Nmethod 0x%x (unloaded) being made zombie", nm);
 204     if (nm->is_osr_method()) {
 205       // No inline caches will ever point to osr methods, so we can just remove it
 206       nm->flush();
 207     } else {
 208       nm->make_zombie();
 209       _rescan = true;
 210     }
 211   } else {
 212     assert(nm->is_alive(), "should be alive");
 213 
 214     if (UseCodeCacheFlushing) {
 215       if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
 216           (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
 217           (CodeCache::unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace)) {
 218         // This method has not been called since the forced cleanup happened
 219         nm->make_not_entrant();
 220         nm->method()->set_saved_code(NULL);
 221       }
 222     }
 223 
 224     // Clean-up all inline caches that points to zombie/non-reentrant methods
 225     nm->cleanup_inline_caches();
 226   }
 227 }
 228 
// Code cache unloading: when compilers notice the code cache is getting full,
// they will call a vm op that comes here. This code attempts to speculatively
// unload the oldest half of the nmethods (based on the compile job id) by
// hiding the methodOop's ref to the nmethod in the _saved_code field. Then
// execution resumes. If a method so marked is not called by the second
// safepoint from the current one, the nmethod will be marked non-entrant and
// gotten rid of by normal sweeping. If the method is called, the methodOop's
// _code field is restored from the _saved_code field and the methodOop/nmethod
// go back to their normal state.
// Entry point called by compiler threads when the code cache is (nearly)
// full. Stops compilation, and unless we flushed too recently, schedules a
// VM operation that performs the speculative disconnect at a safepoint.
// is_full: true when the cache is actually full (compilation must stop),
//          false when a caller merely advises early cleaning.
void NMethodSweeper::handle_full_code_cache(bool is_full) {
  // Only the first one to notice can advise us to start early cleaning
  if (!is_full){
    // cmpxchg returns the previous value: non-zero means another thread
    // already raised the advice flag, so this caller has nothing to do.
    jlong old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
    if (old != 0) {
      return;
    }
  }

  if (is_full) {
    // Since code cache is full, immediately stop new compiles
    bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
    if (!did_set) {
      // only the first to notice can start the cleaning, 
      // others will go back and block
      return;
    }
    set_was_full(true);
    
    // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up 
    jlong now = os::javaTimeMillis();
    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
    jlong curr_interval = now - _last_was_full;
    if (curr_interval < max_interval) {
      // Flushing again so soon wouldn't help; leave the compiler stopped
      // and let the sweeper (via _rescan) reclaim space over time.
      _rescan = true;
      if (PrintMethodFlushing) {
        tty->print_cr("### handle full too often, turning off compiler");
      }    
      return;
    }
  }
    
  // Run the speculative disconnect inside a safepoint via a VM operation.
  VM_HandleFullCodeCache op(is_full);
  VMThread::execute(&op);
  
  // rescan again as soon as possible
  _rescan = true;
}
 276 
// Body of VM_HandleFullCodeCache (runs at a safepoint): speculatively
// disconnect the oldest in-use nmethods — those whose compile id is below
// the midpoint between the previous flush target and the current highest
// compile id — so that ones not re-entered within two traversals can be
// swept away (see the class comment above and process_nmethod).
void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
  // If there was a race in detecting full code cache, only run  
  // one vm op for it or keep the compiler shut off

  debug_only(jlong start = os::javaTimeMillis();)

  if ((!was_full()) && (is_full)) {
    // Someone already freed enough space since this op was queued:
    // just restart the compiler instead of flushing more code.
    if (CodeCache::unallocated_capacity() > CodeCacheFlushingMinimumFreeSpace) {
      if (PrintMethodFlushing) {
        tty->print_cr("### sweeper: " UINT32_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT " restarting compiler", 
          CodeCache::nof_blobs(), CodeCache::unallocated_capacity(), CodeCache::max_capacity());
      }
      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
      return;
    }
  }

  // Traverse the code cache trying to dump the oldest nmethods
  // flush_target = midpoint between _highest_marked (last flush) and the
  // current max compile id: roughly the "oldest half" of recent methods.
  uint curr_max_comp_id = CompileBroker::get_compilation_id();
  uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
  if (PrintMethodFlushing) {
    tty->print_cr("### Cleaning code cache: " UINT32_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
        CodeCache::nof_blobs(), CodeCache::unallocated_capacity(), CodeCache::max_capacity());
  }

  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());

        while ((nm != NULL)){
    uint curr_comp_id = nm->compile_id();

    // OSR methods cannot be flushed like this. Also, don't flush native methods
    // since they are part of the JDK in most cases
    if(nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
        (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {

      if ((nm->method()->code() == nm)) {
        // This method has not been previously considered for 
        // unloading or it was restored already
        // NOTE(review): clear_code_hedge presumably stashes _code into
        // _saved_code (the "hiding" described in the class comment above)
        // — confirm against methodOop.
        nm->method()->clear_code_hedge();
      } else if (nm->method()->saved_code() == nm) {              
        // This method was previously considered for preemptive unloading and was not called since then
        nm->method()->set_saved_code(NULL);
        // Decay the counters so the method must prove itself hot again
        // before being recompiled.
        nm->method()->invocation_counter()->decay();
        nm->method()->backedge_counter()->decay();
        nm->make_not_entrant();
      }
    
      // Track the highest compile id we disconnected so the next flush
      // targets newer methods.
      if (curr_comp_id > _highest_marked) {
        _highest_marked = curr_comp_id;
      }
    }
    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
  }
  
  // Shut off compiler. Sweeper will run exiting from this safepoint
  // and turn it back on if it clears enough space
  if (was_full()) {
    _last_was_full = os::javaTimeMillis();
    CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
  }
  
  // After two more traversals the sweeper will get rid of unrestored nmethods
  _was_full_traversal = _traversals;
  debug_only(jlong end = os::javaTimeMillis(); if(PrintMethodFlushing) tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);)
}